New Files:

./tools/trace/wqtrace.lua is a new file
./tools/trace/bridgetime.lua is a new file
./tools/tests/personas/persona_test_run.sh is a new file
./tools/lldbmacros/workqueue.py is a new file
./tools/lldbmacros/turnstile.py is a new file
./tools/lldbmacros/skywalk.py is a new file
./tools/lldbmacros/zonetriage.py is a new file
./config/generate_linker_aliases.sh is a new file
./config/MASTER.arm64.bcm2837 is a new file
./iokit/IOKit/perfcontrol/Makefile is a new file
./iokit/IOKit/perfcontrol/IOPerfControl.h is a new file
./iokit/Kernel/IOPerfControl.cpp is a new file
./tests/vm_set_max_addr_helper.c is a new file
./tests/xnu_quick_test_helpers.h is a new file
./tests/stackshot.m is a new file
./tests/xnu_quick_test_getsetpriority.c is a new file
./tests/perf_vmfault.c is a new file
./tests/proc_core_name_24152432.c is a new file
./tests/kdebug.c is a new file
./tests/ltable_exhaustion_test.c is a new file
./tests/perf_kdebug.c is a new file
./tests/kernel_uuid_match.c is a new file
./tests/host_statistics_rate_limiting.c is a new file
./tests/task_info.c is a new file
./tests/stackshot_spawn_exit_stress.c is a new file
./tests/socket_bind_35243417.c is a new file
./tests/kperf.c is a new file
./tests/gettimeofday.c is a new file
./tests/ntp_adjtime_29192647.c is a new file
./tests/work_interval_test.entitlements is a new file
./tests/task_inspect.c is a new file
./tests/socket_bind_35685803.c is a new file
./tests/drop_priv.c is a new file
./tests/excserver.defs is a new file
./tests/Makefile is a new file
./tests/kevent_continuous_time.c is a new file
./tests/suspended_spawn_26184412.c is a new file
./tests/socket_poll_close_25786011.c is a new file
./tests/mach_port_mod_refs.c is a new file
./tests/xnu_quick_test.entitlements is a new file
./tests/monotonic_core.c is a new file
./tests/kevent_qos.c is a new file
./tests/kperf_backtracing.c is a new file
./tests/task_for_pid_entitlement.plist is a new file
./tests/mach_boottime_usec.c is a new file
./tests/poll.c is a new file
./tests/network_entitlements.plist is a new file
./tests/phys_footprint_interval_max.c is a new file
./tests/kqueue_close.c is a new file
./tests/kqueue_fifo_18776047.c is a new file
./tests/cpucount.c is a new file
./tests/telemetry.c is a new file
./tests/remote_time.c is a new file
./tests/turnstiles_test.c is a new file
./tests/disk_mount_conditioner-entitlements.plist is a new file
./tests/mach_continuous_time.c is a new file
./tests/contextswitch.c is a new file
./tests/kqueue_add_and_trigger.c is a new file
./tests/memorystatus_vm_map_fork.c is a new file
./tests/settimeofday_29193041.entitlements is a new file
./tests/verify_kalloc_config.c is a new file
./tests/kperf_helpers.h is a new file
./tests/mach_get_times.c is a new file
./tests/disk_mount_conditioner.c is a new file
./tests/kernel_mtx_perf.c is a new file
./tests/task_inspect.entitlements is a new file
./tests/xnu_quick_test_entitled.c is a new file
./tests/mach_port_deallocate_21692215.c is a new file
./tests/regression_17272465.c is a new file
./tests/quiesce_counter.c is a new file
./tests/private_entitlement.plist is a new file
./tests/mach_port_insert_right.c is a new file
./tests/data_protection.c is a new file
./tests/proc_info.c is a new file
./tests/kqueue_timer_tests.c is a new file
./tests/mktimer_kobject.c is a new file
./tests/perf_spawn_fork.c is a new file
./tests/proc_uuid_policy_26567533.c is a new file
./tests/xnu_quick_test_helpers.c is a new file
./tests/freebsd_waitpid_nohang.c is a new file
./tests/turnstile_multihop_types.h is a new file
./tests/turnstile_multihop.c is a new file
./tests/memorystatus_freeze_test.c is a new file
./tests/pwrite_avoid_sigxfsz_28581610.c is a new file
./tests/jumbo_va_spaces_28530648.entitlements is a new file
./tests/gettimeofday_29192647.c is a new file
./tests/net_tuntests.c is a new file
./tests/exc_resource_threads.c is a new file
./tests/host_notifications.c is a new file
./tests/voucher_traps.c is a new file
./tests/proc_info_list_kthreads.c is a new file
./tests/workq_sigprof.c is a new file
./tests/vm_set_max_addr_test.c is a new file
./tests/kevent_pty.c is a new file
./tests/stackshot_idle_25570396.m is a new file
./tests/ioperf.c is a new file
./tests/port_descriptions.c is a new file
./tests/avx.c is a new file
./tests/perf_exit_proc.c is a new file
./tests/launchd_plists/com.apple.xnu.test.kevent_qos.plist is a new file
./tests/launchd_plists/com.apple.xnu.test.turnstile_multihop.plist is a new file
./tests/stackshot_block_owner_14362384.m is a new file
./tests/kqueue_file_tests.c is a new file
./tests/kpc.c is a new file
./tests/voucher_entry_18826844.c is a new file
./tests/mach_timebase_info.c is a new file
./tests/tty_hang.c is a new file
./tests/netbsd_utimensat.c is a new file
./tests/wired_mem_bench.c is a new file
./tests/backtracing.c is a new file
./tests/jumbo_va_spaces_28530648.c is a new file
./tests/work_interval_test.c is a new file
./tests/settimeofday_29193041_entitled.c is a new file
./tests/memorystatus_zone_test.c is a new file
./tests/atm_diagnostic_flag.c is a new file
./tests/perf_compressor.c is a new file
./tests/no32exec_35914211_helper.c is a new file
./tests/no32exec_35914211.c is a new file
./tests/proc_info_udata.c is a new file
./tests/perf_exit.c is a new file
./tests/thread_group_set_32261625.c is a new file
./tests/settimeofday_29193041.c is a new file
./tests/sigchld_return.c is a new file
./tests/sigcont_return.c is a new file
./tests/poll_select_kevent_paired_fds.c is a new file
./tests/kperf_helpers.c is a new file
./tests/utimensat.c is a new file
./tests/xnu_quick_test.c is a new file
./tests/task_info_28439149.c is a new file
./tests/proc_info_list_kthreads.entitlements is a new file
./tests/turnstile_multihop_helper.h is a new file
./tests/net_tun_pr_35136664.c is a new file
./bsd/net/nat464_utils.h is a new file
./bsd/net/if_ports_used.c is a new file
./bsd/net/if_low_power_mode.c is a new file
./bsd/net/if_ports_used.h is a new file
./bsd/net/nat464_utils.c is a new file
./bsd/tests/pmap_test_sysctl.c is a new file
./bsd/tests/bsd_tests.c is a new file
./bsd/tests/ctrr_test_sysctl.c is a new file
./bsd/netinet/isakmp.h is a new file
./bsd/pthread/pthread_priority.c is a new file
./bsd/pthread/Makefile is a new file
./bsd/pthread/pthread_workqueue.c is a new file
./bsd/pthread/bsdthread_private.h is a new file
./bsd/pthread/workqueue_internal.h is a new file
./bsd/pthread/workqueue_trace.h is a new file
./bsd/pthread/priority_private.h is a new file
./bsd/pthread/workqueue_syscalls.h is a new file
./bsd/pthread/pthread_shims.c is a new file
./bsd/libkern/copyio.h is a new file
./EXTERNAL_HEADERS/img4/img4.h is a new file
./EXTERNAL_HEADERS/img4/payload.h is a new file
./EXTERNAL_HEADERS/img4/environment.h is a new file
./EXTERNAL_HEADERS/img4/api.h is a new file
./EXTERNAL_HEADERS/ptrauth.h is a new file
./EXTERNAL_HEADERS/corecrypto/cckprng.h is a new file
./EXTERNAL_HEADERS/corecrypto/cc_error.h is a new file
./libsyscall/wrappers/mach_bridge_remote_time.c is a new file
./libsyscall/wrappers/skywalk/cpu_copy_in_cksum.s is a new file
./libsyscall/wrappers/skywalk/os_channel.c is a new file
./libsyscall/wrappers/skywalk/cpu_in_cksum.s is a new file
./libsyscall/wrappers/skywalk/cpu_copy_in_cksum_gen.c is a new file
./libsyscall/wrappers/skywalk/os_packet.c is a new file
./libsyscall/wrappers/skywalk/cpu_in_cksum_gen.c is a new file
./libsyscall/wrappers/skywalk/os_nexus.c is a new file
./libsyscall/mach/port_descriptions.c is a new file
./libsyscall/mach/mach/port_descriptions.h is a new file
./libsyscall/mach/mach/mach_sync_ipc.h is a new file
./libsyscall/mach/mach/mach_right.h is a new file
./libsyscall/mach/mach_right.c is a new file
./libsyscall/os/thread_self_restrict.h is a new file
./pexpert/pexpert/arm64/BCM2837.h is a new file
./san/tools/validate_blacklist.sh is a new file
./san/ubsan-blacklist is a new file
./san/ubsan.h is a new file
./san/ubsan_log.c is a new file
./san/ubsan.c is a new file
./san/conf/files.arm64 is a new file
./san/conf/Makefile.arm64 is a new file
./san/conf/Makefile.arm is a new file
./san/conf/files.arm is a new file
./san/kasan-blacklist-dynamic is a new file
./libkdd/kdd.framework/module.modulemap is a new file
./libkdd/kdd.framework/Info.plist is a new file
./libkdd/tests/stackshot-sample-asid-pagetable is a new file
./libkdd/tests/stackshot-with-shared-cache-layout is a new file
./libkdd/tests/stackshot-sample-stacktop is a new file
./libkdd/tests/stackshot-sample-stacktop.plist.gz is a new file
./libkdd/tests/stackshot-sample-cpu-times.plist.gz is a new file
./libkdd/tests/stackshot-sample-delta-thread-policy.plist.gz is a new file
./libkdd/tests/stackshot-sample-asid is a new file
./libkdd/tests/stackshot-sample-delta-thread-policy is a new file
./libkdd/tests/stackshot-with-shared-cache-layout.plist.gz is a new file
./libkdd/tests/stackshot-sample-asid.plist.gz is a new file
./libkdd/tests/stackshot-sample-asid-pagetable.plist.gz is a new file
./libkdd/tests/stackshot-sample-cpu-times is a new file
./libkdd/kdd.frameworkTests/Info.plist is a new file
./osfmk/kern/ux_handler.c is a new file
./osfmk/kern/zcache.h is a new file
./osfmk/kern/turnstile.h is a new file
./osfmk/kern/priority_queue.h is a new file
./osfmk/kern/cpu_quiesce.h is a new file
./osfmk/kern/test_lock.c is a new file
./osfmk/kern/ecc_logging.c is a new file
./osfmk/kern/ux_handler.h is a new file
./osfmk/kern/zcache.c is a new file
./osfmk/kern/trustcache.h is a new file
./osfmk/kern/cpu_quiesce.c is a new file
./osfmk/kern/priority_queue.c is a new file
./osfmk/kern/turnstile.c is a new file
./osfmk/i386/fp_simd.s is a new file
./osfmk/i386/locks_i386_opt.c is a new file
./osfmk/i386/locks_i386_inlines.h is a new file
./osfmk/tests/ktest_emit.c is a new file
./osfmk/tests/ktest_internal.h is a new file
./osfmk/tests/ktest.h is a new file
./osfmk/tests/Makefile is a new file
./osfmk/tests/ktest_accessor.c is a new file
./osfmk/tests/test_thread_call.c is a new file
./osfmk/tests/xnupost.h is a new file
./osfmk/tests/pmap_tests.c is a new file
./osfmk/tests/kernel_tests.c is a new file
./osfmk/tests/README.md is a new file
./osfmk/tests/ktest.c is a new file
./osfmk/tests/bitmap_test.c is a new file
./osfmk/tests/ktest_global.c is a new file
./osfmk/prng/prng_random.c is a new file
./osfmk/kperf/lazy.c is a new file
./osfmk/kperf/lazy.h is a new file
./osfmk/vm/vm_shared_region_pager.c is a new file
./osfmk/mach/memory_entry.defs is a new file
./osfmk/corecrypto/ccsha1/src/ccsha1_internal.h is a new file
./osfmk/corecrypto/ccsha1/src/ccdigest_internal.h is a new file
./osfmk/corecrypto/ccsha2/src/ccdigest_internal.h is a new file
./osfmk/arm/pmap_public.h is a new file
./libkern/libclosure/libclosuredata.c is a new file
./libkern/libclosure/runtime.cpp is a new file
./libkern/zlib/z_crc32.c is a new file
./libkern/img4/interface.c is a new file
./libkern/os/refcnt.c is a new file
./libkern/os/refcnt.h is a new file
./libkern/libkern/Block_private.h is a new file
./libkern/libkern/crc.h is a new file
./libkern/libkern/OSRuntime.h is a new file
./libkern/libkern/img4/Makefile is a new file
./libkern/libkern/img4/interface.h is a new file
./libkern/libkern/Block.h is a new file
./libkern/conf/files.arm64 is a new file

Others:

NO DIFFS in ./tools/symbolify.py
NO DIFFS in ./tools/trace/parse_ipc_trace.py

./tools/trace/kqtrace.lua differences detected: 78,81c78,80 < ['THREQUESTED'] = 0x02, < ['WAKEUP'] = 0x04, < ['BOUND'] = 0x08, < ['DRAIN'] = 0x40, --- > ['PROCESSING'] = 0x1, > ['THREQUESTED'] = 0x2, > ['WAKEUP'] = 0x4 104c103 < ['LOCKED'] = 0x0010, --- > ['USEWAIT'] = 0x0010, 112c111 < ['MERGE_QOS'] = 0x1000, --- > ['STOLENDROP'] = 0x1000, 115a115,132 > knote_state_strings = { > ['ACTIVE'] = 0x0001, > ['QUEUED'] = 0x0002, > ['DISABLED'] = 0x0004, > ['DROPPING'] = 0x0008, > ['USEWAIT'] = 0x0010, > ['ATTACHING'] = 0x0020, > ['STAYACTIVE'] = 0x0040, > ['DEFERDELETE'] = 0x0080, > ['ATTACHED'] = 0x0100, > ['DISPATCH'] = 0x0200, > ['UDATA_SPECIFIC'] = 0x0400, > ['SUPPRESSED'] = 0x0800, > ['STOLENDROP'] = 0x1000, > ['REQVANISH'] = 0x2000, > ['VANISHED'] = 0x4000, > } > 258c275 < duplicate and ", duplicate" or "") --- > duplicate ? ", duplicate" : "") NO DIFFS in ./tools/trace/ios_trace_ipc.sh
NO DIFFS in ./tools/remote_build.sh
NO DIFFS in ./tools/Makefile
NO DIFFS in ./tools/xcrun_cache.sh

./tools/tests/MPMMTest/Makefile differences detected: 20,27c20 < # These are convenience functions for filtering based on substrings, as the < # normal filter functions only accept one wildcard. < FILTER_OUT_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),,$(string)))) < FILTER_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),$(string),))) < < ARCH_32:=$(call FILTER_OUT_SUBSTRING,64,$(ARCHS)) < ARCH_64:=$(call FILTER_SUBSTRING,64,$(ARCHS)) < --- > ARCH_32 := $(filter-out %64, $(ARCHS)) 28a22 > ARCH_64 := $(filter %64, $(ARCHS))
NO DIFFS in ./tools/tests/MPMMTest/README
NO DIFFS in ./tools/tests/MPMMTest/KQMPMMtest.c
NO DIFFS in ./tools/tests/MPMMTest/MPMMtest.c
NO DIFFS in ./tools/tests/MPMMTest/MPMMtest_run.sh
NO DIFFS in ./tools/tests/execperf/Makefile
NO DIFFS in ./tools/tests/execperf/run.c
NO DIFFS in ./tools/tests/execperf/exit.c
NO DIFFS in ./tools/tests/execperf/exit-asm.S
NO DIFFS in ./tools/tests/execperf/test.sh
NO DIFFS in ./tools/tests/execperf/printexecinfo.c
NO DIFFS in ./tools/tests/affinity/sets.c

./tools/tests/affinity/tags.c differences detected: 115,117c115,118 < ret = task_for_pid(mach_task_self(), pid, &port); < if (ret != KERN_SUCCESS) < err(1, "task_for_pid(,%d,) returned %d", pid, ret); --- > //ret = task_for_pid(mach_task_self(), pid, &port); > //if (ret != KERN_SUCCESS) > // err(1, "task_for_pid(,%d,) returned %d", pid, ret); > port = mach_task_self();
./tools/tests/affinity/Makefile differences detected: 17,24c17 < # These are convenience functions for filtering based on substrings, as the < # normal filter functions only accept one wildcard. < FILTER_OUT_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),,$(string)))) < FILTER_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),$(string),))) < < ARCH_32:=$(call FILTER_OUT_SUBSTRING,64,$(ARCHS)) < ARCH_64:=$(call FILTER_SUBSTRING,64,$(ARCHS)) < --- > ARCH_32 := $(filter-out %64, $(ARCHS)) 25a19 > ARCH_64 := $(filter %64, $(ARCHS))
NO DIFFS in ./tools/tests/affinity/pool.c

./tools/tests/Makefile differences detected: 27a28 > execperf \ 32c33 < personas \ --- > darwintests \
NO DIFFS in ./tools/tests/perf_index/perfindex-file_create.c
NO DIFFS in ./tools/tests/perf_index/ramdisk.c
NO DIFFS in ./tools/tests/perf_index/perfindex-fault.c
NO DIFFS in ./tools/tests/perf_index/perf_index.h
NO DIFFS in ./tools/tests/perf_index/perfindex-file_write.c
NO DIFFS in ./tools/tests/perf_index/Makefile
NO DIFFS in ./tools/tests/perf_index/md5.h
NO DIFFS in ./tools/tests/perf_index/perfindex-ram_file_write.c
NO DIFFS in ./tools/tests/perf_index/test_fault_helper.c
NO DIFFS in ./tools/tests/perf_index/perfindex-zfod.c
NO DIFFS in ./tools/tests/perf_index/perfindex-cpu.c
NO DIFFS in ./tools/tests/perf_index/test_file_helper.c
NO DIFFS in ./tools/tests/perf_index/README
NO DIFFS in ./tools/tests/perf_index/perfindex-compile.c
NO DIFFS in ./tools/tests/perf_index/PerfIndex_COPS_Module/PITest.h
NO DIFFS in ./tools/tests/perf_index/PerfIndex_COPS_Module/PerfIndex.h
NO DIFFS in ./tools/tests/perf_index/PerfIndex_COPS_Module/Prefix.pch
NO DIFFS in ./tools/tests/perf_index/PerfIndex_COPS_Module/PerfIndex.xcodeproj/project.pbxproj
NO DIFFS in ./tools/tests/perf_index/PerfIndex_COPS_Module/PITest.m
NO DIFFS in ./tools/tests/perf_index/PerfIndex_COPS_Module/Info.plist
NO DIFFS in ./tools/tests/perf_index/perf_index.c
NO DIFFS in ./tools/tests/perf_index/perfindex-ram_file_read.c
NO DIFFS in ./tools/tests/perf_index/ramdisk.h
NO DIFFS in ./tools/tests/perf_index/test_fault_helper.h
NO DIFFS in ./tools/tests/perf_index/md5.c
NO DIFFS in ./tools/tests/perf_index/perfindex-iperf.c
NO DIFFS in ./tools/tests/perf_index/perfindex-ram_file_create.c
NO DIFFS in ./tools/tests/perf_index/perfindex-memory.c
NO DIFFS in ./tools/tests/perf_index/test_file_helper.h
NO DIFFS in ./tools/tests/perf_index/perfindex-file_read.c
NO DIFFS in ./tools/tests/perf_index/fail.h
NO DIFFS in ./tools/tests/perf_index/perfindex-syscall.c
NO DIFFS in ./tools/tests/perf_index/test_controller.py
NO DIFFS in ./tools/tests/zero-to-n/Makefile
NO DIFFS in ./tools/tests/zero-to-n/zero-to-n.c
NO DIFFS in ./tools/tests/Makefile.common
NO DIFFS in ./tools/tests/personas/persona_test.h

./tools/tests/personas/Makefile differences detected: 19,25c19,20 < # These are convenience functions for filtering based on substrings, as the < # normal filter functions only accept one wildcard. < FILTER_OUT_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),,$(string)))) < FILTER_SUBSTRING=$(strip $(foreach string,$(2),$(if $(findstring $(1),$(string)),$(string),))) < < ARCH_32:=$(call FILTER_OUT_SUBSTRING,64,$(ARCHS)) < ARCH_64:=$(call FILTER_SUBSTRING,64,$(ARCHS)) --- > ARCH_32 := $(filter-out %64, $(ARCHS)) > ARCH_64 := $(filter %64, $(ARCHS)) 33c28 < TARGETS := persona_mgr persona_spawn persona_test_run.sh --- > TARGETS := persona_mgr persona_spawn 41,44d35 < $(DSTROOT)/persona_test_run.sh: persona_test_run.sh < cp $? $@ < chmod +x $@ <
./tools/tests/personas/persona_spawn.c differences detected: 22d21 < #include 76c75 < if (!(pa->flags & PA_HAS_ID)) { --- > if (!pa->flags & PA_HAS_ID) { 133,141d131 < if (pa->flags & PA_HAS_GROUPS) { < ret = posix_spawnattr_set_persona_groups_np(&attr, pa->kinfo.persona_ngroups, pa->kinfo.persona_groups, KAUTH_UID_NONE); < if (ret != 0) { < err_print(""); < ret = -ERR_SPAWN_ATTR; < goto out_err; < } < } < 272,273d261 < printf("\t%-15s\tGroups to which the persona will belong\n", "-G {groupspec}"); < printf("\t%-15s\tgroupspec: G1{,G2,G3...}\n", " "); 298,300d285 < exit(ret); < if (strcmp(argv[optind], "spawn") != 0) { < printf("child exiting (%s).\n", argv[optind]); 302,303d286 < } < optind++; 325c308 < while ((ch = getopt(argc, argv, "Vg:G:I:u:vwh")) != -1) { --- > while ((ch = getopt(argc, argv, "Vg:I:u:vwh")) != -1) { 331a315,316 > if (pa.kinfo.persona_gid <= 500) > err("Invalid GID: %d", pa.kinfo.persona_gid); 335,341d319 < case 'G': < ret = parse_groupspec(&pa.kinfo, optarg); < if (ret < 0) < err("Invalid groupspec: \"%s\"", optarg); < pa.flags |= PA_HAS_GROUPS; < pa.flags |= PA_OVERRIDE; < break; 349a328,329 > if (pa.override_uid <= 500) > err("Invalid UID: %d", pa.override_uid);
./tools/tests/personas/persona_mgr.c differences detected: 44,45c44 < PERSONA_OP_SUPPORT = 4, < PERSONA_OP_MAX = 4, --- > PERSONA_OP_MAX = 3, 88c87 < info("Looking up persona (login:%s, pid:%d, uid:%d)", ki->persona_name, pid, uid); --- > info("Looking up persona (pid:%d, uid:%d)", pid, uid); 122,134d120 < static int persona_op_support(void) < { < uid_t pna_id = -1; < int ret = kpersona_get(&pna_id); < if (ret == 0 || errno != ENOSYS) { < info("Persona subsystem is supported (id=%d)", pna_id); < return 0; < } < < info("Persona subsystem is not supported"); < return ENOSYS; < } < 154c140 < printf("\t%-15s\tOne of: create | destroy | lookup | support\n", "[op]"); --- > printf("\t%-15s\tOne of: create | destroy | lookup\n", "[op]"); 177c163 < int persona_op = -1; --- > int persona_op = 0; 201,202d186 < else if (strcmp(op_str, "support") == 0) < persona_op = PERSONA_OP_SUPPORT; 220,222c204,205 < if (ret <= 0) { < ret = PERSONA_ID_NONE; < } --- > if (ret <= 0) > err("Invalid Persona ID: %s", optarg); 226,240c209,212 < if (strncmp(optarg, "guest", 6) == 0) { < kinfo.persona_type = PERSONA_GUEST; < } else if (strncmp(optarg, "managed", 8) == 0) { < kinfo.persona_type = PERSONA_MANAGED; < } else if (strncmp(optarg, "priv", 4) == 0) { /* shortcut... */ < kinfo.persona_type = PERSONA_PRIV; < } else if (strncmp(optarg, "system", 7) == 0) { < kinfo.persona_type = PERSONA_SYSTEM; < } else { < ret = atoi(optarg); < if (ret <= PERSONA_INVALID || ret > PERSONA_TYPE_MAX) { < err("Invalid type specification: %s", optarg); < } < kinfo.persona_type = ret; < } --- > ret = atoi(optarg); > if (ret <= PERSONA_INVALID || ret > PERSONA_TYPE_MAX) > err("Invalid type specification: %s", optarg); > kinfo.persona_type = ret; 250,253c222,223 < /* allow invalid / -1 as a wildcard for lookup */ < if (ret < 0 && persona_op != PERSONA_OP_LOOKUP) { < err("Invalid UID:%s (%d)", optarg, ret); < } --- > if (ret <= 0) > err("Invalid UID: %s", optarg); 288c258 < if (uid == (uid_t)-1 && persona_op != PERSONA_OP_LOOKUP) { --- > if (uid == (uid_t)-1) 290d259 < } 292c261 < if (kinfo.persona_gmuid != KAUTH_UID_NONE && kinfo.persona_ngroups == 0) { --- > if (kinfo.persona_gmuid && kinfo.persona_ngroups == 0) { 317,319d285 < case PERSONA_OP_SUPPORT: < ret = persona_op_support(); < break; NO DIFFS in ./tools/tests/mktimer/Makefile
NO DIFFS in ./tools/tests/mktimer/mktimer_test.c
NO DIFFS in ./tools/tests/jitter/timer_jitter.c
NO DIFFS in ./tools/tests/jitter/Makefile
NO DIFFS in ./tools/tests/superpages/Makefile
NO DIFFS in ./tools/tests/superpages/testsp.c
NO DIFFS in ./tools/tests/superpages/measure_tlbs.c
NO DIFFS in ./tools/tests/testkext/testthreadcall.h
NO DIFFS in ./tools/tests/testkext/testthreadcall-Info.plist
NO DIFFS in ./tools/tests/testkext/testkext.xcodeproj/project.pbxproj
NO DIFFS in ./tools/tests/testkext/testvmx-Info.plist
NO DIFFS in ./tools/tests/testkext/testvmx.h
NO DIFFS in ./tools/tests/testkext/pgokext/pgokext.c
NO DIFFS in ./tools/tests/testkext/pgokext/Info.plist
NO DIFFS in ./tools/tests/testkext/testthreadcall.cpp
NO DIFFS in ./tools/tests/testkext/testvmx.cpp
NO DIFFS in ./tools/tests/TLBcoherency/Makefile
NO DIFFS in ./tools/tests/TLBcoherency/TLBcoherency.c
NO DIFFS in ./tools/lldbmacros/apic.py

./tools/lldbmacros/misc.py differences detected: 98c98 < elif kern.arch.startswith('arm'): --- > elif kern.arch in ['arm', 'arm64'] : 692c692 < if ((kern.arch == "x86_64") or kern.arch.startswith("arm64")) : --- > if ((kern.arch == "x86_64") or (kern.arch == "arm64")) :
NO DIFFS in ./tools/lldbmacros/macho.py

./tools/lldbmacros/kasan.py differences detected: 112c112 < print_hexdump(addr, asz, 1) --- > print_hexdump(addr, asz, 0) 116,123d115 < def magic_for_addr(addr, xor): < magic = addr & 0xffff < magic ^= (addr >> 16) & 0xffff < magic ^= (addr >> 32) & 0xffff < magic ^= (addr >> 48) & 0xffff < magic ^= xor < return magic < 155c147,150 < if magic_for_addr(addr, 0x3a65) == unsigned(liveh.magic): --- > live_magic = (addr & 0xffffffff) ^ 0xA110C8ED > free_magic = (addr & 0xffffffff) ^ 0xF23333D > > if live_magic == unsigned(liveh.magic): 164d158 < offset = _addr - addr 169c163 < print "Offset: {} bytes (shadow: 0x{:02x} {}, remaining: {} bytes)".format(offset, _shbyte, _shstr, usz - offset) --- > print "Offset: {} bytes (shadow: 0x{:02x} {})".format(_addr - addr, _shbyte, _shstr) 180c174 < print_hexdump(base, asz, 1) --- > print_hexdump(base, asz, 0) 183c177 < elif magic_for_addr(addr, 0xf233) == unsigned(freeh.magic): --- > elif free_magic == unsigned(freeh.magic): 205,212c199,200 < shaddr = shadow_for_address(addr, shift) < try: < shbyte = get_shadow_byte(shaddr) < except: < print "Unmapped shadow 0x{:x} for address 0x{:x}".format(shaddr, addr) < return < < maxsearch = 8*4096 --- > shbyte = get_shadow_byte(shadow_for_address(addr, shift)) > maxsearch = 4096 * 2 275c263 < print_hexdump(base, total_size, 1) --- > print_hexdump(base, total_size, 0) 278,280d265 < if size < 16: < size = 16 < base -= base % 16 306c291 < elif cmd == 'key' or cmd == 'legend': --- > elif cmd == 'legend': 320,321d304 < else: < print "Unknown subcommand: `{}'".format(cmd) NO DIFFS in ./tools/lldbmacros/usertaskdebugging/interface.py
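
Note: the kasan.py changes above add a magic_for_addr(addr, xor) helper that folds a 64-bit address into a 16-bit tag and mixes in a per-state constant (0x3a65 for live headers, 0xf233 for freed ones, per the diff). A minimal runnable sketch of that folding, outside lldb (the sample address is invented):

    def magic_for_addr(addr, xor):
        # Fold the 64-bit address into 16 bits, then mix in the per-state constant.
        magic = addr & 0xffff
        magic ^= (addr >> 16) & 0xffff
        magic ^= (addr >> 32) & 0xffff
        magic ^= (addr >> 48) & 0xffff
        return magic ^ xor

    # In the macro the result is compared against the magic field stored in the
    # allocation's live/free header to decide which header layout to print.
    print(hex(magic_for_addr(0xffffff8012345678, 0x3a65)))
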
NO DIFFS in ./tools/lldbmacros/usertaskdebugging/__init__.py
NO DIFFS in ./tools/lldbmacros/usertaskdebugging/gdbserver.py
NO DIFFS in ./tools/lldbmacros/usertaskdebugging/target.py
NO DIFFS in ./tools/lldbmacros/usertaskdebugging/rsprotocol.py

./tools/lldbmacros/usertaskdebugging/userprocess.py differences detected: 13a14,22 > > CPU_SUBTYPE_X86_64_ALL = 3 > CPU_SUBTYPE_X86_64_H = 8 > CPU_SUBTYPE_ARMV8 = 13 > CPU_SUBTYPE_ARM_V7 = 9 > CPU_SUBTYPE_ARM_V7S = 11 > CPU_SUBTYPE_ARM_V7K = 12 > > 15c24 < if cputype == CPU_TYPE_ARM64: --- > if cputype == CPU_TYPE_ARM64: 31c40 < def __init__(self, thr_obj, cputype, cpusubtype, is_kern_64bit): --- > def __init__(self, thr_obj, cputype, cpusubtype, kern_cputype): 36a46 > isKern64Bit = bool(kern_cputype & 0x01000000) 51c61 < if not is_kern_64bit: --- > if not isKern64Bit: 55,57c65,66 < < logging.debug("created thread id 0x%x of type %s, is_kern_64bit 0x%x cputype 0x%x" < % (self.thread_id, self.reg_type, is_kern_64bit, cputype)) --- > logging.debug("created thread id 0x%x of type %s, kern_cputype 0x%x cputype 0x%x" > % (self.thread_id, self.reg_type, kern_cputype, cputype)) 102c111 < dataregisters64bit = True --- > dataregisters64bit = 8 104,105c113,114 < is_kern_64bit = kern.arch in ['x86_64', 'x86_64h', 'arm64' < ] --- > cputype = CPU_TYPE_X86_64 > cpusubtype = CPU_SUBTYPE_X86_64_ALL 107,108d115 < self.cputype = unsigned(self.proc.p_cputype) < self.cpusubtype = unsigned(self.proc.p_cpusubtype) 110c117,126 < super(UserProcess, self).__init__(self.cputype, self.cpusubtype, ptrsize) --- > """ these computations should come out of the macho header i think """ > """ where does kern.arch come from? what's kern.arch == armv8?? """ > if kern.arch in ('arm'): > cputype = CPU_TYPE_ARM > cpusubtype = CPU_SUBTYPE_ARM_V7 > elif kern.arch in ('armv8', 'arm64'): > cputype = CPU_TYPE_ARM64 > cpusubtype = CPU_SUBTYPE_ARMV8 > > super(UserProcess, self).__init__(cputype, cpusubtype, ptrsize) 113c129 < if self.cputype != CPU_TYPE_X86_64 and self.cputype != CPU_TYPE_I386: --- > if cputype != CPU_TYPE_X86_64: 116c132,134 < self.registerset = GetRegisterSetForCPU(self.cputype, self.cpusubtype) --- > self.cputype = unsigned(self.proc.p_cputype) > self.cpusubtype = unsigned(self.proc.p_cpusubtype) > self.registerset = GetRegisterSetForCPU(cputype, cpusubtype) 125c143 < self.threads[unsigned(thval.thread_id)] = UserThreadObject(thval, self.cputype, self.cpusubtype, is_kern_64bit) --- > self.threads[unsigned(thval.thread_id)] = UserThreadObject(thval, self.cputype, self.cpusubtype, cputype) NO DIFFS in ./tools/lldbmacros/routedefines.py
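
Note: the userprocess.py rewrite above stops inferring 64-bit-ness from the CPU_ARCH_ABI64 bit of the kernel cputype (the old kern_cputype & 0x01000000 check) and instead passes an explicit is_kern_64bit flag derived from kern.arch, reading cputype/cpusubtype from the target proc. A small sketch of the ABI64-bit test the old code relied on, using the standard Mach-O constants:

    CPU_ARCH_ABI64 = 0x01000000
    CPU_TYPE_ARM = 0x0000000c
    CPU_TYPE_ARM64 = CPU_TYPE_ARM | CPU_ARCH_ABI64     # 0x0100000c

    def cputype_is_64bit(cputype):
        # The ABI64 flag in a Mach-O cputype marks a 64-bit ABI.
        return bool(cputype & CPU_ARCH_ABI64)

    assert not cputype_is_64bit(CPU_TYPE_ARM)
    assert cputype_is_64bit(CPU_TYPE_ARM64)
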
NO DIFFS in ./tools/lldbmacros/.lldbinit
NO DIFFS in ./tools/lldbmacros/pci.py
NO DIFFS in ./tools/lldbmacros/pgtrace.py
NO DIFFS in ./tools/lldbmacros/core/configuration.py
NO DIFFS in ./tools/lldbmacros/core/syntax_checker.py

./tools/lldbmacros/core/cvalue.py differences detected: 419,430d418 < def containerof(obj, target_type, field_name): < """ Type cast an object to another C type from a pointer to a field. < params: < obj - core.value object representing some C construct in lldb < target_type - str : ex 'struct thread' < - lldb.SBType : < field_name - the field name within the target_type obj is a pointer to < """ < addr = int(obj) - getfieldoffset(target_type, field_name) < obj = value(obj.GetSBValue().CreateValueFromExpression(None,'(void *)'+str(addr))) < return cast(obj, target_type + " *") <
NO DIFFS in ./tools/lldbmacros/core/standard.py
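
Note: the containerof() helper added to cvalue.py recovers a pointer to an enclosing structure from a pointer to one of its fields by subtracting the field offset. A runnable sketch of the same arithmetic using ctypes in place of lldb values (the Thread/Links types here are invented for illustration):

    import ctypes

    class Links(ctypes.Structure):
        _fields_ = [("prev", ctypes.c_void_p), ("next", ctypes.c_void_p)]

    class Thread(ctypes.Structure):
        _fields_ = [("thread_id", ctypes.c_uint64), ("links", Links)]

    def containerof(field_addr, target_type, field_name):
        # Step back by the field's offset to reach the start of the enclosing struct.
        addr = field_addr - getattr(target_type, field_name).offset
        return ctypes.cast(ctypes.c_void_p(addr), ctypes.POINTER(target_type))

    t = Thread(thread_id=42)
    links_addr = ctypes.addressof(t.links)
    assert containerof(links_addr, Thread, "links").contents.thread_id == 42
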

./tools/lldbmacros/core/kernelcore.py differences detected: 9d8 < from utils import * 226,249d224 < def IteratePriorityQueueEntry(root, element_type, field_name): < """ iterate over a priority queue as defined with struct priority_queue from osfmk/kern/priority_queue.h < root - value : Value object for the priority queue < element_type - str : Type of the link element < field_name - str : Name of the field in link element's structure < returns: < A generator does not return. It is used for iterating < value : an object thats of type (element_type). Always a pointer object < """ < def _make_pqe(addr): < return value(root.GetSBValue().CreateValueFromExpression(None,'(struct priority_queue_entry *)'+str(addr))) < < queue = [unsigned(root.pq_root_packed) & ~3] < < while len(queue): < elt = _make_pqe(queue.pop()) < < while elt: < yield containerof(elt, element_type, field_name) < addr = unsigned(elt.child) < if addr: queue.append(addr) < elt = elt.next < < 427,434d401 < < def PhysToKVARM64(self, addr): < ptov_table = self.GetGlobalVariable('ptov_table') < for i in range(0, self.GetGlobalVariable('ptov_index')): < if (addr >= long(unsigned(ptov_table[i].pa))) and (addr < (long(unsigned(ptov_table[i].pa)) + long(unsigned(ptov_table[i].len)))): < return (addr - long(unsigned(ptov_table[i].pa)) + long(unsigned(ptov_table[i].va))) < return (addr - unsigned(self.GetGlobalVariable("gPhysBase")) + unsigned(self.GetGlobalVariable("gVirtBase"))) < 438,439d404 < elif self.arch.startswith('arm64'): < return self.PhysToKVARM64(addr) 586c551 < if arch == 'x86_64' or arch.startswith('arm64'): --- > if arch in ('x86_64', 'arm64'): 596c561 < elif self.arch.startswith('arm64'): --- > elif self.arch == 'arm64': NO DIFFS in ./tools/lldbmacros/core/__init__.py
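
Note: the kernelcore.py additions above include IteratePriorityQueueEntry(), which walks struct priority_queue by following each entry's sibling (next) chain while queueing child pointers for later, and PhysToKVARM64(), which consults the arm64 ptov_table before falling back to the gPhysBase/gVirtBase offset. A stand-in for the traversal pattern, using plain Python objects instead of lldb values (Node and its fields are made up):

    class Node(object):
        def __init__(self, name, child=None, next=None):
            self.name, self.child, self.next = name, child, next

    def iterate_priority_queue(root):
        # Yield every element: walk each sibling chain, remembering children for later.
        queue = [root]
        while queue:
            elt = queue.pop()
            while elt is not None:
                yield elt
                if elt.child is not None:
                    queue.append(elt.child)
                elt = elt.next

    root = Node("root", child=Node("a", child=Node("leaf"), next=Node("b")))
    print([n.name for n in iterate_priority_queue(root)])   # ['root', 'a', 'b', 'leaf']
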
NO DIFFS in ./tools/lldbmacros/core/operating_system.py
NO DIFFS in ./tools/lldbmacros/core/caching.py

./tools/lldbmacros/core/xnu_lldb_init.py differences detected: 106,123d105 < < load_kexts = True < if "XNU_LLDBMACROS_NOBUILTINKEXTS" in os.environ and len(os.environ['XNU_LLDBMACROS_NOBUILTINKEXTS']) > 0: < load_kexts = False < builtinkexts_path = os.path.join(os.path.dirname(self_path), "lldbmacros", "builtinkexts") < if os.access(builtinkexts_path, os.F_OK): < kexts = os.listdir(builtinkexts_path) < if len(kexts) > 0: < print "\nBuiltin kexts: %s\n" % kexts < if load_kexts == False: < print "XNU_LLDBMACROS_NOBUILTINKEXTS is set, not loading:\n" < for kextdir in kexts: < script = os.path.join(builtinkexts_path, kextdir, kextdir.split('.')[-1] + ".py") < import_kext_cmd = "command script import \"%s\"" % script < print "%s" % import_kext_cmd < if load_kexts: < debugger.HandleCommand(import_kext_cmd) <
NO DIFFS in ./tools/lldbmacros/core/lazytarget.py

./tools/lldbmacros/xnudefines.py differences detected: 91c91 < 0x0010: 'LOCKED', --- > 0x0010: 'USERWAIT', 99c99 < 0x1000: 'MERGE_QOS', --- > 0x1000: 'STOLENDROP', 103c103 < kqrequest_state_strings = { 0x01: 'WORKLOOP', --- > kqrequest_state_strings = { 0x01: 'PROCESSING', 106,108c106,109 < 0x08: 'THOVERCOMMIT', < 0x10: 'R2K_ARMED', < 0x20: 'ALLOC_TURNSTILE' } --- > 0x08: 'BOUND', > 0x20: 'THOVERCOMMIT', > 0x40: 'DRAIN' } > 120c121 < KQWQ_NBUCKETS = 8 --- > KQWQ_NBUCKETS = 22 184,185c185 < 'NAMED_MEM', 'IOKIT_CON', 'IOKIT_OBJ', 'UPL', 'MEM_OBJ_CONTROL', 'AU_SESSIONPORT', 'FILEPORT', 'LABELH', 'TASK_RESUME', 'VOUCHER', 'VOUCHER_ATTR_CONTROL', 'WORK_INTERVAL', < 'UX_HANDLER'] --- > 'NAMED_MEM', 'IOKIT_CON', 'IOKIT_OBJ', 'UPL', 'MEM_OBJ_CONTROL', 'AU_SESSIONPORT', 'FILEPORT', 'LABELH', 'TASK_RESUME', 'VOUCHER', 'VOUCHER_ATTR_CONTROL', 'IKOT_WORK_INTERVAL'] 199,201d198 < FSHIFT = 11 < FSCALE = 1 << FSHIFT
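
Note: the xnudefines.py hunk above adds FSHIFT = 11 and FSCALE = 1 << FSHIFT, matching the kernel's fixed-point scale for load averages. A one-line sketch of how a macro would turn a raw fixed-point sample into a float (the sample value is invented):

    FSHIFT = 11
    FSCALE = 1 << FSHIFT              # 2048

    raw_ldavg = 4915                  # e.g. one averunnable.ldavg[] slot read from the target
    print(raw_ldavg / float(FSCALE))  # ~2.4
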
./tools/lldbmacros/mbufs.py differences detected: 783,791d782 < < # Macro: mbuf_wdlog < @lldb_command('mbuf_wdlog') < def McacheShowCache(cmd_args=None): < """Display the watchdog log < """ < lldb_run_command('settings set max-string-summary-length 4096') < print('%s' % lldb_run_command('p/s mbwdog_logging').replace("\\n","\n")) < # EndMacro: mbuf_wdlog
NO DIFFS in ./tools/lldbmacros/net.py

./tools/lldbmacros/ioreg.py differences detected: 3d2 < from kdp import *
NO DIFFS in ./tools/lldbmacros/usertaskgdbserver.py
NO DIFFS in ./tools/lldbmacros/bank.py

./tools/lldbmacros/waitq.py differences detected: 835,838d834 < if not wqset.wqset_q.waitq_prepost: < # If the "prepost_id" is non-zero, but the 'waitq_prepost' bit is < # *not* set, then this waitq actually has a prepost hook! < return [ "{0: <#18x}:{1: <18s}".format(wqset.wqset_prepost_id, "") ]
./tools/lldbmacros/README.md differences detected: 202c202 < The lldb debugger provides ways for user to customize how a particular type of object be described when printed. These are very useful in displaying complex and large structures --- > The lldb debugger provides ways for user to customize how a particular type of object be decsribed when printed. These are very useful in displaying complex and large structures
NO DIFFS in ./tools/lldbmacros/kauth.py

./tools/lldbmacros/utils.py differences detected: 143,152c143 < def ContainerOf(obj, target_type, field_name): < """ Type cast an object to another C type from a pointer to a field. < params: < obj - core.value object representing some C construct in lldb < target_type - str : ex 'struct thread' < - lldb.SBType : < field_name - the field name within the target_type obj is a pointer to < """ < return containerof(obj, target_type, field_name) < --- > 473,476d463 < < def Ones(x): < return (1 << x)-1 <
NO DIFFS in ./tools/lldbmacros/atm.py
NO DIFFS in ./tools/lldbmacros/ntstat.py
NO DIFFS in ./tools/lldbmacros/mbufdefines.py

./tools/lldbmacros/kcdata.py differences detected: 33c33 < 'KCDATA_TYPE_TYPEDEFINITION': 0x12, --- > 'KCDATA_TYPE_TYPEDEFINTION': 0x12, 35c35 < 'KCDATA_TYPE_CONTAINER_END': 0x14, --- > 'KCDATA_TYPE_CONTIANER_END': 0x14, 76a77,79 > 'STACKSHOT_KCTYPE_TASK_DELTA_SNAPSHOT': 0x940, > 'STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT': 0x941, > 92d94 < 'STACKSHOT_KCTYPE_THREAD_POLICY_VERSION': 0x922, 94,100d95 < 'STACKSHOT_KCTYPE_USER_STACKTOP' : 0x924, < 'STACKSHOT_KCTYPE_ASID' : 0x925, < 'STACKSHOT_KCTYPE_PAGE_TABLES' : 0x926, < 'STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT' : 0x927, < < 'STACKSHOT_KCTYPE_TASK_DELTA_SNAPSHOT': 0x940, < 'STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT': 0x941, 101a97 > 'STACKSHOT_KCTYPE_THREAD_POLICY_VERSION': 0x922, 207c203 < def FromBasicCtype(st_name, st_type, st_offset=0, legacy_size=None): --- > def FromBasicCtype(st_name, st_type, st_offset=0): 213,214d208 < if legacy_size: < retval.legacy_size = legacy_size 298c292 < def __init__(self, t_type_id, t_elements=[], t_name='anon', custom_repr=None, legacy_size=None, merge=False, naked=False): --- > def __init__(self, t_type_id, t_elements=[], t_name='anon', custom_repr=None, legacy_size=None, merge=False): 304,305c298 < if legacy_size: < self.legacy_size = legacy_size --- > self.legacy_size = legacy_size 307d299 < self.naked = naked 328,329c320 < retval = KCTypeDescription(t_type_id, other.elements, t_name, other.custom_JsonRepr, < legacy_size=getattr(other, 'legacy_size', None)) --- > retval = KCTypeDescription(t_type_id, other.elements, t_name, other.custom_JsonRepr) 340c331 < elif hasattr(self, 'legacy_size') and len(base_data) == self.legacy_size + ((-self.legacy_size) & 0xf): --- > elif self.legacy_size and len(base_data) == self.legacy_size + ((-self.legacy_size) & 0xf): 344,347c335 < if self.naked: < o = ", ".join([e.GetJsonRepr(base_data) for e in self.elements if not e.ShouldSkip(base_data)]) < else: < o = ", ".join(['"%s": %s' % (e.GetName(), e.GetJsonRepr(base_data)) for e in self.elements if not e.ShouldSkip(base_data)]) --- > o = ", ".join(['"%s": %s' % (e.GetName(), e.GetJsonRepr(base_data)) for e in self.elements if not e.ShouldSkip(base_data)]) 438c426 < return self.i_type == GetTypeForName('KCDATA_TYPE_CONTAINER_END') --- > return self.i_type == GetTypeForName('KCDATA_TYPE_CONTIANER_END') 480c468 < elif self.i_type == GetTypeForName('KCDATA_TYPE_CONTAINER_END'): --- > elif self.i_type == GetTypeForName('KCDATA_TYPE_CONTIANER_END'): 502c490 < elif self.i_type == GetTypeForName('KCDATA_TYPE_TYPEDEFINITION'): --- > elif self.i_type == GetTypeForName('KCDATA_TYPE_TYPEDEFINTION'): 511a500 > #print str(e) 513a503 > #print str(type_desc) 524c514 < e_s = KNOWN_TYPES_COLLECTION[e_t].legacy_size --- > e_s = KNOWN_TYPES_COLLECTION[e_t].sizeof() 526c516 < raise Exception("array too small for its count") --- > raise Excpetion, "array too small for its count" 837,838d826 < KCSubTypeElement.FromBasicCtype('tds_requested_policy', KCSUBTYPE_TYPE.KC_ST_UINT64, 48), < KCSubTypeElement.FromBasicCtype('tds_effective_policy', KCSUBTYPE_TYPE.KC_ST_UINT64, 56), 840,841c828 < 'thread_delta_snapshot', < legacy_size = 48 --- > 'thread_delta_snapshot' 865,870c852 < KNOWN_TYPES_COLLECTION[GetTypeForName('STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT')] = KCTypeDescription(GetTypeForName('STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT'), ( < KCSubTypeElement('imageLoadAddress', KCSUBTYPE_TYPE.KC_ST_UINT64, 8, 0, 0), < KCSubTypeElement('imageUUID', KCSUBTYPE_TYPE.KC_ST_UINT8, KCSubTypeElement.GetSizeForArray(16, 1), 8, 1) < ), < 'system_shared_cache_layout' < 
) --- > 876,877c858 < 'dyld_load_info', < legacy_size = 24 --- > 'dyld_load_info' 884,885c865 < 'dyld_load_info', < legacy_size = 20 --- > 'dyld_load_info' 905c885 < KNOWN_TYPES_COLLECTION[0x907] = KCSubTypeElement.FromBasicCtype('donating_pids', KCSUBTYPE_TYPE.KC_ST_INT32, legacy_size=4) --- > KNOWN_TYPES_COLLECTION[0x907] = KCSubTypeElement.FromBasicCtype('donating_pids', KCSUBTYPE_TYPE.KC_ST_INT32) 913,914c893 < 'kernel_stack_frames', < legacy_size = 8 --- > 'kernel_stack_frames' 940,941c919 < 'kernel_stack_frames', < legacy_size = 16 --- > 'kernel_stack_frames' 1030,1041d1007 < def set_type(name, *args): < typ = GetTypeForName(name) < KNOWN_TYPES_COLLECTION[typ] = KCTypeDescription(GetTypeForName(typ), *args) < < < set_type('STACKSHOT_KCTYPE_USER_STACKTOP', < ( < KCSubTypeElement.FromBasicCtype('sp', KCSUBTYPE_TYPE.KC_ST_UINT64, 0), < KCSubTypeElement('stack_contents', KCSUBTYPE_TYPE.KC_ST_UINT8, KCSubTypeElement.GetSizeForArray(8, 1), 8, 1), < ), < 'user_stacktop') < 1123d1088 < KCSubTypeElement.FromBasicCtype('runnable_usec', KCSUBTYPE_TYPE.KC_ST_UINT64, 16), 1179,1188d1143 < KNOWN_TYPES_COLLECTION[GetTypeForName('STACKSHOT_KCTYPE_ASID')] = ( < KCSubTypeElement('ts_asid', KCSUBTYPE_TYPE.KC_ST_UINT32, 4, 0, 0)) < < KNOWN_TYPES_COLLECTION[GetTypeForName('STACKSHOT_KCTYPE_PAGE_TABLES')] = KCTypeDescription(GetTypeForName('STACKSHOT_KCTYPE_PAGE_TABLES'), ( < KCSubTypeElement(None, KCSUBTYPE_TYPE.KC_ST_UINT64, 8, 0, 0, KCSubTypeElement._get_naked_element_value), ), < 'ts_pagetable', < merge=True, < naked=True < ) < 1233d1187 < TH_WAIT_REPORT = 0x40 1247,1248d1200 < if (s & TH_WAIT_REPORT): < retval.append("TH_WAIT_REPORT") 1274d1225 < kThreadWaitOnProcess = 0x11 1333c1284 < s += "semaphore port %x " % context --- > s += "semaphore port %x" % context 1346c1297 < s += "unfair lock %x owned by thread %d" % (context, owner) --- > s += "unfair lock %x owned by pid %d" % (context, owner) 1352c1303 < s += " owned by thread %d" % owner --- > s += " owned by pid %d" % owner 1372,1374d1322 < elif type == kThreadWaitOnProcess: < s += "waitpid, for pid %d" % owner < 1413,1415c1361,1363 < shared_cache_base_addr, "S" ] < < dsc_layout = ss.get('system_shared_cache_layout') --- > shared_cache_base_addr, > "S" > ] 1427c1375 < dsc_libs.append([_uuid, _addr, "C"]) --- > dsc_libs.append([_uuid, _addr, "P"]) 1432,1437d1379 < elif dsc_layout: < print "Found in memory system shared cache layout with {} images".format(len(dsc_layout)) < slide = ss.get('shared_cache_dyld_load_info')['imageLoadAddress'] < < for image in dsc_layout: < dsc_libs.append([format_uuid(image['imageUUID']), image['imageLoadAddress'] + slide, "C"]) 1504,1512d1445 < if 'ts_asid' in piddata: < tsnap["asid"] = piddata["ts_asid"] < < if 'ts_pagetable' in piddata: < pagetables = [] < for tte in piddata["ts_pagetable"]: < pagetables.append(tte) < tsnap["pageTables"] = pagetables < 1554,1558d1486 < < if "user_stacktop" in thdata: < (address,) = struct.unpack("NO DIFFS in ./tools/lldbmacros/ktrace.py
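
Note: among the kcdata.py changes above, legacy-sized records are matched with len(base_data) == legacy_size + ((-legacy_size) & 0xf), i.e. the legacy payload size rounded up to a 16-byte boundary. A tiny runnable check of that padding arithmetic:

    def padded_size(n):
        # kcdata items are 16-byte aligned; (-n) & 0xf is the pad needed for n bytes.
        return n + ((-n) & 0xf)

    assert padded_size(48) == 48      # already aligned
    assert padded_size(20) == 32
    assert padded_size(24) == 32
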
NO DIFFS in ./tools/lldbmacros/ipcimportancedetail.py

./tools/lldbmacros/kevent.py differences detected: 102c102 < return kern.GetValueFromAddress(int(kn.kn_kq_packed), 'struct kqueue *') --- > return kern.GetValueFromAddress(kn.kn_kq_packed + kern.VM_MIN_KERNEL_AND_KEXT_ADDRESS, 'struct kqueue *') 151c151 < @header('{:<20s} {:<20s} {:<5s} {:<5s} {:<5s} {:s}'.format('kqrequest', 'thread', 'qos', 'ovr_qos', 'sa_qos', 'state')) --- > @header('{:<20s} {:<20s} {:<5s} {:<5s} {:<5s} {:<5s} {:s}'.format('kqrequest', 'thread', 'qos', 'ovr_qos', 'w_qos', 'sa_qos', 'state')) 159c159 < fmt = '{kqrp: <#020x} {kqr.kqr_thread: <#020x} {qos: <5s} {ovr_qos: <5s} {sa_qos: <5s} {state_str: fmt = '{kqrp: <#020x} {kqr.kqr_bound.kqrb_thread: <#020x} {qos: <5s} {ovr_qos: <5s} {w_qos: <5s} {sa_qos: <5s} {state_str: w_qos=xnudefines.thread_qos_short_strings[int(kqr.kqr_dsync_waiters_qos)], 282c283 < servicer=int(kqwl.kqwl_request.kqr_thread), --- > servicer=int(kqwl.kqwl_request.kqr_bound.kqrb_thread), NO DIFFS in ./tools/lldbmacros/netdefines.py

./tools/lldbmacros/process.py differences detected: 173a174,175 > C - AST_CHUD > C - AST_CHUD_URGENT 182,183d183 < R - AST_REBALANCE < N - AST_UNQUIESCE 188c188 < 0x40:'L', 0x80:'B', 0x100:'K', 0x200:'M', --- > 0x40:'L', 0x80:'B', 0x100:'K', 0x200:'M', 0x400:'C', 0x800:'C', 190c190 < 0x20000: 'D', 0x40000: 'I', 0x80000: 'E', 0x100000: 'R', 0x200000: 'N'} --- > 0x20000: 'D', 0x40000: 'I', 0x80000: 'E'} 454,461d453 < if verbose: < out_string += "\n\t cpu_time_effective[THREAD_QOS_DEFAULT] {0: @lldb_type_summary(['thread_group_t', 'thread_group *']) 1505c1497 < def GetLedgerEntrySummary(ledger_template, ledger, i, show_footprint_interval_max=False): --- > def GetLedgerEntrySummary(ledger_template, ledger, i): 1522,1524c1514 < if (show_footprint_interval_max): < out_str += "{:12d} ".format(ledger._le._le_max.le_interval_max) < out_str += "{:14d} ".format(ledger._le._le_max.le_lifetime_max) --- > out_str += "{:9d} {:5d} ".format(ledger._le.le_maxtracking.le_peaks[0].le_max, now - unsigned(ledger._le.le_maxtracking.le_peaks[0].le_time)) 1526,1528c1516,1521 < if (show_footprint_interval_max): < out_str += " - " < out_str += " - " --- > out_str += " - -" > > if (ledger.le_flags & lf_tracking_max): > out_str += "{:12d} ".format(ledger._le.le_maxtracking.le_lifetime_max) > else: > out_str += " -" 1573c1566,1570 < def GetTaskLedgers(task_val, show_footprint_interval_max=False): --- > @header("{0: <15s} {1: >16s} {2: <2s} {3: >15s} {4: >9s} {5: >6s} {6: >12s} {7: >11s} \ > {8: >7s} {9: >13s} {10: <15s} {11: <8s} {12: <9s} {13: <6s} {14: >6s}".format( > "task [thread]", "entry", "#", "balance", "peakA", "(age)", "lifemax", "credit", > "debit", "limit", "refill period", "lim pct", "warn pct", "over?", "flags")) > def GetTaskLedgers(task_val): 1588c1585 < out_str += GetLedgerEntrySummary(kern.globals.task_ledger_template, task_ledgerp.l_entries[i], i, show_footprint_interval_max) --- > out_str += GetLedgerEntrySummary(kern.globals.task_ledger_template, task_ledgerp.l_entries[i], i) 1599c1596 < @lldb_command('showtaskledgers', 'F:I') --- > @lldb_command('showtaskledgers', 'F:') 1602,1606c1599,1600 < or : showtaskledgers [ -I ] [ -F ] < options: < -I: show footprint interval max (DEV/DEBUG only) < -F: specify task via name instead of address < - > Usage: showtaskledgers
> or : showtaskledgers -F > 1617,1619d1610 < show_footprint_interval_max = False < if "-I" in cmd_options: < show_footprint_interval_max = True 1623,1631c1614,1615 < if (show_footprint_interval_max): < print "{0: <15s} {1: >16s} {2: <2s} {3: >15s} {4: >12s} {5: >14s} {6: >12s} {7: >12s} {8: >12s} {9: <15s} {10: <8s} {11: <9s} {12: <6s} {13: >6s}".format( < "task [thread]", "entry", "#", "balance", "intrvl_max", "lifetime_max", "credit", < "debit", "limit", "refill period", "lim pct", "warn pct", "over?", "flags") < else: < print "{0: <15s} {1: >16s} {2: <2s} {3: >15s} {4: >14s} {5: >12s} {6: >12s} {7: >12s} {8: <15s} {9: <8s} {10: <9s} {11: <6s} {12: >6s}".format( < "task [thread]", "entry", "#", "balance", "lifetime_max", "credit", < "debit", "limit", "refill period", "lim pct", "warn pct", "over?", "flags") < print GetTaskLedgers(tval, show_footprint_interval_max) --- > print GetTaskLedgers.header > print GetTaskLedgers(tval) 1638c1622 < def ShowAllTaskLedgers(cmd_args=None, cmd_options={}): --- > def ShowAllTaskLedgers(cmd_args=None): 1644c1628 < ShowTaskLedgers([task_val], cmd_options=cmd_options) --- > ShowTaskLedgers([task_val])
./tools/lldbmacros/scheduler.py differences detected: 150d149 < processor_array = kern.globals.processor_array 154,156c153,161 < cpu_bitmap = int(pset.cpu_bitmask) < for cpuid in IterateBitmap(cpu_bitmap): < processor = processor_array[cpuid] --- > for processor in ParanoidIterateLinkageChain(pset.active_queue, "processor_t", "processor_queue"): > if unsigned(processor.last_dispatch) > cur_abstime: > cur_abstime = unsigned(processor.last_dispatch) > > for processor in ParanoidIterateLinkageChain(pset.idle_queue, "processor_t", "processor_queue"): > if unsigned(processor.last_dispatch) > cur_abstime: > cur_abstime = unsigned(processor.last_dispatch) > > for processor in ParanoidIterateLinkageChain(pset.idle_secondary_queue, "processor_t", "processor_queue"): 375d379 < share_df_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')] 382d385 < share_df_shift = sched_pri_shifts[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')] 388c391 < print "FG Timeshare threads: {:d} DF Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_df_count, share_ut_count, share_bg_count) --- > print "FG Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_ut_count, share_bg_count) 390c393 < print "Fixed shift: {g.sched_fixed_shift:d} FG shift: {:d} DF shift: {:d} UT shift: {:d} BG shift: {:d}\n".format(share_fg_shift, share_df_shift, share_ut_shift, share_bg_shift, g=kern.globals) --- > print "Fixed shift: {g.sched_fixed_shift:d} FG shift: {:d} UT shift: {:d} BG shift: {:d}\n".format(share_fg_shift, share_ut_shift, share_bg_shift, g=kern.globals) 572d574 < share_df_count = run_buckets[GetEnumValue('sched_bucket_t::TH_BUCKET_SHARE_DF')] 577c579 < print "FG Timeshare threads: {:d} DF Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_df_count, share_ut_count, share_bg_count) --- > print "FG Timeshare threads: {:d} UT Timeshare threads: {:d} BG Timeshare threads: {:d}\n".format(share_fg_count, share_ut_count, share_bg_count) 621,622d622 < < processor_array = kern.globals.processor_array 625,638c625,635 < active_bitmap = int(pset.cpu_state_map[5]) | int(pset.cpu_state_map[6]) < for cpuid in IterateBitmap(active_bitmap): < processor = processor_array[cpuid] < if processor != 0: < print " " + GetProcessorSummary(processor) < ShowActiveThread(processor) < ShowNextThread(processor) < < if show_priority_runq: < runq = processor.runq < ShowRunQSummary(runq) < if show_grrr: < grrr_runq = processor.grrr_runq < ShowGrrrSummary(grrr_runq) --- > for processor in ParanoidIterateLinkageChain(pset.active_queue, "processor_t", "processor_queue"): > print " " + GetProcessorSummary(processor) > ShowActiveThread(processor) > ShowNextThread(processor) > > if show_priority_runq: > runq = processor.runq > ShowRunQSummary(runq) > if show_grrr: > grrr_runq = processor.grrr_runq > ShowGrrrSummary(grrr_runq) 643,649c640,643 < idle_bitmap = int(pset.cpu_state_map[4]) & int(pset.primary_map) < for cpuid in IterateBitmap(idle_bitmap): < processor = processor_array[cpuid] < if processor != 0: < print " " + GetProcessorSummary(processor) < ShowActiveThread(processor) < ShowNextThread(processor) --- > for processor in ParanoidIterateLinkageChain(pset.idle_queue, "processor_t", "processor_queue"): > print " " + GetProcessorSummary(processor) > ShowActiveThread(processor) > ShowNextThread(processor) 651,652c645,646 < if show_priority_runq: < 
ShowRunQSummary(processor.runq) --- > if show_priority_runq: > ShowRunQSummary(processor.runq) 657,680c651,654 < idle_bitmap = int(pset.cpu_state_map[4]) & ~(int(pset.primary_map)) < for cpuid in IterateBitmap(idle_bitmap): < processor = processor_array[cpuid] < if processor != 0: < print " " + GetProcessorSummary(processor) < ShowActiveThread(processor) < ShowNextThread(processor) < < if show_priority_runq: < print ShowRunQSummary(processor.runq) < print " \n" < < < print "Other Processors:\n" < other_bitmap = 0 < for i in range(0, 4): < other_bitmap |= int(pset.cpu_state_map[i]) < other_bitmap &= int(pset.cpu_bitmask) < for cpuid in IterateBitmap(other_bitmap): < processor = processor_array[cpuid] < if processor != 0: < print " " + GetProcessorSummary(processor) < ShowActiveThread(processor) < ShowNextThread(processor) --- > for processor in ParanoidIterateLinkageChain(pset.idle_secondary_queue, "processor_t", "processor_queue"): > print " " + GetProcessorSummary(processor) > ShowActiveThread(processor) > ShowNextThread(processor) 682,683c656,657 < if show_priority_runq: < ShowRunQSummary(processor.runq) --- > if show_priority_runq: > print ShowRunQSummary(processor.runq) 820,845d793 < def bit_first(bitmap): < return bitmap.bit_length() - 1 < < def lsb_first(bitmap): < bitmap = bitmap & -bitmap < return bit_first(bitmap) < < def IterateBitmap(bitmap): < """ Iterate over a bitmap, returning the index of set bits starting from 0 < < params: < bitmap - value : bitmap < returns: < A generator does not return. It is used for iterating. < value : index of a set bit < example usage: < for cpuid in IterateBitmap(running_bitmap): < print processor_array[cpuid] < """ < i = lsb_first(bitmap) < while (i >= 0): < yield i < bitmap = bitmap & ~((1 << (i + 1)) - 1) < i = lsb_first(bitmap) < < NO DIFFS in ./tools/lldbmacros/structanalyze.py
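
Note: the scheduler.py changes above drop the ParanoidIterateLinkageChain walks of the processor queues in favor of per-pset CPU bitmaps (cpu_bitmask, cpu_state_map) indexed into processor_array, iterated with the new lsb_first()/IterateBitmap() helpers. A standalone sketch of that bit-iteration pattern:

    def lsb_first(bitmap):
        # Isolate the lowest set bit and return its index, or -1 if the map is empty.
        bitmap = bitmap & -bitmap
        return bitmap.bit_length() - 1

    def iterate_bitmap(bitmap):
        i = lsb_first(bitmap)
        while i >= 0:
            yield i
            bitmap = bitmap & ~((1 << (i + 1)) - 1)   # clear bit i and everything below it
            i = lsb_first(bitmap)

    assert list(iterate_bitmap(0b101001)) == [0, 3, 5]
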

./tools/lldbmacros/pmap.py differences detected: 121c121 < kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() + (bits / 8) --- > kdp_pkt_size = GetType('kdp_writephysmem64_req_t').GetByteSize() 923,926d922 < < PVH_HIGH_FLAGS_ARM64 = (1 << 62) | (1 << 61) | (1 << 60) | (1 << 59) < PVH_HIGH_FLAGS_ARM32 = (1 << 31) < 935c931 < page_size = kern.globals.page_size --- > page_size = kern.globals.arm_hardware_page_size 939,947d934 < print "PVH raw value: ({:#x})".format(pvh) < if kern.arch.startswith('arm64'): < iommu_flag = 0x4 < iommu_table_flag = 1 << 63 < pvh = pvh | PVH_HIGH_FLAGS_ARM64 < else: < iommu_flag = 0 < iommu_table_flag = 0 < pvh = pvh | PVH_HIGH_FLAGS_ARM32 956d942 < pte_str = '' 958,965c944 < if ptep & iommu_flag: < ptep = ptep & ~iommu_flag < if ptep & iommu_table_flag: < pte_str = ' (IOMMU table), entry' < else: < pte_str = ' (IOMMU state), descriptor' < ptep = ptep | iommu_table_flag < print "PTE {:#x}{:s}: {:#x}".format(ptep, pte_str, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))) --- > print "PTE {:#x}: {:#x}".format(ptep, dereference(kern.GetValueFromAddress(ptep, 'pt_entry_t *'))) 978,984d956 < if ptep & iommu_flag: < ptep = ptep & ~iommu_flag < if ptep & iommu_table_flag: < pve_str = ' (IOMMU table), entry' < else: < pve_str = ' (IOMMU state), descriptor' < ptep = ptep | iommu_table_flag 998,1028d969 < @lldb_command('kvtophys') < def KVToPhys(cmd_args=None): < """ Translate a kernel virtual address to the corresponding physical address. < Assumes the virtual address falls within the kernel static region. < Syntax: (lldb) kvtophys < """ < if cmd_args == None or len(cmd_args) < 1: < raise ArgumentError("Too few arguments to kvtophys.") < if kern.arch.startswith('arm'): < print "{:#x}".format(KVToPhysARM(long(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'))))) < elif kern.arch == 'x86_64': < print "{:#x}".format(long(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'))) - unsigned(kern.globals.physmap_base)) < < @lldb_command('phystokv') < def PhysToKV(cmd_args=None): < """ Translate a physical address to the corresponding static kernel virtual address. < Assumes the physical address corresponds to managed DRAM. < Syntax: (lldb) phystokv < """ < if cmd_args == None or len(cmd_args) < 1: < raise ArgumentError("Too few arguments to phystokv.") < print "{:#x}".format(kern.PhysToKernelVirt(long(unsigned(kern.GetValueFromAddress(cmd_args[0], 'unsigned long'))))) < < def KVToPhysARM(addr): < if kern.arch.startswith('arm64'): < ptov_table = kern.globals.ptov_table < for i in range(0, kern.globals.ptov_index): < if (addr >= long(unsigned(ptov_table[i].va))) and (addr < (long(unsigned(ptov_table[i].va)) + long(unsigned(ptov_table[i].len)))): < return (addr - long(unsigned(ptov_table[i].va)) + long(unsigned(ptov_table[i].pa))) < return (addr - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase)) < 1034,1039c975,976 < pn = (KVToPhysARM(pte) - unsigned(kern.globals.vm_first_phys)) / page_size < pvh = unsigned(kern.globals.pv_head_table[pn]) < if kern.arch.startswith('arm64'): < pvh = pvh | PVH_HIGH_FLAGS_ARM64 < else: < pvh = pvh | PVH_HIGH_FLAGS_ARM32 --- > pn = (pte - unsigned(kern.globals.gVirtBase) + unsigned(kern.globals.gPhysBase) - unsigned(kern.globals.vm_first_phys)) / page_size > pvh = kern.globals.pv_head_table[pn] 1041c978 < if pvh_type != 0x3: --- > if pvh_type != 0x3 and pvh_type != 0x0: 1203c1140 < """ Checks an ARM physical-to-virtual mapping list for consistency errors. 
--- > """ Checks an ARM physical-to-virtual mapping list for consistency error. 1213c1150 < page_size = kern.globals.page_size --- > page_size = kern.globals.arm_hardware_page_size 1219d1155 < pvh_set_bits = PVH_HIGH_FLAGS_ARM64 1224d1159 < pvh_set_bits = PVH_HIGH_FLAGS_ARM32 1228c1163 < pvh = unsigned(kern.globals.pv_head_table[pn]) | pvh_set_bits --- > pvh = unsigned(kern.globals.pv_head_table[pn]) 1275c1210 < raise ArgumentError("Too few arguments to pv_check.") --- > raise ArgumentError("Too few arguments to showallmappings.") NO DIFFS in ./tools/lldbmacros/kdp.py

./tools/reindent.sh differences detected: 18c18 < find -x . \! \( \( -name BUILD -o -name EXTERNAL_HEADERS -o -name zlib -o -name .svn -o -name .git -o -name cscope.\* -o -name \*~ \) -prune \) -type f \( -name \*.c -o -name \*.cpp \) -print0 | \ --- > find -x . \! \( \( -name BUILD -o -name EXTERNAL_HEADERS -o -name libMicro -o -name zlib -o -name .svn -o -name .git -o -name cscope.\* -o -name \*~ \) -prune \) -type f \( -name \*.c -o -name \*.cpp \) -print0 | \
NO DIFFS in ./tools/cred_dump_creds.c
NO DIFFS in ./tools/cred_dump_backtraces.c
NO DIFFS in ./tools/lockstat/Makefile
NO DIFFS in ./tools/lockstat/lockstat.c
NO DIFFS in ./libsa/Makefile
NO DIFFS in ./libsa/lastkerneldataconst.c
NO DIFFS in ./libsa/lastkernelconstructor.c

./libsa/bootstrap.cpp differences detected: 31a32,35 > > #if CONFIG_EMBEDDED > extern uuid_t kernelcache_uuid; > #endif 103d106 < "com.apple.kpi.kasan", 245a249 > #if CONFIG_EMBEDDED 246a251 > #endif 371a377 > #if CONFIG_EMBEDDED 375,383c381,387 < if (kernelcacheUUID) { < if (kernelcacheUUID->getLength() != sizeof(kernelcache_uuid)) { < panic("kernelcacheUUID length is %d, expected %lu", kernelcacheUUID->getLength(), < sizeof(kernelcache_uuid)); < } else { < kernelcache_uuid_valid = TRUE; < memcpy((void *)&kernelcache_uuid, (const void *)kernelcacheUUID->getBytesNoCopy(), kernelcacheUUID->getLength()); < uuid_unparse_upper(kernelcache_uuid, kernelcache_uuid_string); < } --- > if (!kernelcacheUUID) { > bzero(&kernelcache_uuid, sizeof(kernelcache_uuid)); > } else if (kernelcacheUUID->getLength() != sizeof(kernelcache_uuid)) { > panic("kernelcacheUUID length is %d, expected %lu", kernelcacheUUID->getLength(), > sizeof(kernelcache_uuid)); > } else { > memcpy((void *)&kernelcache_uuid, (void *)kernelcacheUUID->getBytesNoCopy(), kernelcacheUUID->getLength()); 384a389 > #endif /* CONFIG_EMBEDDED */ 452c457 < vm_offset_t data = ml_static_slide(addressNum->unsigned64BitValue()); --- > vm_offset_t data = (vm_offset_t) ((addressNum->unsigned64BitValue()) + vm_kernel_slide); 479,480c484,485 < const kaslrPackedOffsets * myOffsets = NULL; < myOffsets = (const kaslrPackedOffsets *) kaslrOffsets->getBytesNoCopy(); --- > kaslrPackedOffsets * myOffsets = NULL; > myOffsets = (kaslrPackedOffsets *) kaslrOffsets->getBytesNoCopy(); 491c496 < addrToSlideSegIndex = __whereIsAddr(ml_static_slide((vm_offset_t)(*slideAddr)), &plk_segSizes[0], &plk_segAddrs[0], PLK_SEGMENTS ); --- > addrToSlideSegIndex = __whereIsAddr( (vm_offset_t)(*slideAddr + vm_kernel_slide), &plk_segSizes[0], &plk_segAddrs[0], PLK_SEGMENTS ); 503c508 < *slideAddr = ml_static_slide(*slideAddr); --- > *(slideAddr) += vm_kernel_slide; NO DIFFS in ./libsa/conf/Makefile.x86_64
NO DIFFS in ./libsa/conf/files.arm64
NO DIFFS in ./libsa/conf/Makefile.template
NO DIFFS in ./libsa/conf/Makefile
NO DIFFS in ./libsa/conf/Makefile.arm64
NO DIFFS in ./libsa/conf/Makefile.arm
NO DIFFS in ./libsa/conf/files.x86_64
NO DIFFS in ./libsa/conf/files
NO DIFFS in ./libsa/conf/copyright
NO DIFFS in ./libsa/conf/files.arm
NO DIFFS in ./APPLE_LICENSE

./config/BSDKernel.exports differences detected: 477d476 < _proc_is64bit_data
./config/newvers.pl differences detected: 17d16 < # ###KERNEL_BUILD_CONFIG### development 60,61d58 < my $BUILD_CONFIG = "unknown"; < $BUILD_CONFIG = $ENV{'CURRENT_KERNEL_CONFIG_LC'} if defined($ENV{'CURRENT_KERNEL_CONFIG_LC'}); 172d168 < $count += $data =~ s/###KERNEL_BUILD_CONFIG###/$BUILD_CONFIG/g; 190d185 < print "newvers.pl: ###KERNEL_BUILD_CONFIG### = $BUILD_CONFIG\n";
./config/Libkern.exports differences detected: 2d1 < _img4_interface_register 48,50d46 < _SHA256_Final < _SHA256_Init < _SHA256_Update 54,56d49 < _SHA512_Final < _SHA512_Init < _SHA512_Update 658d650 < _kmod_info:_invalid_kmod_info 713,718d704 < _os_ref_init_count < _os_ref_retain < _os_ref_release_explicit < _os_ref_retain_try < _os_ref_retain_locked < _os_ref_release_locked 726d711 < _random_buf 772,783d756 < < __Block_copy < __Block_release < __NSConcreteAutoBlock < __NSConcreteFinalizingBlock < __NSConcreteGlobalBlock < __NSConcreteMallocBlock < __NSConcreteStackBlock < __NSConcreteWeakBlockVariable < __ZN12OSCollection14iterateObjectsEU13block_pointerFbP8OSObjectE < __ZN12OSDictionary14iterateObjectsEU13block_pointerFbPK8OSSymbolP8OSObjectE < __ZN12OSSerializer9withBlockEU13block_pointerFbP11OSSerializeE
./config/Private.exports differences detected: 2,3d1 < __ZN15IORegistryEntry18setIndexedPropertyEjP8OSObject < __ZNK15IORegistryEntry18getIndexedPropertyEj 12,14d9 < __ZN12IOUserClient27copyObjectForPortNameInTaskEP4taskjPP8OSObject < __ZN12IOUserClient27copyPortNameForObjectInTaskEP4taskP8OSObjectPj < __ZN12IOUserClient30adjustPortNameReferencesInTaskEP4taskji 56a52 > _chudxnu_platform_ptr 92,94d87 < _cpx_writeprotect < _cs_blob_create_validated < _cs_blob_free 96a90 > _cs_enforcement 99,101d92 < _cs_debug_fail_on_unsigned_code < _cs_debug_unsigned_exec_failures < _cs_debug_unsigned_mmap_failures 104,105d94 < _cs_process_enforcement < _cs_process_global_enforcement 107d95 < _cs_system_enforcement 126d113 < _csfg_get_identity 129d115 < _csfg_get_platform_identifier 135d120 < _csproc_get_platform_identifier 141d125 < _csvnode_get_platform_identifier 208d191 < _ifnet_set_low_power_mode 249a233,234 > _kevent_qos_internal_bind > _kevent_qos_internal_unbind 340a326 > _prng_factory_register 342d327 < _proc_issetugid 366d350 < _register_and_init_prng 412d395 < _thread_clear_honor_qlimit 415d397 < _thread_set_honor_qlimit 431d412 < _throttle_get_thread_effective_io_policy 496,497d476 < _vnode_getbackingvnode < _vnode_setasnamedstream 614,625d592 < < _Block_size < __Block_extended_layout < __Block_has_signature < __Block_isDeallocating < __Block_layout < __Block_object_assign < __Block_object_dispose < __Block_signature < __Block_tryRetain < __Block_use_RR2 < __Block_use_stret NO DIFFS in ./config/MACFramework.arm64.exports
NO DIFFS in ./config/BSDKernel.x86_64.exports
NO DIFFS in ./config/MACFramework.arm.exports

./config/Private.arm64.exports differences detected: 10,11d9 < _PE_smc_stashed_x86_shutdown_cause < _PE_smc_stashed_x86_prev_power_transitions 21d18 < _get_preemption_level 42d38 < _PE_panic_debugging_enabled NO DIFFS in ./config/list_supported.sh
NO DIFFS in ./config/README.DEBUG-kernel.txt

./config/MACFramework.exports differences detected: 15c15,17 < _mac_vnode_check_trigger_resolve --- > _mac_iokit_check_nvram_delete > _mac_iokit_check_nvram_get > _mac_iokit_check_nvram_set
./config/Mach.exports differences detected: 7d6 < _clock_continuoustime_interval_to_deadline
./config/Makefile differences detected: 53,56d52 < ifeq ($(KASAN),1) < KASAN_EXPORTS = $(SRCROOT)/san/Kasan_kasan.exports < endif < 147,150c143 < $(_v)$(SOURCE)/generate_linker_exports.sh $@ $+ $(KASAN_EXPORTS) < < $(OBJPATH)/all-alias.exp: $(EXPORTS_FILES) < $(_v)$(SOURCE)/generate_linker_aliases.sh $@ $+ $(KASAN_EXPORTS) --- > $(_v)$(SOURCE)/generate_linker_exports.sh $@ $+ 152c145 < do_build_all:: $(OBJPATH)/all-kpi.exp $(OBJPATH)/all-alias.exp --- > do_build_all:: $(OBJPATH)/all-kpi.exp NO DIFFS in ./config/Unsupported.arm.exports
NO DIFFS in ./config/Libkern.x86_64.exports

./config/Private.arm.exports differences detected: 17d16 < _get_preemption_level 23d21 < _PE_panic_debugging_enabled NO DIFFS in ./config/System.kext/PlugIns/IOKit.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/Kasan.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/ApplePlatformFamily.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/BSDKernel.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/Libkern.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/IOSystemManagement.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/Mach.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/MACFramework.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/IONVRAMFamily.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/Unsupported.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/Private.kext/Info.plist
NO DIFFS in ./config/System.kext/PlugIns/AppleNMI.kext/Info.plist
NO DIFFS in ./config/System.kext/Info.plist
NO DIFFS in ./config/BSDKernel.arm.exports

./config/MasterVersion differences detected: 1c1 < 18.2.0 --- > 17.3.0
./config/IOKit.arm64.exports differences detected: 231,232d230 < < __ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionaryiU13block_pointerFbPS_P10IONotifierE
./config/Libkern.arm.exports differences detected: 3d2 < __ZN15OSMetaClassBase9_ptmf2ptfEPKS_MS_FvvE
./config/IOKit.exports differences detected: 645d644 < __ZN17IOPolledInterface16setEncryptionKeyEPKhm 889d887 < __ZN23IOMultiMemoryDescriptor16getPreparationIDEv 968d965 < __ZN28IOFilterInterruptEventSource4freeEv 1659,1669d1655 < __ZN10IOWorkLoop14runActionBlockEU13block_pointerFivE < __ZN13IOCommandGate14runActionBlockEU13block_pointerFivE < __ZN13IOEventSource14setActionBlockEU13block_pointerFivE < __ZN18IOTimerEventSource16timerEventSourceEjP8OSObjectU13block_pointerFvPS_E < __ZN22IOInterruptEventSource20interruptEventSourceEP8OSObjectP9IOServiceiU13block_pointerFvPS_iE < __ZN28IOFilterInterruptEventSource26filterInterruptEventSourceEP8OSObjectP9IOServiceiU13block_pointerFvP22IOInterruptEventSourceiEU13block_pointerFbPS_E < __ZN9IOService16registerInterestEPK8OSSymbolU13block_pointerFijPS_PvmE < __ZN9IOService22registerInterruptBlockEiP8OSObjectU13block_pointerFvPS_iE < __ZNK13IOEventSource14getActionBlockEU13block_pointerFivE < __ZN13IOEventSource9setRefconEPv < __ZNK13IOEventSource9getRefconEv
./config/MASTER differences detected: 4c4 < # Copyright 2001-2018 Apple Inc. --- > # Copyright 2001-2014 Apple Inc. 119d118 < options CONFIG_WORKLOOP_DEBUG # 145d143 < options CONFIG_MNT_ROOTSNAP # allow rooting from snapshot # 177,178d174 < options CONFIG_IMG4 # < 314,319d309 < # < # configurable kernel - general switch to say we are building for an < # embedded device < # < options CONFIG_EMBEDDED # < 339,341d328 < #allow write-protection of key page < options CONFIG_KEYPAGE_WP # < 388,392d374 < # Ledger features < # < options CONFIG_LEDGER_INTERVAL_MAX # < < # 498d479 < options CONFIG_BLOCKS # Blocks runtime # 576c557 < options CONFIG_ZCACHE #Enable per-cpu caching for zones # --- > 757,764d737 < < # < # Telemetry for 32-bit process launch < # < options CONFIG_32BIT_TELEMETRY # # < < options CONFIG_QUIESCE_COUNTER # Support for _COMM_PAGE_CPU_QUIESCENT_COUNTER # <
./config/IOKit.arm.exports differences detected: 310,311d309 < < __ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionarylU13block_pointerFbPS_P10IONotifierE
./config/Private.x86_64.exports differences detected: 3a4,6 > _SHA256_Final > _SHA256_Init > _SHA256_Update 64,70d66 < < #macOS only codesigning kpi < _csproc_disable_enforcement < _csproc_mark_invalid_allowed < _csproc_check_invalid_allowed < _csproc_hardened_runtime < _csproc_forced_lv NO DIFFS in ./config/MACFramework.x86_64.exports
NO DIFFS in ./config/BSDKernel.arm64.exports

./config/Libkern.arm64.exports differences detected: 4d3 < __ZN15OSMetaClassBase9_ptmf2ptfEPKS_MS_FvvE NO DIFFS in ./config/Unsupported.exports

./config/version.c differences detected: 38,39d37 < // for what(1): < const char __kernelVersionString[] __attribute__((used)) = "@(#)VERSION: " OSTYPE " Kernel Version ###KERNEL_VERSION_LONG###: ###KERNEL_BUILD_DATE###; ###KERNEL_BUILDER###:###KERNEL_BUILD_OBJROOT###"; 47d44 < const char osbuild_config[] = "###KERNEL_BUILD_CONFIG###"; NO DIFFS in ./config/Mach.arm.exports
NO DIFFS in ./config/Mach.x86_64.exports
NO DIFFS in ./config/generate_linker_exports.sh

./config/MASTER.x86_64 differences detected: 4c4 < # Copyright 2001-2018 Apple Inc. --- > # Copyright 2001-2016 Apple Inc. 19c19 < # KERNEL_BASE = [ intel medium config_requires_u32_munging config_zcache ] --- > # KERNEL_BASE = [ intel medium config_requires_u32_munging ] 22,23c22,23 < # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug ] < # BSD_BASE = [ mach_bsd sysv_sem sysv_msg sysv_shm config_imageboot config_workqueue psynch config_proc_uuid_policy config_coredump pgo config_32bit_telemetry ] --- > # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_waitq_debug ] > # BSD_BASE = [ mach_bsd sysv_sem sysv_msg sysv_shm config_imageboot config_workqueue psynch config_proc_uuid_policy config_coredump pgo ] 27c27 < # FILESYS_BASE = [ devfs fdesc config_dev_kmem config_fse quota namedstreams config_mnt_rootsnap config_keypage_wp config_protect fifo config_volfs fs_compression config_imgsrc_access config_triggers config_ext_resolver config_searchfs config_appledouble nullfs config_mnt_suid ] --- > # FILESYS_BASE = [ devfs fdesc config_dev_kmem config_fse quota namedstreams config_protect fifo config_volfs fs_compression config_imgsrc_access config_triggers config_ext_resolver config_searchfs config_appledouble nullfs config_mnt_suid ] 40c40 < # LIBKERN_BASE = [ libkerncpp config_blocks config_kxld config_kec_fips zlib crypto_sha2 config_img4 ] --- > # LIBKERN_BASE = [ libkerncpp config_kxld config_kec_fips zlib crypto_sha2 ] 47,48c47,48 < # MACH_DEV = [ MACH_BASE task_zone_info importance_trace config_ledger_interval_max ] < # MACH_DEBUG = [ MACH_BASE task_zone_info importance_trace config_ledger_interval_max importance_debug ] --- > # MACH_DEV = [ MACH_BASE task_zone_info importance_trace ] > # MACH_DEBUG = [ MACH_BASE task_zone_info importance_trace importance_debug ] NO DIFFS in ./config/Unused.exports

./config/IOKit.x86_64.exports differences detected: 192a193 > __ZN17IOPolledInterface27_RESERVEDIOPolledInterface0Ev 502,503d502 < < __ZN9IOService23addMatchingNotificationEPK8OSSymbolP12OSDictionaryiU13block_pointerFbPS_P10IONotifierE NO DIFFS in ./config/Mach.arm64.exports

./config/Unsupported.arm64.exports differences detected: 7d6 < __get_commpage_priv_address NO DIFFS in ./config/Unsupported.x86_64.exports

./config/MASTER.arm64 differences detected: 4c4 < # Copyright 2001-2018 Apple Inc. --- > # Copyright 2001-2016 Apple Inc. 19c19 < # KERNEL_BASE = [ arm64 xsmall config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache ] --- > # KERNEL_BASE = [ arm64 xsmall config_embedded config_requires_u32_munging ] 22c22 < # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] --- > # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_waitq_debug pgtrace ] 27c27 < # FILESYS_BASE = [ devfs fifo fs_compression config_mnt_rootsnap config_protect config_fse routefs quota namedstreams ] --- > # FILESYS_BASE = [ devfs fifo fs_compression config_protect config_fse routefs quota namedstreams ] 31a32,35 > # SKYWALK_BASE = [ skywalk config_nexus_user_pipe config_nexus_kernel_pipe config_nexus_monitor config_nexus_flowswitch config_nexus_netif ] > # SKYWALK_RELEASE = [ SKYWALK_BASE ] > # SKYWALK_DEV = [ SKYWALK_BASE ] > # SKYWALK_DEBUG = [ SKYWALK_BASE ] 40c44 < # LIBKERN_BASE = [ libkerncpp config_blocks config_kec_fips zlib crypto_sha2 config_img4 ] --- > # LIBKERN_BASE = [ libkerncpp config_kec_fips zlib crypto_sha2 ] 48c52 < # MACH_BASE = [ mach slidable config_ecc_logging vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_iosched config_library_validation config_sysdiagnose config_telemetry config_mach_bridge_recv_time config_quiesce_counter ] --- > # MACH_BASE = [ mach slidable config_ecc_logging vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_iosched config_library_validation config_sysdiagnose config_telemetry config_mach_bridge_recv_time] 50,51c54,55 < # MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max ] < # MACH_DEBUG = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max importance_debug ] --- > # MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace ] > # MACH_DEBUG = [ MACH_BASE task_zone_info config_io_accounting importance_trace importance_debug ]
./config/MASTER.arm differences detected: 4c4 < # Copyright 2001-2018 Apple Inc. --- > # Copyright 2001-2016 Apple Inc. 19c19 < # KERNEL_BASE = [ arm xsmall config_embedded config_enforce_signed_code config_zcache ] --- > # KERNEL_BASE = [ arm xsmall config_embedded ] 22c22 < # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug ] --- > # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_waitq_debug ] 27c27 < # FILESYS_BASE = [ devfs fifo fs_compression config_mnt_rootsnap config_protect config_fse routefs quota namedstreams ] --- > # FILESYS_BASE = [ devfs fifo fs_compression config_protect config_fse routefs quota namedstreams ] 31a32,35 > # SKYWALK_BASE = [ skywalk config_nexus_user_pipe config_nexus_kernel_pipe config_nexus_monitor config_nexus_flowswitch config_nexus_netif ] > # SKYWALK_RELEASE = [ SKYWALK_BASE ] > # SKYWALK_DEV = [ SKYWALK_BASE ] > # SKYWALK_DEBUG = [ SKYWALK_BASE ] 40c44 < # LIBKERN_BASE = [ libkerncpp config_blocks config_kec_fips zlib crypto_sha2 config_img4 ] --- > # LIBKERN_BASE = [ libkerncpp config_kec_fips zlib crypto_sha2 ] 48c52 < # MACH_BASE = [ mach slidable vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_library_validation config_iosched config_telemetry config_sysdiagnose config_quiesce_counter ] --- > # MACH_BASE = [ mach slidable vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_library_validation config_iosched config_telemetry config_sysdiagnose ] 50,51c54,55 < # MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max ] < # MACH_DEBUG = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max importance_debug ] --- > # MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace ] > # MACH_DEBUG = [ MACH_BASE task_zone_info config_io_accounting importance_trace importance_debug ] NO DIFFS in ./security/mac_socket.c
NO DIFFS in ./security/mac_pipe.c
NO DIFFS in ./security/mac_posix_sem.c
NO DIFFS in ./security/mac_sysv_sem.c
NO DIFFS in ./security/mac_skywalk.c

./security/mac_iokit.c differences detected: 121a122,149 > int > mac_iokit_check_nvram_delete(kauth_cred_t cred, const char *name) > { > int error; > > MAC_CHECK(iokit_check_nvram_delete, cred, name); > return (error); > } > > int > mac_iokit_check_nvram_get(kauth_cred_t cred, const char *name) > { > int error; > > MAC_CHECK(iokit_check_nvram_get, cred, name); > return (error); > } > > int > mac_iokit_check_nvram_set(kauth_cred_t cred, const char *name, io_object_t value) > { > int error; > > MAC_CHECK(iokit_check_nvram_set, cred, name, value); > return (error); > } > > NO DIFFS in ./security/mac_priv.c
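The mac_iokit_check_nvram_* entry points that appear on one side of the mac_iokit.c hunk above are thin MAC_CHECK dispatchers. A hypothetical caller-side sketch of how such a gate is consumed is below; nvram_read_allowed and its placement are illustrative only (the real call sites would live in the IONVRAM code, which is not part of this hunk).

    #include <sys/errno.h>
    #include <sys/kauth.h>
    #if CONFIG_MACF
    #include <security/mac_framework.h>
    #endif

    /* Hypothetical gate on an NVRAM variable read. */
    static int
    nvram_read_allowed(kauth_cred_t cred, const char *name)
    {
    #if CONFIG_MACF
        int error = mac_iokit_check_nvram_get(cred, name);
        if (error)
            return error;        /* a registered policy denied the read */
    #endif
        return 0;                /* proceed with the copy-out */
    }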
NO DIFFS in ./security/mac_system.c
NO DIFFS in ./security/mac_alloc.c

./security/mac_framework.h differences detected: 240a241,243 > int mac_iokit_check_nvram_delete(kauth_cred_t cred, const char *name); > int mac_iokit_check_nvram_get(kauth_cred_t cred, const char *name); > int mac_iokit_check_nvram_set(kauth_cred_t cred, const char *name, io_object_t value); 534,536c537,539 < struct cs_blob *cs_blob, struct image_params *imgp, < unsigned int *cs_flags, unsigned int *signer_type, < int flags); --- > struct cs_blob *cs_blob, struct image_params *imgp, > unsigned int *cs_flags, unsigned int *signer_type, > int flags); 539,540d541 < int mac_vnode_check_trigger_resolve(vfs_context_t ctx, struct vnode *dvp, < struct componentname *cnp); NO DIFFS in ./security/Makefile

./security/mac_base.c differences detected: 2010,2011c2010,2024 < int mac_vnode_check_trigger_resolve(vfs_context_t ctx __unused, struct vnode *dvp __unused, struct componentname *cnp __unused); < int mac_vnode_check_trigger_resolve(vfs_context_t ctx __unused, struct vnode *dvp __unused, struct componentname *cnp __unused) --- > > int mac_iokit_check_nvram_delete(kauth_cred_t cred __unused, const char *name __unused); > int mac_iokit_check_nvram_delete(kauth_cred_t cred __unused, const char *name __unused) > { > return 0; > } > > int mac_iokit_check_nvram_get(kauth_cred_t cred __unused, const char *name __unused); > int mac_iokit_check_nvram_get(kauth_cred_t cred __unused, const char *name __unused) > { > return 0; > } > > int mac_iokit_check_nvram_set(kauth_cred_t cred __unused, const char *name __unused, io_object_t value __unused); > int mac_iokit_check_nvram_set(kauth_cred_t cred __unused, const char *name __unused, io_object_t value __unused) NO DIFFS in ./security/mac_vfs_subr.c

./security/mac_policy.h differences detected: 3844,3845c3844 < performance-related tasks using the CHUD system call. This interface is < deprecated. --- > performance-related tasks using the CHUD system call. 3851c3850 < kauth_cred_t cred --- > kauth_cred_t cred 4553,4565d4551 < @brief Notification a process is finished with exec and will jump to userspace < @param p Object process < < Notifies all MAC policies that a process has completed an exec and is about to < jump to userspace to continue execution. This may result in process termination < via signals. Hook is designed to hold no/minimal locks so it can be used for any < necessary upcalls. < */ < typedef void mpo_proc_notify_exec_complete_t( < struct proc *p < ); < < /** 5406d5391 < @param cpu_type cpu type of the signature being checked 5420d5404 < cpu_type_t cpu_type, 5451,5471d5434 < @brief Access control check for vnode trigger resolution < @param cred Subject credential < @param dvp Object vnode < @param dlabel Policy label for dvp < @param cnp Component name that triggered resolution < < Determine whether the subject identified by the credential can trigger < resolution of the passed name (cnp) in the passed directory vnode < via an external trigger resolver. < < @return Return 0 if access is granted, otherwise an appropriate value for < errno should be returned. Suggested failure: EACCES for label mismatch or < EPERM for lack of privilege. < */ < typedef int mpo_vnode_check_trigger_resolve_t( < kauth_cred_t cred, < struct vnode *dvp, < struct label *dlabel, < struct componentname *cnp < ); < /** 6279a6243,6292 > /** > @brief Access control check for getting NVRAM variables. > @param cred Subject credential > @param name NVRAM variable to get > > Determine whether the subject identifier by the credential can get the > value of the named NVRAM variable. > > @return Return 0 if access is granted, otherwise an appropriate value for > errno should be returned. Suggested failure: EPERM for lack of privilege. > */ > typedef int mpo_iokit_check_nvram_get_t( > kauth_cred_t cred, > const char *name > ); > > /** > @brief Access control check for setting NVRAM variables. > @param cred Subject credential > @param name NVRAM variable to set > @param value The new value for the NVRAM variable > > Determine whether the subject identifier by the credential can set the > value of the named NVRAM variable. > > @return Return 0 if access is granted, otherwise an appropriate value for > errno should be returned. Suggested failure: EPERM for lack of privilege. > */ > typedef int mpo_iokit_check_nvram_set_t( > kauth_cred_t cred, > const char *name, > io_object_t value > ); > > /** > @brief Access control check for deleting NVRAM variables. > @param cred Subject credential > @param name NVRAM variable to delete > > Determine whether the subject identifier by the credential can delete the > named NVRAM variable. > > @return Return 0 if access is granted, otherwise an appropriate value for > errno should be returned. Suggested failure: EPERM for lack of privilege. 
> */ > typedef int mpo_iokit_check_nvram_delete_t( > kauth_cred_t cred, > const char *name > ); > 6291c6304 < #define MAC_POLICY_OPS_VERSION 55 /* inc when new reserved slots are taken */ --- > #define MAC_POLICY_OPS_VERSION 52 /* inc when new reserved slots are taken */ 6430,6432c6443,6445 < mpo_proc_notify_exec_complete_t *mpo_proc_notify_exec_complete; < mpo_reserved_hook_t *mpo_reserved5; < mpo_reserved_hook_t *mpo_reserved6; --- > mpo_iokit_check_nvram_get_t *mpo_iokit_check_nvram_get; > mpo_iokit_check_nvram_set_t *mpo_iokit_check_nvram_set; > mpo_iokit_check_nvram_delete_t *mpo_iokit_check_nvram_delete; 6443d6455 < mpo_vnode_check_trigger_resolve_t *mpo_vnode_check_trigger_resolve; 6446a6459 > mpo_reserved_hook_t *mpo_reserved4;
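The mac_policy.h hunk above both defines mpo_vnode_check_trigger_resolve_t (with its slot in struct mac_policy_ops) on one side and carries the iokit NVRAM slots on the other, alongside a MAC_POLICY_OPS_VERSION change. A minimal policy-side sketch for the trigger-resolve hook is below, assuming registration through the usual mac_policy_register() path (not shown); the example_* names and the always-grant decision are invented for illustration.

    #include <sys/errno.h>
    #include <security/mac_policy.h>

    /* Grant by default; a restrictive policy would return EACCES (label mismatch)
     * or EPERM (lack of privilege), per the doc comment in the hunk above. */
    static int
    example_vnode_check_trigger_resolve(kauth_cred_t cred __unused, struct vnode *dvp __unused,
        struct label *dlabel __unused, struct componentname *cnp __unused)
    {
        return 0;
    }

    static struct mac_policy_ops example_ops;

    static void
    example_wire_ops(void)
    {
        /* Slot name as declared in the ops table above; the same tree bumps
         * MAC_POLICY_OPS_VERSION, which a registering policy compiles against. */
        example_ops.mpo_vnode_check_trigger_resolve = example_vnode_check_trigger_resolve;
    }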
./security/mac_mach.c differences detected: 152,166d151 < void < mac_proc_notify_exec_complete(struct proc *proc) < { < thread_t thread = current_thread(); < < /* < * Since this MAC hook was designed to support upcalls, make sure the hook < * is called with kernel importance propagation enabled so any daemons < * can get any appropriate importance donations. < */ < thread_enable_send_importance(thread, TRUE); < MAC_PERFORM(proc_notify_exec_complete, proc); < thread_enable_send_importance(thread, FALSE); < } < NO DIFFS in ./security/mac_data.h

./security/mac_mach_internal.h differences detected: 102,103d101 < void mac_proc_notify_exec_complete(struct proc *proc); < NO DIFFS in ./security/mac_pty.c
NO DIFFS in ./security/mac_sysv_msg.c
NO DIFFS in ./security/mac_process.c
NO DIFFS in ./security/mac_sysv_shm.c
NO DIFFS in ./security/mac_alloc.h
NO DIFFS in ./security/mac_posix_shm.c
NO DIFFS in ./security/mac_net.c
NO DIFFS in ./security/mac_label.c
NO DIFFS in ./security/mac_internal.h
NO DIFFS in ./security/mac_kext.c
NO DIFFS in ./security/mac_audit.c
NO DIFFS in ./security/mac.h
NO DIFFS in ./security/conf/Makefile.x86_64
NO DIFFS in ./security/conf/files.arm64
NO DIFFS in ./security/conf/Makefile.template
NO DIFFS in ./security/conf/Makefile
NO DIFFS in ./security/conf/copyright.nai
NO DIFFS in ./security/conf/Makefile.arm64
NO DIFFS in ./security/conf/Makefile.arm
NO DIFFS in ./security/conf/files.x86_64
NO DIFFS in ./security/conf/files
NO DIFFS in ./security/conf/files.arm
NO DIFFS in ./security/mac_file.c

./security/mac_vfs.c differences detected: 1114d1113 < cpu_type_t cpu_type = (imgp == NULL) ? CPU_TYPE_ANY : imgp->ip_origcputype; 1123c1122 < MAC_CHECK(vnode_check_signature, vp, vp->v_label, cpu_type, cs_blob, --- > MAC_CHECK(vnode_check_signature, vp, vp->v_label, cs_blob, 1704,1722d1702 < mac_vnode_check_trigger_resolve(vfs_context_t ctx, struct vnode *dvp, < struct componentname *cnp) < { < kauth_cred_t cred; < int error; < < #if SECURITY_MAC_CHECK_ENFORCE < /* 21167099 - only check if we allow write */ < if (!mac_vnode_enforce) < return 0; < #endif < cred = vfs_context_ucred(ctx); < if (!mac_cred_check_enforce(cred)) < return (0); < MAC_CHECK(vnode_check_trigger_resolve, cred, dvp, dvp->v_label, cnp); < return (error); < } < < int NO DIFFS in ./security/mac_inet.c
NO DIFFS in ./security/mac_data.c
NO DIFFS in ./security/_label.h

./Makefile differences detected: 223c223 < SETUP_SUBDIRS = SETUP osfmk san --- > SETUP_SUBDIRS = SETUP san 241c241 < installapi_libkdd installhdrs_libkdd install_libkdd: --- > installhdrs_libkdd install_libkdd: 243,263c243 < xcodebuild -target Default $(subst _libkdd,,$@) \ < "SRCROOT=$(SRCROOT)/libkdd" \ < "OBJROOT=$(OBJROOT)" \ < "SYMROOT=$(SYMROOT)" \ < "DSTROOT=$(DSTROOT)" \ < "SDKROOT=$(SDKROOT)" < < < installapi_libkdd_tests installhdrs_libkdd_tests install_libkdd_tests: < cd libkdd; \ < xcodebuild -target tests $(subst _libkdd_tests,,$@) \ < "SRCROOT=$(SRCROOT)/libkdd" \ < "OBJROOT=$(OBJROOT)" \ < "SYMROOT=$(SYMROOT)" \ < "DSTROOT=$(DSTROOT)" \ < "SDKROOT=$(SDKROOT)" < < < installapi_libkdd_host installhdrs_libkdd_host install_libkdd_host: < cd libkdd; \ < xcodebuild -configuration ReleaseHost -target kdd.framework $(subst _libkdd_host,,$@) \ --- > xcodebuild -target libkdd $(subst _libkdd,,$@) \ 278,279d257 < $(MAKE) -C $(SRCROOT)/tests $(if $(filter -j,$(MAKEFLAGS)),,$(MAKEJOBS)) \ < SRCROOT=$(SRCROOT)/tests NO DIFFS in ./iokit/bsddev/IOKitBSDInit.h
NO DIFFS in ./iokit/bsddev/DINetBootHook.h

./iokit/bsddev/IOKitBSDInit.cpp differences detected: 41d40 < #include 60a60 > 65d64 < 70,73c69,70 < < #else /* defined(XNU_TARGET_OS_BRIDGE) */ < #define kIOCoreDumpMinSize 350ULL*1024ULL*1024ULL < #define kIOCoreDumpLargeSize 500ULL*1024ULL*1024ULL --- > #else > #define kIOCoreDumpSize 350ULL*1024ULL*1024ULL 76a74 > #endif 78,80c76 < #endif /* defined(XNU_TARGET_OS_BRIDGE) */ < < #elif DEVELOPMENT /* CONFIG_EMBEDDED */ --- > #elif DEVELOPMENT 85c81 < #else /* CONFIG_EMBEDDED */ --- > #else 87c83 < #endif /* CONFIG_EMBEDDED */ --- > #endif 771c767 < kern_return_t gIOPolledCoreFileOpenRet = kIOReturnNotReady; --- > 779d774 < uint64_t corefile_size_bytes = 0; 788,859c783,786 < #if CONFIG_EMBEDDED < unsigned int requested_corefile_size = 0; < if (PE_parse_boot_argn("corefile_size_mb", &requested_corefile_size, sizeof(requested_corefile_size))) { < IOLog("Boot-args specify %d MB kernel corefile\n", requested_corefile_size); < < corefile_size_bytes = (requested_corefile_size * 1024ULL * 1024ULL); < } < #endif < < < do { < #if defined(kIOCoreDumpLargeSize) < if (0 == corefile_size_bytes) < { < // If no custom size was requested and we're on a device with >3GB of DRAM, attempt < // to allocate a large corefile otherwise use a small file. < if (max_mem > (3 * 1024ULL * 1024ULL * 1024ULL)) < { < corefile_size_bytes = kIOCoreDumpLargeSize; < err = IOPolledFileOpen(filename, < kIOPolledFileCreate, < corefile_size_bytes, kIOCoreDumpFreeSize, < NULL, 0, < &gIOPolledCoreFileVars, NULL, NULL, 0); < if (kIOReturnSuccess == err) < { < break; < } < else if (kIOReturnNoSpace == err) < { < IOLog("Failed to open corefile of size %llu MB (low disk space)", < (corefile_size_bytes / (1024ULL * 1024ULL))); < if (corefile_size_bytes == kIOCoreDumpMinSize) < { < gIOPolledCoreFileOpenRet = err; < return (err); < } < // Try to open a smaller corefile (set size and fall-through) < corefile_size_bytes = kIOCoreDumpMinSize; < } < else < { < IOLog("Failed to open corefile of size %llu MB (returned error 0x%x)\n", < (corefile_size_bytes / (1024ULL * 1024ULL)), err); < gIOPolledCoreFileOpenRet = err; < return (err); < } < } < else < { < corefile_size_bytes = kIOCoreDumpMinSize; < } < } < #else /* defined(kIOCoreDumpLargeSize) */ < if (0 == corefile_size_bytes) < { < corefile_size_bytes = kIOCoreDumpSize; < } < #endif /* defined(kIOCoreDumpLargeSize) */ < err = IOPolledFileOpen(filename, < kIOPolledFileCreate, < corefile_size_bytes, kIOCoreDumpFreeSize, < NULL, 0, < &gIOPolledCoreFileVars, NULL, NULL, 0); < if (kIOReturnSuccess != err) < { < IOLog("Failed to open corefile of size %llu MB (returned error 0x%x)\n", < (corefile_size_bytes / (1024ULL * 1024ULL)), err); < gIOPolledCoreFileOpenRet = err; < return (err); < } < } while (false); --- > err = IOPolledFileOpen(filename, kIOCoreDumpSize, kIOCoreDumpFreeSize, > NULL, 0, > &gIOPolledCoreFileVars, NULL, NULL, 0); > if (kIOReturnSuccess != err) return (err); 864,870c791 < IOPolledFileClose(&gIOPolledCoreFileVars, NULL, NULL, 0, 0, 0); < IOLog("IOPolledFilePollersSetup for corefile failed with error: 0x%x\n", err); < gIOPolledCoreFileOpenRet = err; < } < else < { < IOLog("Opened corefile of size %llu MB\n", (corefile_size_bytes / (1024ULL * 1024ULL))); --- > IOPolledFileClose(&gIOPolledCoreFileVars, NULL, NULL, 0, 0, 0); 879d799 < gIOPolledCoreFileOpenRet = kIOReturnNotOpen; NO DIFFS in ./iokit/bsddev/DINetBootHook.cpp
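The bulk of the IOKitBSDInit.cpp hunk above reworks kernel-corefile sizing on one side of the comparison: a corefile_size_mb boot-arg override, a large-or-minimum choice keyed off max_mem, and a retry when the large file does not fit. A condensed sketch of just the sizing decision follows; choose_corefile_size is an invented helper, the kIOCoreDump* constants are the #defines from earlier in that file, and the kIOReturnNoSpace fallback loop is elided.

    #include <pexpert/pexpert.h>        /* PE_parse_boot_argn() */

    extern uint64_t max_mem;            /* kernel global: installed DRAM */

    static uint64_t
    choose_corefile_size(void)
    {
        uint64_t size = 0;
        unsigned int mb = 0;

        /* explicit boot-arg override, in megabytes */
        if (PE_parse_boot_argn("corefile_size_mb", &mb, sizeof(mb)))
            size = (uint64_t)mb * 1024ULL * 1024ULL;

    #if defined(kIOCoreDumpLargeSize)
        if (size == 0)                  /* >3GB of DRAM gets the large corefile */
            size = (max_mem > 3ULL * 1024 * 1024 * 1024)
                 ? kIOCoreDumpLargeSize : kIOCoreDumpMinSize;
    #else
        if (size == 0)
            size = kIOCoreDumpSize;
    #endif
        return size;
    }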
NO DIFFS in ./iokit/KernelConfigTables.cpp
NO DIFFS in ./iokit/Makefile

./iokit/IOKit/IOTypes.h differences detected: 86c86 < #if !defined(__arm__) && !defined(__i386__) && !(defined(__x86_64__) && !defined(KERNEL)) && !(defined(__arm64__) && !defined(__LP64__)) --- > #if !defined(__arm__) && !defined(__i386__) && !(defined(__x86_64__) && !defined(KERNEL)) NO DIFFS in ./iokit/IOKit/IOConditionLock.h

./iokit/IOKit/IONVRAM.h differences detected: 139d138 < IOReturn syncVariables(void); NO DIFFS in ./iokit/IOKit/power/Makefile
NO DIFFS in ./iokit/IOKit/power/IOPwrController.h

./iokit/IOKit/IOEventSource.h differences detected: 109,112d108 < #ifdef __BLOCKS__ < typedef IOReturn (^ActionBlock)(); < #endif /* __BLOCKS__ */ < 123,126d118 < < #if XNU_KERNEL_PRIVATE < union { Action action; ActionBlock actionBlock; }; < #else /* XNU_KERNEL_PRIVATE */ 128d119 < #endif /* !XNU_KERNEL_PRIVATE */ 134a126 > 137,140c129,130 < kPassive = 0x0001, < kActive = 0x0002, < kActionBlock = 0x0004, < kSubClass0 = 0x0008, --- > kPassive = 0x0001, > kActive = 0x0002, 244,263d233 < #ifdef __BLOCKS__ < /*! @function setActionBlock < @abstract Setter for action ivar. The current block is released, & the new block is retained. < @param block Block pointer of type IOEventSource::ActionBlock. */ < void setActionBlock(ActionBlock block); < /*! @function getActionBlock < @abstract Getter for action ivar. < @result Block pointer of type IOEventSource::ActionBlock, if set, or NULL. */ < ActionBlock getActionBlock(ActionBlock) const; < #endif /* __BLOCKS__ */ < < /*! @function setRefcon < @abstract Setter for refcon ivar. This function will assert if a block action has been set. < @param refcon Refcon. */ < void setRefcon(void *refcon); < /*! @function getRefcon < @abstract Getter for refcon ivar. < @result The refcon. This function will assert if a block action has been set. */ < void * getRefcon() const; < NO DIFFS in ./iokit/IOKit/IODeviceTreeSupport.h

./iokit/IOKit/IOPlatformExpert.h differences detected: 76,83d75 < #ifdef XNU_KERNEL_PRIVATE < enum { < kIOSystemShutdownNotificationStageProcessExit = 0, < kIOSystemShutdownNotificationStageRootUnmount = 1, < }; < extern void IOSystemShutdownNotification(int stage); < #endif /* XNU_KERNEL_PRIVATE */ < 152c144 < ExpansionData *iope_reserved __unused; --- > ExpansionData *reserved; 247c239 < ExpansionData *iodtpe_reserved; --- > ExpansionData *reserved; 324c316 < ExpansionData *ioped_reserved __unused; --- > ExpansionData *reserved; 356c348 < ExpansionData *iopd_reserved; --- > ExpansionData *reserved;
./iokit/IOKit/IOKitKeys.h differences detected: 138d137 < #define kIOMaximumSwapWriteKey "IOMaximumSwapWrite" // (OSNumber) NO DIFFS in ./iokit/IOKit/IOMapper.h
NO DIFFS in ./iokit/IOKit/IOMessage.h

./iokit/IOKit/IOReturn.h differences detected: 65d64 < #define sub_iokit_wirelesscharging err_sub(18) 85d83 < #define sub_iokit_appleppm err_sub(0x20A) NO DIFFS in ./iokit/IOKit/IOSubMemoryDescriptor.h

./iokit/IOKit/IOHibernatePrivate.h differences detected: 5c5 < * --- > * 14c14 < * --- > * 17c17 < * --- > * 25c25 < * --- > * 57c57 < --- > 64c64 < --- > 89c89 < --- > 108,110c108 < uint8_t bridgeBootSessionUUID[16]; < < uint32_t reserved[54]; // make sizeof == 512 --- > uint32_t reserved[58]; // make sizeof == 512 143d140 < kIOHibernateOptionHWEncrypt = 0x00000010, 181c178 < enum --- > enum 191d187 < kIOHibernateHandoffTypeVolumeCryptKey = kIOHibernateHandoffType + 7, 202c198 < enum --- > enum 312a309,310 > void IOOpenDebugDataFile(const char *fname, uint64_t size); > void IOCloseDebugDataFile(); 331c329 < kern_return_t --- > kern_return_t 337c335 < kern_return_t --- > kern_return_t 344c342 < kern_return_t --- > kern_return_t 349c347 < kern_return_t --- > kern_return_t 352c350 < kern_return_t --- > kern_return_t 379c377 < boolean_t preflight, --- > boolean_t preflight, 383c381 < // mark pages to be saved, or pages not to be saved but available --- > // mark pages to be saved, or pages not to be saved but available 407c405 < void --- > void 410c408 < boolean_t --- > boolean_t 419c417 < uintptr_t --- > uintptr_t 433c431 < hibernate_newruntime_map(void * map, vm_size_t map_size, --- > hibernate_newruntime_map(void * map, vm_size_t map_size, 543,544c541,542 < kIOPreviewImageIndexDesktop = 0, < kIOPreviewImageIndexLockScreen = 1, --- > kIOPreviewImageIndexDesktop = 0, > kIOPreviewImageIndexLockScreen = 1, 554c552 < }; --- > }; NO DIFFS in ./iokit/IOKit/IOReportTypes.h
NO DIFFS in ./iokit/IOKit/IODataQueue.h

./iokit/IOKit/IOCommandGate.h differences detected: 94d93 < APPLE_KEXT_WSHADOW_PUSH; 96d94 < APPLE_KEXT_WSHADOW_POP; 157,171d154 < #ifdef __BLOCKS__ < /*! @function runActionBlock < @abstract Single thread a call to an action with the target work loop. < @discussion Client function that causes the given action to be called in < a single threaded manner. Beware the work loop's gate is recursive and command < gates can cause direct or indirect re-entrancy. When the executing on a < client's thread runAction will sleep until the work loop's gate opens for < execution of client actions, the action is single threaded against all other < work loop event sources. If the command is disabled the attempt to run a command will be stalled until enable is called. < @param action Block to be executed in the context of the work loop. < @result The return value of action if it was called, kIOReturnBadArgument if action is not defined, kIOReturnAborted if a disabled command gate is free()ed before being reenabled. < */ < IOReturn runActionBlock(ActionBlock action); < #endif /* __BLOCKS__ */ < 205c188 < @discussion Put a thread to sleep waiting for an event but release the gate first. If the event occurs then the commandGate is closed before the function returns. If the thread does not hold the gate, panic. --- > @discussion Put a thread to sleep waiting for an event but release the gate first. If the event occurs then the commandGate is closed before the function returns. 208c191 < @result THREAD_AWAKENED - normal wakeup, THREAD_TIMED_OUT - timeout expired, THREAD_INTERRUPTED - interrupted, THREAD_RESTART - restart operation entirely. */ --- > @result THREAD_AWAKENED - normal wakeup, THREAD_TIMED_OUT - timeout expired, THREAD_INTERRUPTED - interrupted, THREAD_RESTART - restart operation entirely, kIOReturnNotPermitted if the calling thread does not hold the command gate. */ 230c213 < @discussion Put a thread to sleep waiting for an event but release the gate first. If the event occurs or timeout occurs then the commandGate is closed before the function returns. If the thread does not hold the gate, panic. --- > @discussion Put a thread to sleep waiting for an event but release the gate first. If the event occurs or timeout occurs then the commandGate is closed before the function returns. 234c217 < @result THREAD_AWAKENED - normal wakeup, THREAD_TIMED_OUT - timeout expired, THREAD_INTERRUPTED - interrupted, THREAD_RESTART - restart operation entirely. */ --- > @result THREAD_AWAKENED - normal wakeup, THREAD_TIMED_OUT - timeout expired, THREAD_INTERRUPTED - interrupted, THREAD_RESTART - restart operation entirely, kIOReturnNotPermitted if the calling thread does not hold the command gate. */ NO DIFFS in ./iokit/IOKit/IODataQueueShared.h
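For the tree that declares IOCommandGate::runActionBlock() in the hunk above, a minimal usage sketch; set_state_gated is an invented helper and gate/statep/newState are placeholders. The block runs single-threaded behind the gate, like the C-function runAction() variant described in the same hunk.

    #include <IOKit/IOCommandGate.h>

    static IOReturn
    set_state_gated(IOCommandGate *gate, int *statep, int newState)
    {
        return gate->runActionBlock(^{
            *statep = newState;         /* serialized against all other event sources */
            return kIOReturnSuccess;
        });
    }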
NO DIFFS in ./iokit/IOKit/OSMessageNotification.h
NO DIFFS in ./iokit/IOKit/IOKernelReporters.h
NO DIFFS in ./iokit/IOKit/system_management/Makefile

./iokit/IOKit/system_management/IOWatchDogTimer.h differences detected: 41d40 < APPLE_KEXT_WSHADOW_PUSH; 43d41 < APPLE_KEXT_WSHADOW_POP; NO DIFFS in ./iokit/IOKit/Makefile

./iokit/IOKit/IOFilterInterruptEventSource.h differences detected: 69,72d68 < #ifdef __BLOCKS__ < typedef bool (^FilterBlock)(IOFilterInterruptEventSource *sender); < #endif /* __BLOCKS__ */ < 88,91d83 < < #if XNU_KERNEL_PRIVATE < union { Filter filterAction; FilterBlock filterActionBlock; }; < #else /* XNU_KERNEL_PRIVATE */ 93d84 < #endif /* !XNU_KERNEL_PRIVATE */ 102d92 < APPLE_KEXT_WSHADOW_PUSH; 104d93 < APPLE_KEXT_WSHADOW_POP; 122,145d110 < #ifdef __BLOCKS__ < /*! @function filterInterruptEventSource < @abstract Factor method to create and initialise an IOFilterInterruptEventSource. See $link init. < @param owner Owner/client of this event source. < @param provider Service that provides interrupts. < @param intIndex The index of the interrupt within the provider's interrupt sources. < @param action Block for the callout routine of this event source. < @param filter Block to invoke when HW interrupt occurs. < @result a new event source if succesful, 0 otherwise. */ < static IOFilterInterruptEventSource * < filterInterruptEventSource(OSObject *owner, < IOService *provider, < int intIndex, < IOInterruptEventSource::ActionBlock action, < FilterBlock filter); < #endif /* __BLOCKS__ */ < < #if XNU_KERNEL_PRIVATE < enum < { < kFilterBlock = kSubClass0, < }; < #endif < 161d125 < virtual void free( void ) APPLE_KEXT_OVERRIDE; 173,179d136 < #ifdef __BLOCKS__ < /*! @function getFilterActionBlock < @abstract Get'ter for filterAction variable. < @result value of filterAction. */ < FilterBlock getFilterActionBlock() const; < #endif /* __BLOCKS__ */ < NO DIFFS in ./iokit/IOKit/IOLocksPrivate.h
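For the tree that declares FilterBlock and the block-based factory in the IOFilterInterruptEventSource.h hunk above, a usage sketch; attach_filter_interrupt is an invented helper, owner/provider/wl are placeholders, and interrupt index 0 is arbitrary. Parameter order (action block, then filter block) follows the prototype in the hunk.

    #include <IOKit/IOFilterInterruptEventSource.h>
    #include <IOKit/IOWorkLoop.h>

    static IOFilterInterruptEventSource *
    attach_filter_interrupt(OSObject *owner, IOService *provider, IOWorkLoop *wl)
    {
        IOFilterInterruptEventSource *src =
            IOFilterInterruptEventSource::filterInterruptEventSource(owner, provider, 0,
                ^void (IOInterruptEventSource *sender, int count) {
                    /* work-loop level handling; count batches interrupts since the last callout */
                },
                ^bool (IOFilterInterruptEventSource *sender) {
                    /* primary-interrupt filter: return true to schedule the action above */
                    return true;
                });
        if (src && wl->addEventSource(src) != kIOReturnSuccess) {
            src->release();
            src = NULL;
        }
        return src;
    }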
NO DIFFS in ./iokit/IOKit/platform/ApplePlatformExpert.h
NO DIFFS in ./iokit/IOKit/platform/Makefile
NO DIFFS in ./iokit/IOKit/platform/AppleMacIO.h
NO DIFFS in ./iokit/IOKit/platform/AppleNMI.h
NO DIFFS in ./iokit/IOKit/platform/AppleMacIODevice.h

./iokit/IOKit/IOUserClient.h differences detected: 198d197 < APPLE_KEXT_WSHADOW_PUSH; 200d198 < APPLE_KEXT_WSHADOW_POP; 214d211 < IOLock * lock; 216c213 < void * __reserved[4]; --- > void * __reserved[5]; 218c215 < void * __reserved[3]; --- > void * __reserved[4]; 391,430d387 < #if KERNEL_PRIVATE < < /*! < @function copyPortNameForObjectInTask < Make an arbitrary OSObject available to the client task as a port name. < The port does not respond to any IOKit IPC calls. < @param task The task. < @param object The object we want to export to the client. < The port holds a reference on the object, this function does not consume any reference on the object. < @param port_name Returned value is the task's port name. It has one send right created by this function. < @result A return code. < */ < static IOReturn copyPortNameForObjectInTask(task_t task, OSObject *object, < mach_port_name_t * port_name); < < /*! < @function copyObjectForPortNameInTask < Look up an OSObject given a task's port name created with copyPortNameForObjectInTask(). < @param task The task. < @param port_name The task's port name. This function does not consume any reference on the port name. < @param object If the port name is valid, a reference to the object is returned. It should be released by the caller. < @result A return code. < */ < static IOReturn copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name, < OSObject **object); < < /*! < @function adjustPortNameReferencesInTask < Adjust the send rights for a port name created with copyPortNameForObjectInTask(). < @param task The task. < @param port_name The task's port name. < @param delta Signed value change to the number of user references. < @result A return code. < */ < static IOReturn adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta); < < #define IOUC_COPYPORTNAMEFOROBJECTINTASK 1 < < #endif /* KERNEL_PRIVATE */ < NO DIFFS in ./iokit/IOKit/IOStatisticsPrivate.h
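For the tree that adds the KERNEL_PRIVATE copyPortNameForObjectInTask()/copyObjectForPortNameInTask()/adjustPortNameReferencesInTask() helpers in the IOUserClient.h hunk above, a minimal round-trip sketch; export_object_roundtrip is an invented helper and owningTask/obj are placeholders. The reference comments follow the doc comments in the hunk.

    #include <IOKit/IOUserClient.h>

    /* Export "obj" to "owningTask" as a port name, look it up again,
     * then drop the send right that the export created. */
    static IOReturn
    export_object_roundtrip(task_t owningTask, OSObject *obj)
    {
        mach_port_name_t name = MACH_PORT_NULL;
        IOReturn ret = IOUserClient::copyPortNameForObjectInTask(owningTask, obj, &name);
        if (ret != kIOReturnSuccess)
            return ret;

        OSObject *found = NULL;
        if (IOUserClient::copyObjectForPortNameInTask(owningTask, name, &found) == kIOReturnSuccess
            && found) {
            found->release();   /* the lookup returns a caller-owned reference */
        }
        /* drop the single send right created by copyPortNameForObjectInTask() */
        return IOUserClient::adjustPortNameReferencesInTask(owningTask, name, -1);
    }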
NO DIFFS in ./iokit/IOKit/IOStatistics.h
NO DIFFS in ./iokit/IOKit/IOMemoryCursor.h
NO DIFFS in ./iokit/IOKit/IONotifier.h

./iokit/IOKit/IOServicePM.h differences detected: 75d74 < const void *callMethod; NO DIFFS in ./iokit/IOKit/IOSyncer.h
NO DIFFS in ./iokit/IOKit/IOKitDebug.h
NO DIFFS in ./iokit/IOKit/IOKitDiagnosticsUserClient.h
NO DIFFS in ./iokit/IOKit/IODMACommand.h

./iokit/IOKit/IOBSD.h differences detected: 66d65 < extern kern_return_t gIOPolledCoreFileOpenRet;
./iokit/IOKit/IOService.h differences detected: 158,161d157 < #ifdef __BLOCKS__ < typedef void (^IOInterruptActionBlock)(IOService * nub, int source); < #endif /* __BLOCKS__ */ < 174,179d169 < #ifdef __BLOCKS__ < typedef bool (^IOServiceMatchingNotificationHandlerBlock)(IOService * newService, < IONotifier * notifier ); < #endif /* __BLOCKS__ */ < < 192,196d181 < #ifdef __BLOCKS__ < typedef IOReturn (^IOServiceInterestHandlerBlock)( uint32_t messageType, IOService * provider, < void * messageArgument, size_t argSize ); < #endif /* __BLOCKS__ */ < 337d321 < APPLE_KEXT_WSHADOW_PUSH; 339d322 < APPLE_KEXT_WSHADOW_POP; 792,799d774 < < #ifdef __BLOCKS__ < static IONotifier * addMatchingNotification( < const OSSymbol * type, OSDictionary * matching, < SInt32 priority, < IOServiceMatchingNotificationHandlerBlock handler); < #endif /* __BLOCKS__ */ < 1139,1151d1113 < < #ifdef __BLOCKS__ < /*! @function registerInterrupt < @abstract Registers a block handler for a device supplying interrupts. < @discussion This method installs a C function interrupt handler to be called at primary interrupt time for a device's interrupt. Only one handler may be installed per interrupt source. IOInterruptEventSource provides a work loop based abstraction for interrupt delivery that may be more appropriate for work loop based drivers. < @param source The index of the interrupt source in the device. < @param target An object instance to be passed to the interrupt handler. < @param handler The block to be invoked at primary interrupt time when the interrupt occurs. The handler should process the interrupt by clearing the interrupt, or by disabling the source. < @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid; kIOReturnNoResources is returned if the interrupt already has an installed handler. */ < < IOReturn registerInterruptBlock(int source, OSObject *target, < IOInterruptActionBlock handler); < #endif /* __BLOCKS__ */ 1254,1258d1215 < #ifdef __BLOCKS__ < IONotifier * registerInterest(const OSSymbol * typeOfInterest, < IOServiceInterestHandlerBlock handler); < #endif /* __BLOCKS__ */ < 1885c1842 < void reset_watchdog_timer(IOService *obj, int timeout); --- > void reset_watchdog_timer( void ); 1887,1888c1844 < void stop_watchdog_timer ( void ); < void start_watchdog_timer(uint64_t deadline); --- > bool stop_watchdog_timer ( void ); 1893d1848 < bool getBlockingDriverCall(thread_t *thread, const void **callMethod);
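For the tree that adds the block-based IOService::addMatchingNotification() overload in the hunk above, a usage sketch; watch_iomedia is an invented helper and the "IOMedia" class name is just an example. gIOMatchedNotification and serviceMatching() are the pre-existing notification inputs, and the block's bool return is assumed to follow the convention of the existing C-function handler.

    #include <IOKit/IOService.h>

    static IONotifier *
    watch_iomedia(void)
    {
        OSDictionary *matching = IOService::serviceMatching("IOMedia");
        if (!matching)
            return NULL;

        IONotifier *notifier = IOService::addMatchingNotification(
            gIOMatchedNotification, matching, 0,
            ^bool (IOService *newService, IONotifier *note) {
                /* act on each newly matched service */
                return true;
            });

        /* assumed to mirror the non-block overload, which does not consume "matching" */
        matching->release();
        return notifier;        /* notifier->remove() when no longer interested */
    }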
./iokit/IOKit/IORegistryEntry.h differences detected: 60,67d59 < #ifdef KERNEL_PRIVATE < enum < { < kIORegistryEntryIndexedPropertyCLPC = 0, < kIORegistryEntryIndexedPropertyCount, < }; < #endif /* KERNEL_PRIVATE */ < 291,295d282 < #ifdef KERNEL_PRIVATE < OSObject * setIndexedProperty(uint32_t index, OSObject * anObject); < OSObject * getIndexedProperty(uint32_t index) const; < #endif /* KERNEL_PRIVATE */ <
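For the tree that adds the KERNEL_PRIVATE indexed-property accessors in the IORegistryEntry.h hunk above, a call-shape sketch; set_clpc_slot is an invented helper and entry is a placeholder. Only the enum and the two prototypes come from the hunk, so no claim is made here about how references to the stored or returned objects are managed.

    #include <IOKit/IORegistryEntry.h>
    #include <libkern/c++/OSNumber.h>

    static void
    set_clpc_slot(IORegistryEntry *entry)
    {
        OSNumber *value = OSNumber::withNumber(42ULL, 64);
        if (value) {
            /* ownership of "value" and of the returned OSObject* is not shown in this
             * report, so neither is released here */
            (void) entry->setIndexedProperty(kIORegistryEntryIndexedPropertyCLPC, value);
        }
        OSObject *current = entry->getIndexedProperty(kIORegistryEntryIndexedPropertyCLPC);
        (void) current;
    }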
./iokit/IOKit/pwr_mgt/IOPM.h differences detected: 252,254c252,253 < * entering Deep Sleep state when on battery power and when remaining < * battery capacity is below a particular threshold (e.g., 50%.) The < * property is not present if Deep Sleep is unsupported. --- > * entering Deep Sleep state. The property is not present if Deep Sleep is > * unsupported. 258,274d256 < /* kIOPMDeepSleepDelayHighKey < * Key refers to a CFNumberRef that represents the delay in seconds before < * entering Deep Sleep state. This is used instead of the value specified by < * kIOPMDeepSleepDelayKey if the remaining battery capacity is above a < * particular threshold (e.g. 50%) or on AC power. The property is not < * present if Deep Sleep is unsupported. < */ < #define kIOPMDeepSleepDelayHighKey "High Standby Delay" < < /* kIOPMLowBatteryThresholdKey < * Key refers to a CFNumberRef that represents the threshold used to choose < * between the normal deep sleep delay and the high deep sleep delay (as a < * percentage of total battery capacity remaining.) The property is not < * present if Deep Sleep is unsupported. < */ < #define kIOPMStandbyBatteryThresholdKey "Standby Battery Threshold" < 635d616 < #define kIOPMPSAdapterDetailsErrorFlagsKey "ErrorFlags" 637d617 < #define kIOPMPSAdapterDetailsCloakedKey "CloakedSource" 652,654d631 < kIOPSFamilyCodeUSBCBrick = iokit_family_err(sub_iokit_usb, 8), < kIOPSFamilyCodeUSBCTypeC = iokit_family_err(sub_iokit_usb, 9), < kIOPSFamilyCodeUSBCPD = iokit_family_err(sub_iokit_usb, 10), 660,668d636 < kIOPSFamilyCodeExternal5 = iokit_family_err(sub_iokit_pmu, 5), < }; < < // values for kIOPMPSAdapterDetailsErrorFlagsKey < enum { < kIOPSAdapterErrorFlagNoErrors = 0, < kIOPSAdapterErrorFlagInsufficientAvailablePower = (1 << 1), < kIOPSAdapterErrorFlagForeignObjectDetected = (1 << 2), < kIOPSAdapterErrorFlagDeviceNeedsToBeRepositioned = (1 << 3), NO DIFFS in ./iokit/IOKit/pwr_mgt/IOPMinformeeList.h
NO DIFFS in ./iokit/IOKit/pwr_mgt/IOPMlog.h
NO DIFFS in ./iokit/IOKit/pwr_mgt/IOPowerConnection.h
NO DIFFS in ./iokit/IOKit/pwr_mgt/Makefile
NO DIFFS in ./iokit/IOKit/pwr_mgt/IOPMinformee.h

./iokit/IOKit/pwr_mgt/RootDomain.h differences detected: 514c514 < void traceDetail(OSObject *notifier, bool start); --- > void traceDetail(OSObject *notifier); 555a556 > static void saveTimeoutAppStackShot(void *p0, void *p1); 556a558,559 > void swdDebugSetup(); > void swdDebugTeardown(); 559d561 < uint32_t getWatchdogTimeout(); 581a584,587 > static bool IONVRAMMatchPublished( void * target, void * refCon, > IOService * newService, > IONotifier * notifier); > 650a657,658 > thread_call_t swdDebugSetupEntry; > thread_call_t swdDebugTearDownEntry; 782c790 < void * swd_compressed_buffer; --- > uint8_t swd_DebugImageSetup; 784,785d791 < thread_t notifierThread; < OSObject *notifierObject; 788a795,796 > IOMemoryMap * swd_logBufMap; /* Memory with sleep/wake logs from previous boot */ > 847a856 > void evaluateWranglerAssertions(); 853,855d861 < void getFailureData(thread_t *thread, char *failureStr, size_t strLen); < void saveFailureData2File(); < void tracePhase2String(uint32_t tracePhase, const char **phaseString, const char **description); 857a864,866 > void sleepWakeDebugDumpFromMem(IOMemoryMap *logBufMap); > void sleepWakeDebugDumpFromFile( ); > IOMemoryMap *sleepWakeDebugRetrieve(); 858a868,874 > errno_t sleepWakeDebugCopyFile( struct vnode *srcVp, > vfs_context_t srcCtx, > char *tmpBuf, uint64_t tmpBufSize, > uint64_t srcOffset, > const char *dstFname, > uint64_t numBytes, > uint32_t crc); NO DIFFS in ./iokit/IOKit/pwr_mgt/IOPMLibDefs.h

./iokit/IOKit/pwr_mgt/IOPMPrivate.h differences detected: 860,863c860,861 < #define SWD_STACKSHOT_SIZE (40*PAGE_SIZE) < #define SWD_COMPRESSED_BUFSIZE (5*PAGE_SIZE) < #define SWD_ZLIB_BUFSIZE (10*PAGE_SIZE) < #define SWD_STACKSHOT_VAR_PREFIX "sleepwake_diags" --- > #define SWD_BUF_SIZE (40*PAGE_SIZE) > #define SWD_INITIAL_STACK_SIZE ((SWD_BUF_SIZE/2)-sizeof(swd_hdr)) 867d864 < #define SWD_MAX_STACKSHOTS (10) 886,890c883,901 < #define kOSWatchdogStacksFilename "/var/log/OSXWatchdogStacks.gz" < #define kOSWatchdogFailureStringFile "/var/log/OSWatchdogFailureString.txt" < #define kSleepWakeStacksFilename "/var/log/SleepWakeStacks.gz" < #define kSleepWakeFailureStringFile "/var/log/SleepWakeFailureString.txt" < --- > #define kSleepWakeStackBinFilename "/var/log/SleepWakeStacks.bin" > #define kSleepWakeStackFilename "/var/log/SleepWakeStacks.dump" > #define kSleepWakeLogFilename "/var/log/SleepWakeLog.dump" > #define kAppleOSXWatchdogStackFilename "/var/log/AppleOSXWatchdogStacks.dump" > #define kAppleOSXWatchdogLogFilename "/var/log/AppleOSXWatchdogLog.dump" > > inline char const* getDumpStackFilename(swd_hdr *hdr) > { > if (hdr && hdr->is_osx_watchdog) > return kAppleOSXWatchdogStackFilename; > return kSleepWakeStackFilename; > } > > inline char const* getDumpLogFilename(swd_hdr *hdr) > { > if (hdr && hdr->is_osx_watchdog) > return kAppleOSXWatchdogLogFilename; > return kSleepWakeLogFilename; > } NO DIFFS in ./iokit/IOKit/pwr_mgt/IOPMpowerState.h
NO DIFFS in ./iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h

./iokit/IOKit/pwr_mgt/IOPMPowerSource.h differences detected: 43d42 < NO DIFFS in ./iokit/IOKit/IOInterleavedMemoryDescriptor.h
NO DIFFS in ./iokit/IOKit/IOKernelReportStructs.h

./iokit/IOKit/IOBufferMemoryDescriptor.h differences detected: 73d72 < APPLE_KEXT_WSHADOW_PUSH; 84d82 < APPLE_KEXT_WSHADOW_POP; NO DIFFS in ./iokit/IOKit/IOCommand.h
NO DIFFS in ./iokit/IOKit/IOCatalogue.h
NO DIFFS in ./iokit/IOKit/IODMAController.h

./iokit/IOKit/IOInterruptEventSource.h differences detected: 73,76d72 < #ifdef __BLOCKS__ < typedef void (^ActionBlock)(IOInterruptEventSource *sender, int count); < #endif /* __BLOCKS__ */ < 111d106 < APPLE_KEXT_WSHADOW_PUSH; 113d107 < APPLE_KEXT_WSHADOW_POP; 144,163d137 < < #ifdef __BLOCKS__ < /*! @function interruptEventSource < @abstract Factory function for IOInterruptEventSources creation and initialisation. < @param owner Owning client of the new event source. < @param provider IOService that represents the interrupt source. When no provider is defined the event source assumes that the client will in some manner call the interruptOccured method explicitly. This will start the ball rolling for safe delivery of asynchronous event's into the driver. < @param intIndex The index of the interrupt within the provider's interrupt sources. < @param action Block for the callout routine of this event source.. < @result A new interrupt event source if successfully created and initialised, 0 otherwise. */ < static IOInterruptEventSource * < interruptEventSource(OSObject *owner, < IOService *provider, < int intIndex, < ActionBlock action); < #endif /* __BLOCKS__ */ < < #if XNU_KERNEL_PRIVATE < static void actionToBlock(OSObject *owner, IOInterruptEventSource *sender, int count); < #endif /* XNU_KERNEL_PRIVATE */ < NO DIFFS in ./iokit/IOKit/IOInterruptAccountingPrivate.h
NO DIFFS in ./iokit/IOKit/IOReportMacros.h

./iokit/IOKit/IOPolledInterface.h differences detected: 94,96c94 < virtual IOReturn setEncryptionKey(const uint8_t * key, size_t keySize); < < OSMetaClassDeclareReservedUsed(IOPolledInterface, 0); --- > OSMetaClassDeclareReservedUnused(IOPolledInterface, 0); 122,129d119 < // kern_open_file_for_direct_io() flags < enum < { < kIOPolledFileCreate = 0x00000001, < kIOPolledFileHibernate = 0x00000002, < }; < < // kern_open_file_for_direct_io() oflags 132c122 < kIOPolledFileSSD = 0x00000001 --- > kIOPolledFileSSD = 0x00000001 185,186c175 < IOReturn IOPolledFileOpen(const char * filename, < uint32_t flags, --- > IOReturn IOPolledFileOpen(const char * filename, 191c180 < uint8_t * volumeCryptKey, size_t * keySize); --- > uint8_t * volumeCryptKey, size_t keySize); 224,226d212 < extern __C IOReturn IOPolledFilePollersSetEncryptionKey(IOPolledFileIOVars * vars, < const uint8_t * key, size_t keySize); < 236,237c222 < kern_open_file_for_direct_io(const char * name, < uint32_t flags, --- > kern_open_file_for_direct_io(const char * name, boolean_t create_file, NO DIFFS in ./iokit/IOKit/IOTimeStamp.h
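For the tree whose IOPolledFileOpen() prototype takes the new flags word (kIOPolledFileCreate / kIOPolledFileHibernate) in the hunk above, the corefile call site in the IOKitBSDInit.cpp hunk earlier gives the full argument shape. An isolated sketch follows; open_polled_file is an invented helper, path/bytes are placeholders, and the middle parameters are interpreted from that call site rather than from the elided prototype.

    #include <IOKit/IOPolledInterface.h>

    static IOReturn
    open_polled_file(const char *path, uint64_t bytes)
    {
        IOPolledFileIOVars *vars = NULL;
        IOReturn err = IOPolledFileOpen(path,
                                        kIOPolledFileCreate,   /* new flags word */
                                        bytes,                 /* requested file size */
                                        0,                     /* free-space reserve (kIOCoreDumpFreeSize at the real call site) */
                                        NULL, 0,
                                        &vars, NULL,
                                        NULL, 0);              /* not requesting the volume crypt key */
        if (err == kIOReturnSuccess) {
            /* ... IOPolledFilePollersSetup(), writes ..., then close as the call site does: */
            IOPolledFileClose(&vars, NULL, NULL, 0, 0, 0);
        }
        return err;
    }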

./iokit/IOKit/IOSharedDataQueue.h differences detected: 35,37d34 < #ifdef enqueue < #undef enqueue < #endif 154,159d150 < #ifdef PRIVATE < /* workaround for queue.h redefine, please do not use */ < __inline__ Boolean enqueue_tail(void *data, UInt32 dataSize) { return (IOSharedDataQueue::enqueue(data, dataSize)); } < #endif < < #if APPLE_KEXT_VTABLE_PADDING 168d158 < #endif NO DIFFS in ./iokit/IOKit/IOSharedLock.h
NO DIFFS in ./iokit/IOKit/machine/Makefile
NO DIFFS in ./iokit/IOKit/IODMAEventSource.h
NO DIFFS in ./iokit/IOKit/IOInterruptAccounting.h
NO DIFFS in ./iokit/IOKit/rtc/Makefile

./iokit/IOKit/rtc/IORTCController.h differences detected: 58c58 < ExpansionData *iortc_reserved __unused; --- > ExpansionData *reserved; NO DIFFS in ./iokit/IOKit/IOCommandQueue.h
NO DIFFS in ./iokit/IOKit/IOCommandPool.h

./iokit/IOKit/IOInterrupts.h differences detected: 53,62d52 < #ifdef XNU_KERNEL_PRIVATE < < struct IOInterruptSourcePrivate { < void * vectorBlock; < }; < typedef struct IOInterruptSourcePrivate IOInterruptSourcePrivate; < < #endif /* XNU_KERNEL_PRIVATE */ < <
./iokit/IOKit/IOInterruptController.h differences detected: 77c77 < ExpansionData *ioic_reserved; --- > ExpansionData *reserved; 138c138 < ExpansionData *iosic_reserved __unused; --- > ExpansionData *reserved;
./iokit/IOKit/IOTimerEventSource.h differences detected: 133d132 < APPLE_KEXT_WSHADOW_PUSH; 135d133 < APPLE_KEXT_WSHADOW_POP; 162,165d159 < #ifdef __BLOCKS__ < typedef void (^ActionBlock)(IOTimerEventSource *sender); < #endif /* __BLOCKS__ */ < 178,193d171 < #ifdef __BLOCKS__ < /*! @function timerEventSource < @abstract Allocates and returns an initialized timer instance. < @param options Mask of kIOTimerEventSourceOptions* options. < @param inOwner The object that that will be passed to the Action callback. < @param action Block for the callout routine of this event source. < */ < static IOTimerEventSource * < timerEventSource(uint32_t options, OSObject *inOwner, ActionBlock action); < #endif /* __BLOCKS__ */ < < #if XNU_KERNEL_PRIVATE < __inline__ void invokeAction(IOTimerEventSource::Action action, IOTimerEventSource * ts, < OSObject * owner, IOWorkLoop * workLoop); < #endif /* XNU_KERNEL_PRIVATE */ < NO DIFFS in ./iokit/IOKit/system.h
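For the tree that adds the block-based IOTimerEventSource::timerEventSource() factory in the hunk above, the Tests.cpp hunk near the end of this report exercises it; an isolated sketch of the same pattern follows. arm_block_timer is an invented helper, wl is a placeholder work loop, and the 100 ms timeout is arbitrary.

    #include <IOKit/IOTimerEventSource.h>
    #include <IOKit/IOWorkLoop.h>

    static IOTimerEventSource *
    arm_block_timer(IOWorkLoop *wl)
    {
        IOTimerEventSource *timer =
            IOTimerEventSource::timerEventSource(kIOTimerEventSourceOptionsDefault, wl,
                ^(IOTimerEventSource *sender) {
                    /* timer fired, running on the work loop */
                });
        if (timer && wl->addEventSource(timer) == kIOReturnSuccess) {
            timer->setTimeout(100, kMillisecondScale);
        }
        return timer;   /* removeEventSource() + release() when done */
    }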

./iokit/IOKit/IOKitKeysPrivate.h differences detected: 5c5 < * --- > * 14c14 < * --- > * 17c17 < * --- > * 25c25 < * --- > * 57d56 < #define kIOBridgeBootSessionUUIDKey "bridge-boot-session-uuid" /* value is OSData */
./iokit/IOKit/IOMemoryDescriptor.h differences detected: 111a112 > #ifdef XNU_KERNEL_PRIVATE 112a114 > #endif
./iokit/IOKit/IOKitServer.h differences detected: 116,193d115 < < #ifdef XNU_KERNEL_PRIVATE < < #ifdef __cplusplus < extern "C" { < #endif /* __cplusplus */ < < #include < < /* < * Functions in iokit:IOUserClient.cpp < */ < < extern void iokit_add_reference( io_object_t obj, ipc_kobject_type_t type ); < < extern ipc_port_t iokit_port_for_object( io_object_t obj, < ipc_kobject_type_t type ); < < extern kern_return_t iokit_client_died( io_object_t obj, < ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount ); < < extern kern_return_t < iokit_client_memory_for_type( < io_object_t connect, < unsigned int type, < unsigned int * flags, < vm_address_t * address, < vm_size_t * size ); < < /* < * Functions in osfmk:iokit_rpc.c < */ < < extern ipc_port_t iokit_alloc_object_port( io_object_t obj, < ipc_kobject_type_t type ); < < extern kern_return_t iokit_destroy_object_port( ipc_port_t port ); < < extern mach_port_name_t iokit_make_send_right( task_t task, < io_object_t obj, ipc_kobject_type_t type ); < < extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta ); < < extern io_object_t iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task); < < extern io_object_t iokit_lookup_connect_ref_current_task(mach_port_name_t name); < < extern void iokit_retain_port( ipc_port_t port ); < extern void iokit_release_port( ipc_port_t port ); < extern void iokit_release_port_send( ipc_port_t port ); < < extern void iokit_lock_port(ipc_port_t port); < extern void iokit_unlock_port(ipc_port_t port); < < extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type ); < < /* < * Functions imported by iokit:IOMemoryDescriptor.cpp < */ < < extern ppnum_t IOGetLastPageNumber(void); < < extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa, < mach_vm_size_t length, unsigned int mapFlags); < < extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length); < < extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va, < mach_vm_size_t length, unsigned int options); < < extern unsigned int IODefaultCacheBits(addr64_t pa); < < #ifdef __cplusplus < } /* extern "C" */ < #endif /* __cplusplus */ < < #endif /* MACH_KERNEL_PRIVATE */ < NO DIFFS in ./iokit/IOKit/AppleKeyStoreInterface.h
NO DIFFS in ./iokit/IOKit/nvram/Makefile
NO DIFFS in ./iokit/IOKit/nvram/IONVRAMController.h

./iokit/IOKit/IOMultiMemoryDescriptor.h differences detected: 121,122d120 < virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE; < NO DIFFS in ./iokit/IOKit/IOLocks.h
NO DIFFS in ./iokit/IOKit/IOLib.h

./iokit/IOKit/IOWorkLoop.h differences detected: 77,81d76 < < #ifdef __BLOCKS__ < typedef IOReturn (^ActionBlock)(); < #endif /* __BLOCKS__ */ < 300,309d294 < #ifdef __BLOCKS__ < /*! @function runAction < @abstract Single thread a call to an action with the work-loop. < @discussion Client function that causes the given action to be called in a single threaded manner. Beware: the work-loop's gate is recursive and runAction can cause direct or indirect re-entrancy. When executing on a client's thread, runAction will sleep until the work-loop's gate opens for execution of client actions, the action is single threaded against all other work-loop event sources. < @param action Block to be executed in work-loop context. < @result Returns the result of the action block. < */ < IOReturn runActionBlock(ActionBlock action); < #endif /* __BLOCKS__ */ < NO DIFFS in ./iokit/IOKit/assert.h
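For the tree that adds IOWorkLoop::runActionBlock() in the hunk above, a minimal sketch distinct from the command-gate variant shown earlier; read_counter_gated is an invented helper and wl/counter are placeholders. __block lets the block write back into the local.

    #include <IOKit/IOWorkLoop.h>

    static IOReturn
    read_counter_gated(IOWorkLoop *wl, int *counter, int *out)
    {
        __block int snapshot = 0;
        IOReturn ret = wl->runActionBlock(^{
            snapshot = *counter;    /* executes behind the work loop's gate */
            return kIOReturnSuccess;
        });
        if (ret == kIOReturnSuccess)
            *out = snapshot;
        return ret;
    }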
NO DIFFS in ./iokit/IOKit/IODeviceMemory.h
NO DIFFS in ./iokit/IOKit/IORangeAllocator.h

./iokit/IOKit/IOCPU.h differences detected: 69c69 < ExpansionData *iocpu_reserved; --- > ExpansionData *reserved; 127c127 < ExpansionData *iocpuic_reserved; --- > ExpansionData *reserved; NO DIFFS in ./iokit/Tests/TestDevice.cpp
NO DIFFS in ./iokit/Tests/Tests.h
NO DIFFS in ./iokit/Tests/TestCollections.cpp

./iokit/Tests/Tests.cpp differences detected: 32c32 < #define TEST_HEADERS 0 --- > #define TEST_HEADERS 0 179a180,181 > #include > #include 183,190d184 < #include < #include < #include < #include < #include < #include < #include < 206d199 < IOInterruptEventSource * ies; 214c207 < clock_interval_to_deadline(100, kMillisecondScale, &gIOWorkLoopTestDeadline); --- > clock_interval_to_deadline(2000, kMillisecondScale, &gIOWorkLoopTestDeadline); 222,251d214 < < int value = 3; < < tes = IOTimerEventSource::timerEventSource(kIOTimerEventSourceOptionsDefault, wl, ^(IOTimerEventSource * tes){ < kprintf("wl %p, value %d\n", wl, value); < }); < err = wl->addEventSource(tes); < assert(kIOReturnSuccess == err); < < value = 2; < tes->setTimeout(1, kNanosecondScale); < IOSleep(1); < wl->removeEventSource(tes); < tes->release(); < < ies = IOInterruptEventSource::interruptEventSource(wl, NULL, 0, ^void(IOInterruptEventSource *sender, int count){ < kprintf("ies block %p, %d\n", sender, count); < }); < < assert(ies); < kprintf("ies %p\n", ies); < err = wl->addEventSource(ies); < assert(kIOReturnSuccess == err); < ies->interruptOccurred(NULL, NULL, 0); < IOSleep(1); < ies->interruptOccurred(NULL, NULL, 0); < IOSleep(1); < wl->removeEventSource(ies); < ies->release(); < 257,381d219 < static int < OSCollectionTest(int newValue) < { < OSArray * array = OSArray::withCapacity(8); < array->setObject(kOSBooleanTrue); < array->setObject(kOSBooleanFalse); < array->setObject(kOSBooleanFalse); < array->setObject(kOSBooleanTrue); < array->setObject(kOSBooleanFalse); < array->setObject(kOSBooleanTrue); < < __block unsigned int index; < index = 0; < array->iterateObjects(^bool(OSObject * obj) { < kprintf("%d:%d ", index, (obj == kOSBooleanTrue) ? 1 : (obj == kOSBooleanFalse) ? 0 : 2); < index++; < return (false); < }); < kprintf("\n"); < array->release(); < < OSDictionary * dict = IOService::resourceMatching("hello"); < assert(dict); < index = 0; < dict->iterateObjects(^bool(const OSSymbol * sym, OSObject * obj) { < OSString * str = OSDynamicCast(OSString, obj); < assert(str); < kprintf("%d:%s=%s\n", index, sym->getCStringNoCopy(), str->getCStringNoCopy()); < index++; < return (false); < }); < dict->release(); < < OSSerializer * serializer = OSSerializer::withBlock(^bool(OSSerialize * s){ < return (gIOBSDUnitKey->serialize(s)); < }); < assert(serializer); < IOService::getPlatform()->setProperty("OSSerializer_withBlock", serializer); < serializer->release(); < < return (0); < } < < #if 0 < #include < class TestUserClient : public IOUserClient < { < OSDeclareDefaultStructors(TestUserClient); < virtual void stop( IOService *provider) APPLE_KEXT_OVERRIDE; < virtual bool finalize(IOOptionBits options) APPLE_KEXT_OVERRIDE; < virtual IOReturn externalMethod( uint32_t selector, < IOExternalMethodArguments * arguments, < IOExternalMethodDispatch * dispatch, < OSObject * target, < void * reference ) APPLE_KEXT_OVERRIDE; < }; < < void TestUserClient::stop( IOService *provider) < { < kprintf("TestUserClient::stop\n"); < } < bool TestUserClient::finalize(IOOptionBits options) < { < kprintf("TestUserClient::finalize\n"); < return(true); < } < IOReturn TestUserClient::externalMethod( uint32_t selector, < IOExternalMethodArguments * arguments, < IOExternalMethodDispatch * dispatch, < OSObject * target, < void * reference ) < { < getProvider()->terminate(); < IOSleep(500); < return (0); < } < OSDefineMetaClassAndStructors(TestUserClient, IOUserClient); < #endif < < static int < IOServiceTest(int newValue) < { < OSDictionary * matching; < 
IONotifier * note; < __block IOService * found; < < #if 0 < found = new IOService; < found->init(); < found->setName("IOTestUserClientProvider"); < found->attach(IOService::getPlatform()); < found->setProperty("IOUserClientClass", "TestUserClient"); < found->registerService(); < #endif < < matching = IOService::serviceMatching("IOPlatformExpert"); < assert(matching); < found = nullptr; < note = IOService::addMatchingNotification(gIOMatchedNotification, matching, 0, < ^bool(IOService * newService, IONotifier * notifier) { < kprintf("found %s, %d\n", newService->getName(), newService->getRetainCount()); < found = newService; < found->retain(); < return (true); < } < ); < assert(note); < assert(found); < matching->release(); < note->remove(); < < note = found->registerInterest(gIOBusyInterest, < ^IOReturn(uint32_t messageType, IOService * provider, < void * messageArgument, size_t argSize) { < kprintf("%p messageType 0x%08x %p\n", provider, messageType, messageArgument); < return (kIOReturnSuccess); < }); < assert(note); < IOSleep(1*1000); < note->remove(); < found->release(); < < return (0); < } < 394,418d231 < if (changed && (66==newValue)) < { < IOReturn ret; < IOWorkLoop * wl = IOWorkLoop::workLoop(); < IOCommandGate * cg = IOCommandGate::commandGate(wl); < ret = wl->addEventSource(cg); < < struct x < { < uint64_t h; < uint64_t l; < }; < struct x y; < < y.h = 0x1111111122222222; < y.l = 0x3333333344444444; < < kprintf("ret1 %d\n", ret); < ret = cg->runActionBlock(^(){ < printf("hello %d 0x%qx\n", wl->inGate(), y.h); < return 99; < }); < kprintf("ret %d\n", ret); < } < 421c234 < OSData * data = OSData::withCapacity(16); --- > OSData * data = OSData::withCapacity(16); 431,434d243 < error = IOServiceTest(newValue); < assert(KERN_SUCCESS == error); < error = OSCollectionTest(newValue); < assert(KERN_SUCCESS == error); 444,445c253,254 < CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, < 0, 0, sysctl_iokittest, "I", ""); --- > CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, > 0, 0, sysctl_iokittest, "I", ""); NO DIFFS in ./iokit/Tests/TestIOMemoryDescriptor.cpp
NO DIFFS in ./iokit/Tests/TestContainers.cpp
NO DIFFS in ./iokit/Examples/drvGenericInterruptController/GenericInterruptController.cpp
NO DIFFS in ./iokit/Examples/drvGenericInterruptController/GenericInterruptController.h
NO DIFFS in ./iokit/.clang-format

./iokit/conf/Makefile.x86_64 differences detected: 12c12 < IOHibernateRestoreKernel.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check $(CFLAGS_NOLTO_FLAG) --- > IOHibernateRestoreKernel.o_CFLAGS_ADD += -fno-stack-protector $(CFLAGS_NOLTO_FLAG) NO DIFFS in ./iokit/conf/files.arm64
NO DIFFS in ./iokit/conf/Makefile.template
NO DIFFS in ./iokit/conf/Makefile
NO DIFFS in ./iokit/conf/Makefile.arm64
NO DIFFS in ./iokit/conf/Makefile.arm
NO DIFFS in ./iokit/conf/files.x86_64

./iokit/conf/files differences detected: 110d109 < NO DIFFS in ./iokit/conf/copyright
NO DIFFS in ./iokit/conf/files.arm
NO DIFFS in ./iokit/Kernel/IOPMPowerSource.cpp
NO DIFFS in ./iokit/Kernel/IOSyncer.cpp
NO DIFFS in ./iokit/Kernel/IODeviceTreeSupport.cpp

./iokit/Kernel/IOServicePMPrivate.h differences detected: 189,193d188 < IOLock * WatchdogLock; < OSArray * BlockedArray; < uint64_t PendingResponseDeadline; < uint64_t WatchdogDeadline; < 368,371d362 < #define fWatchdogDeadline pwrMgt->WatchdogDeadline < #define fWatchdogLock pwrMgt->WatchdogLock < #define fBlockedArray pwrMgt->BlockedArray < #define fPendingResponseDeadline pwrMgt->PendingResponseDeadline 471,472c462 < #define WATCHDOG_SLEEP_TIMEOUT (180) // 180 secs < #define WATCHDOG_WAKE_TIMEOUT (180) // 180 secs --- > #define WATCHDOG_TIMER_PERIOD (300) // 300 secs 474,475c464 < #define WATCHDOG_SLEEP_TIMEOUT (180) // 180 secs < #define WATCHDOG_WAKE_TIMEOUT (180) // 180 secs --- > #define WATCHDOG_TIMER_PERIOD (180) // 180 secs
./iokit/Kernel/IOMemoryCursor.cpp differences detected: 247,250d246 < #if IOPhysSize == 64 < OSWriteBigInt64(segment, 0, inSegment.location); < OSWriteBigInt64(segment, sizeof(IOPhysicalAddress), inSegment.length); < #else 253d248 < #endif 299,302d293 < #if IOPhysSize == 64 < OSWriteLittleInt64(segment, 0, inSegment.location); < OSWriteLittleInt64(segment, sizeof(IOPhysicalAddress), inSegment.length); < #else 305d295 < #endif
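The IOMemoryCursor.cpp diff writes 64-bit segment address/length pairs with OSWriteBigInt64 / OSWriteLittleInt64 when IOPhysSize is 64. A standalone sketch of an endianness-explicit 64-bit store, used here as a userspace stand-in for OSWriteBigInt64 (the segment layout is illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Write a 64-bit value into a byte buffer in big-endian order regardless of
    // host endianness; a stand-in for the kernel's OSWriteBigInt64().
    static void writeBigInt64(void *base, size_t offset, uint64_t value) {
        uint8_t *p = static_cast<uint8_t *>(base) + offset;
        for (int i = 0; i < 8; ++i)
            p[i] = static_cast<uint8_t>(value >> (8 * (7 - i)));
    }

    struct PhysicalSegment {        // illustrative: address + length pair
        uint64_t location;
        uint64_t length;
    };

    int main() {
        PhysicalSegment seg = {0x0000000123456000ULL, 0x1000ULL};
        uint8_t out[16];

        // Lay the segment out as a big-endian descriptor expects:
        // 8 bytes of address followed by 8 bytes of length.
        writeBigInt64(out, 0, seg.location);
        writeBigInt64(out, sizeof(uint64_t), seg.length);

        for (size_t i = 0; i < sizeof(out); ++i) std::printf("%02x ", out[i]);
        std::printf("\n");
        return 0;
    }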
./iokit/Kernel/IODMACommand.cpp differences detected: 382,383c382 < fInternalState->fSetActiveNoMapper = (!fMapper); < if (fInternalState->fSetActiveNoMapper) mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0); --- > if (!fMapper) mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0); 403c402 < if (fInternalState->fSetActiveNoMapper) fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0); --- > if (!fMapper) fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0); 826a826,827 > state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem)); > NO DIFFS in ./iokit/Kernel/IOInterruptAccounting.cpp
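The IODMACommand.cpp diff stops re-checking !fMapper at completion time and instead records a fSetActiveNoMapper flag when the memory is marked DMA-active, so teardown mirrors setup exactly. A minimal sketch of that "record the decision, don't re-derive it" pattern (class and member names are hypothetical):

    #include <cassert>
    #include <cstdio>

    struct Memory {
        int active = 0;
        void setDMAActive()   { ++active; }
        void setDMAInactive() { --active; }
    };

    class DMACommand {
    public:
        explicit DMACommand(bool hasMapper) : fHasMapper(hasMapper) {}

        void prepare(Memory *mem) {
            fMemory = mem;
            // Record *now* whether we took the "no mapper" path...
            fSetActiveNoMapper = !fHasMapper;
            if (fSetActiveNoMapper) fMemory->setDMAActive();
        }

        void complete() {
            // ...and undo exactly what prepare() did, based on the saved flag,
            // not on whatever fHasMapper happens to be at completion time.
            if (fSetActiveNoMapper) fMemory->setDMAInactive();
            fMemory = nullptr;
        }

    private:
        Memory *fMemory = nullptr;
        bool fHasMapper;
        bool fSetActiveNoMapper = false;
    };

    int main() {
        Memory mem;
        DMACommand cmd(/*hasMapper=*/false);
        cmd.prepare(&mem);
        cmd.complete();
        assert(mem.active == 0);            // activate/deactivate stay balanced
        std::printf("active count: %d\n", mem.active);
        return 0;
    }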

./iokit/Kernel/IONVRAM.cpp differences detected: 38a39,45 > #if CONFIG_MACF > extern "C" { > #include > #include > }; > #endif /* MAC */ > 114,116c121 < IOLockLock(_ofLock); < (void) syncVariables(); < IOLockUnlock(_ofLock); --- > syncOFVariables(); 292c297,301 < ( ! (variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task) )) { } --- > ( ! (variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task) ) > #if CONFIG_MACF > && (current_task() == kernel_task || mac_iokit_check_nvram_get(kauth_cred_get(), key->getCStringNoCopy()) == 0) > #endif > ) { } 323a333,338 > #if CONFIG_MACF > if (current_task() != kernel_task && > mac_iokit_check_nvram_get(kauth_cred_get(), aKey->getCStringNoCopy()) != 0) > return 0; > #endif > 370,372c385,387 < OSString *tmpString = 0; < OSObject *propObject = 0, *oldObject; < --- > OSString *tmpString; > OSObject *propObject = 0; > 374c389 < --- > 377c392,393 < if (IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege) != kIOReturnSuccess) { --- > result = IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege); > if (result != kIOReturnSuccess) { 384a401,406 > #if CONFIG_MACF > if (current_task() != kernel_task && > mac_iokit_check_nvram_set(kauth_cred_get(), aKey->getCStringNoCopy(), anObject) != 0) > return false; > #endif > 391c413 < --- > 395c417 < --- > 399c421 < --- > 411c433 < --- > 415,419d436 < < oldObject = _ofDict->getObject(aKey); < if (oldObject) { < oldObject->retain(); < } 420a438 > IOLockUnlock(_ofLock); 423,432c441 < if (syncVariables() != kIOReturnSuccess) { < if (oldObject) { < _ofDict->setObject(aKey, oldObject); < } < else { < _ofDict->removeObject(aKey); < } < (void) syncVariables(); < result = false; < } --- > syncOFVariables(); 434,443c443 < < if (oldObject) { < oldObject->release(); < } < if (tmpString) { < propObject->release(); < } < < IOLockUnlock(_ofLock); < --- > 464a465,470 > #if CONFIG_MACF > if (current_task() != kernel_task && > mac_iokit_check_nvram_delete(kauth_cred_get(), aKey->getCStringNoCopy()) != 0) > return; > #endif > 471a478 > IOLockUnlock(_ofLock); 474c481 < (void) syncVariables(); --- > syncOFVariables(); 476,477d482 < < IOLockUnlock(_ofLock); 756,760d760 < return kIOReturnUnsupported; < } < < IOReturn IODTNVRAM::syncVariables(void) < { 767,769c767 < < IOLockAssert(_ofLock, kIOLockAssertOwned); < --- > 771c769 < --- > 775c773 < --- > 778a777 > IOLockLock(_ofLock); 781c780 < --- > 785c784 < --- > 788c787 < --- > 790c789 < --- > 799c798,799 < --- > IOLockUnlock(_ofLock); > 803c803 < --- > 805c805 < --- > 807c807 < --- > 809c809 < return _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); --- > _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); 811,812c811,812 < < return kIOReturnNotReady; --- > > return kIOReturnSuccess; 1379c1379 < OSData *oldData, *escapedData; --- > OSData *oldData; 1403d1402 < 1406c1405 < --- > 1434c1433 < --- > 1480c1479 < --- > 1482,1484c1481,1483 < escapedData = escapeDataToData(value); < ok &= (escapedData != 0); < if (ok) ok &= data->appendBytes(escapedData); --- > oldData = escapeDataToData(value); > ok &= (oldData != 0); > if (ok) ok &= data->appendBytes(oldData); 1488d1486 < oldData->retain(); 1492a1491 > IOLockUnlock(_ofLock); 1495,1512c1494 < if (ok) { < if (syncVariables() != kIOReturnSuccess) { < if (oldData) { < _ofDict->setObject(_registryPropertiesKey, oldData); < } < else { < _ofDict->removeObject(_registryPropertiesKey); < } < (void) syncVariables(); < ok = false; < } < } < < if (oldData) { < 
oldData->release(); < } < < IOLockUnlock(_ofLock); --- > if (ok) syncOFVariables();
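In the IONVRAM.cpp diff, setProperty now retains the previous value before installing the new one and, if syncing the variables fails, restores (or removes) the old object and re-syncs, all while holding _ofLock. A userspace sketch of that snapshot-and-roll-back shape using std::map and std::mutex; the sync step is faked and all names are illustrative:

    #include <cstdio>
    #include <map>
    #include <mutex>
    #include <optional>
    #include <string>

    class VarStore {
    public:
        // Returns false (and leaves the store unchanged) if the backing sync fails.
        bool setProperty(const std::string &key, const std::string &value) {
            std::lock_guard<std::mutex> lock(mMutex);

            // Snapshot the previous value so a failed sync can be rolled back.
            std::optional<std::string> old;
            auto it = mVars.find(key);
            if (it != mVars.end()) old = it->second;

            mVars[key] = value;

            if (!syncVariables()) {                 // sync failed: undo the change
                if (old) mVars[key] = *old;
                else     mVars.erase(key);
                (void)syncVariables();              // best-effort re-sync of old state
                return false;
            }
            return true;
        }

        void failNextSync() { mFailNext = true; }   // test hook

    private:
        bool syncVariables() {                      // stand-in for writing to NVRAM
            if (mFailNext) { mFailNext = false; return false; }
            return true;
        }

        std::map<std::string, std::string> mVars;
        std::mutex mMutex;
        bool mFailNext = false;
    };

    int main() {
        VarStore store;
        store.setProperty("boot-args", "debug=0x144");
        store.failNextSync();
        bool ok = store.setProperty("boot-args", "debug=0x0");
        std::printf("second set %s; prior value kept on failure\n", ok ? "succeeded" : "failed");
        return 0;
    }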
./iokit/Kernel/IOMultiMemoryDescriptor.cpp differences detected: 397,424d396 < < uint64_t IOMultiMemoryDescriptor::getPreparationID( void ) < { < < if (!super::getKernelReserved()) < { < return (kIOPreparationIDUnsupported); < } < < for (unsigned index = 0; index < _descriptorsCount; index++) < { < uint64_t preparationID = _descriptors[index]->getPreparationID(); < < if ( preparationID == kIOPreparationIDUnsupported ) < { < return (kIOPreparationIDUnsupported); < } < < if ( preparationID == kIOPreparationIDUnprepared ) < { < return (kIOPreparationIDUnprepared); < } < } < < super::setPreparationID(); < < return (super::getPreparationID()); < } NO DIFFS in ./iokit/Kernel/IOPMPowerStateQueue.h
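The IOMultiMemoryDescriptor.cpp diff adds getPreparationID(), which walks every sub-descriptor and propagates "unsupported" or "unprepared" if any member reports it, only minting an ID for the aggregate when all members are prepared. A small standalone sketch of that aggregation rule (the sentinel values and names are illustrative, not the IOKit constants):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum : uint64_t {
        kIDUnsupported = 0,     // illustrative sentinels
        kIDUnprepared  = 1,
        kFirstRealID   = 2
    };

    struct SubDescriptor { uint64_t preparationID; };

    // The aggregate is only as prepared as its least-prepared member.
    uint64_t aggregatePreparationID(const std::vector<SubDescriptor> &subs, uint64_t &nextID) {
        for (const auto &d : subs) {
            if (d.preparationID == kIDUnsupported) return kIDUnsupported;
            if (d.preparationID == kIDUnprepared)  return kIDUnprepared;
        }
        return nextID++;        // all members prepared: mint an ID for the aggregate
    }

    int main() {
        uint64_t nextID = kFirstRealID;
        std::vector<SubDescriptor> subs = {{5}, {kIDUnprepared}, {7}};
        std::printf("id = %llu (unprepared member wins)\n",
                    (unsigned long long)aggregatePreparationID(subs, nextID));

        subs[1].preparationID = 6;
        std::printf("id = %llu (freshly minted)\n",
                    (unsigned long long)aggregatePreparationID(subs, nextID));
        return 0;
    }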
NO DIFFS in ./iokit/Kernel/IOPMrootDomainInternal.h

./iokit/Kernel/IOUserClient.cpp differences detected: 43d42 < #include 127a127,135 > // definitions we should get from osfmk > > //typedef struct ipc_port * ipc_port_t; > typedef natural_t ipc_kobject_type_t; > > #define IKOT_IOKIT_SPARE 27 > #define IKOT_IOKIT_CONNECT 29 > #define IKOT_IOKIT_OBJECT 30 > 129a138,159 > extern ipc_port_t iokit_alloc_object_port( io_object_t obj, > ipc_kobject_type_t type ); > > extern kern_return_t iokit_destroy_object_port( ipc_port_t port ); > > extern mach_port_name_t iokit_make_send_right( task_t task, > io_object_t obj, ipc_kobject_type_t type ); > > extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta ); > > extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task); > > extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef); > > extern ipc_port_t master_device_port; > > extern void iokit_retain_port( ipc_port_t port ); > extern void iokit_release_port( ipc_port_t port ); > extern void iokit_release_port_send( ipc_port_t port ); > > extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type ); > 134a165 > 174d204 < static OSDictionary * gIOIdentifierPorts; 180,195c210,215 < switch (type) < { < case IKOT_IOKIT_OBJECT: < dict = &gIOObjectPorts; < break; < case IKOT_IOKIT_CONNECT: < dict = &gIOConnectPorts; < break; < case IKOT_IOKIT_IDENT: < dict = &gIOIdentifierPorts; < break; < default: < panic("dictForType %d", type); < dict = NULL; < break; < } --- > if( IKOT_IOKIT_OBJECT == type ) > dict = &gIOObjectPorts; > else if( IKOT_IOKIT_CONNECT == type ) > dict = &gIOConnectPorts; > else > return( 0 ); 386d405 < virtual OSObject * copyNextObject(); 475,482c494 < assert(false); < return (NULL); < } < < OSObject * < IOUserIterator::copyNextObject() < { < OSObject * ret = NULL; --- > OSObject * ret; 485,488c497,498 < if (userIteratorObject) { < ret = ((OSIterator *)userIteratorObject)->getNextObject(); < if (ret) ret->retain(); < } --- > assert(OSDynamicCast(OSIterator, userIteratorObject)); > ret = ((OSIterator *)userIteratorObject)->getNextObject(); 500c510 < iokit_add_reference( io_object_t obj, ipc_kobject_type_t type ) --- > iokit_add_reference( io_object_t obj ) 502,512c512,513 < IOUserClient * uc; < < if (!obj) return; < < if ((IKOT_IOKIT_CONNECT == type) < && (uc = OSDynamicCast(IOUserClient, obj))) < { < OSIncrementAtomic(&uc->__ipc); < } < < obj->retain(); --- > if( obj) > obj->retain(); 522a524,535 > iokit_add_connect_reference( io_object_t obj ) > { > IOUserClient * uc; > > if (!obj) return; > > if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc); > > obj->retain(); > } > > void 595d607 < IOLockLock(client->lock); 597d608 < IOLockUnlock(client->lock); 628a640 > OSObject * lastEntry; 645d656 < virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE; 683d693 < virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE; 789a800 > OSObject * _lastEntry; 792a804 > _lastEntry = lastEntry; 803a816,818 > if( _lastEntry) > _lastEntry->release(); > 859,863d873 < OSObject * IOServiceUserNotification::getNextObject() < { < assert(false); < return (NULL); < } 865c875 < OSObject * IOServiceUserNotification::copyNextObject() --- > OSObject * IOServiceUserNotification::getNextObject() 868a879 > OSObject * releaseEntry; 871a883 > releaseEntry = lastEntry; 880a893 > lastEntry = result; 883a897,898 > if (releaseEntry) releaseEntry->release(); > 1077,1081d1091 < OSObject * 
IOServiceMessageUserNotification::copyNextObject() < { < return( NULL ); < } < 1452c1462 < if (!owner) ret = kIOReturnNoMemory; --- > if (!newOwner) ret = kIOReturnNoMemory; 1537d1546 < if (lock) IOLockFree(lock); 1652,1666d1660 < < if (obj) obj->release(); < < return kIOReturnSuccess; < } < < IOReturn IOUserClient::copyPortNameForObjectInTask(task_t task, < OSObject *obj, mach_port_name_t * port_name) < { < mach_port_name_t name; < < name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT ); < < *(mach_port_name_t *) port_name = name; < 1670,1686d1663 < IOReturn IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name, < OSObject **obj) < { < OSObject * object; < < object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task); < < *obj = object; < < return (object ? kIOReturnSuccess : kIOReturnIPCError); < } < < IOReturn IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta) < { < return (iokit_mod_send_right(task, port_name, delta)); < } < 2063,2064d2039 < OSIterator * iter; < IOUserIterator * uiter; 2066,2078c2041 < if ((uiter = OSDynamicCast(IOUserIterator, iterator))) < { < obj = uiter->copyNextObject(); < } < else if ((iter = OSDynamicCast(OSIterator, iterator))) < { < obj = iter->getNextObject(); < if (obj) obj->retain(); < } < else < { < return( kIOReturnBadArgument ); < } --- > CHECK( OSIterator, iterator, iter ); 2079a2043 > obj = iter->getNextObject(); 2080a2045 > obj->retain(); 3318,3319c3283,3284 < *iterator = IOUserIterator::withIterator(entry->getChildIterator( < IORegistryEntry::getPlane( plane ))); --- > *iterator = entry->getChildIterator( > IORegistryEntry::getPlane( plane )); 3332,3333c3297,3298 < *iterator = IOUserIterator::withIterator(entry->getParentIterator( < IORegistryEntry::getPlane( plane ))); --- > *iterator = entry->getParentIterator( > IORegistryEntry::getPlane( plane )); 3503d3467 < client->lock = IOLockAlloc(); 3555d3518 < IOLockLock(client->lock); 3557d3519 < IOLockUnlock(client->lock); 3593d3554 < kern_return_t ret; 3597,3601c3558,3559 < IOLockLock(client->lock); < ret = client->registerNotificationPort( port, notification_type, < (io_user_reference_t) reference ); < IOLockUnlock(client->lock); < return (ret); --- > return( client->registerNotificationPort( port, notification_type, > (io_user_reference_t) reference )); 3611d3568 < kern_return_t ret; 3615,3619c3572,3573 < IOLockLock(client->lock); < ret = client->registerNotificationPort( port, notification_type, < reference ); < IOLockUnlock(client->lock); < return (ret); --- > return( client->registerNotificationPort( port, notification_type, > reference )); 3655d3608 < map->release(); 3766d3718 < { 3768,3770d3719 < map->release(); < } < 4970c4919 < if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-secure-management")) --- > if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-management")) 5277c5226 < iokit_lookup_connect_ref_current_task((mach_port_name_t)(uintptr_t)args->userClientRef)))) { --- > iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) { 5299,5316d5247 < /* Routine io_device_tree_entry_exists_with_name */ < kern_return_t is_io_device_tree_entry_exists_with_name( < mach_port_t master_port, < io_name_t name, < boolean_t *exists ) < { < OSCollectionIterator *iter; < < if (master_port != master_device_port) < return (kIOReturnNotPrivileged); < < iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, 
name); < *exists = iter && iter->getNextObject(); < OSSafeReleaseNULL(iter); < < return kIOReturnSuccess; < } < NO DIFFS in ./iokit/Kernel/IOCommand.cpp
NO DIFFS in ./iokit/Kernel/IOPMPowerSourceList.cpp

./iokit/Kernel/IOCommandQueue.cpp differences detected: 232c232 < * test for work is producerIndex != consumerIndex and a signal. --- > * as the test for work is producerIndex != consumerIndex and a signal.
./iokit/Kernel/IOStatistics.cpp differences detected: 765c765 < memset(buffer, 0, calculatedSize); --- > 830c830 < memset(buffer, 0, calculatedSize); --- > NO DIFFS in ./iokit/Kernel/IOConditionLock.cpp

./iokit/Kernel/IOInterruptEventSource.cpp differences detected: 223,235d222 < IOInterruptEventSource * < IOInterruptEventSource::interruptEventSource(OSObject *inOwner, < IOService *inProvider, < int inIntIndex, < ActionBlock inAction) < { < IOInterruptEventSource * ies; < ies = IOInterruptEventSource::interruptEventSource(inOwner, (Action) NULL, inProvider, inIntIndex); < if (ies) ies->setActionBlock((IOEventSource::ActionBlock) inAction); < < return ies; < } < 316d302 < ActionBlock intActionBlock = (ActionBlock) actionBlock; 339,340c325 < if (kActionBlock & flags) (intActionBlock)(this, numInts); < else (*intAction)(owner, this, numInts); --- > (*intAction)(owner, this, numInts); 386,387c371 < if (kActionBlock & flags) (intActionBlock)(this, numInts); < else (*intAction)(owner, this, numInts); --- > (*intAction)(owner, this, -numInts);
./iokit/Kernel/IOKitKernelInternal.h differences detected: 41d40 < #include 100a100,109 > // osfmk/device/iokit_rpc.c > extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa, > mach_vm_size_t length, unsigned int mapFlags); > extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length); > > extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va, > mach_vm_size_t length, unsigned int mapFlags); > > extern ppnum_t IOGetLastPageNumber(void); > 133a143 > UInt8 fLocalMapper; 137d146 < UInt8 fSetActiveNoMapper; 217,219d225 < extern "C" void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size, < void (*output)(const char *format, ...)); < NO DIFFS in ./iokit/Kernel/RootDomainUserClient.h
NO DIFFS in ./iokit/Kernel/IOCommandPool.cpp

./iokit/Kernel/IOServicePM.cpp differences detected: 477,479d476 < fWatchdogLock = IOLockAlloc(); < < fBlockedArray = OSArray::withCapacity(4); 550,559d546 < if (fWatchdogLock) { < IOLockFree(fWatchdogLock); < fWatchdogLock = NULL; < } < < if (fBlockedArray) { < fBlockedArray->release(); < fBlockedArray = NULL; < } < 1096d1082 < getPMRootDomain()->reset_watchdog_timer(this, 0); 1617d1602 < getPMRootDomain()->reset_watchdog_timer(this, 0); 3626d3610 < int maxTimeout = 0; 3669,3671d3652 < if (result > maxTimeout) { < maxTimeout = result; < } 3687d3667 < getPMRootDomain()->reset_watchdog_timer(this, maxTimeout/USEC_PER_SEC+1); 4009d3988 < callEntry.callMethod = OSMemberFunctionCast(const void *, fControllingDriver, &IOService::setPowerState); 4090,4095d4068 < if (fDriverCallReason == kDriverCallInformPreChange) { < callEntry.callMethod = OSMemberFunctionCast(const void *, driver, &IOService::powerStateWillChangeTo); < } < else { < callEntry.callMethod = OSMemberFunctionCast(const void *, driver, &IOService::powerStateDidChangeTo); < } 4307d4279 < getPMRootDomain()->reset_watchdog_timer(this, result/USEC_PER_SEC+1); 5342d5313 < getPMRootDomain()->reset_watchdog_timer(this, 0); 5424,5425c5395,5397 < int timeout; < uint64_t deadline; --- > AbsoluteTime deadline; > boolean_t pending; > static int timeout = -1; 5430c5402,5408 < IOLockLock(fWatchdogLock); --- > if (thread_call_isactive(fWatchdogTimer)) return; > if (timeout == -1) { > PE_parse_boot_argn("swd_timeout", &timeout, sizeof(timeout)); > } > if (timeout < 60) { > timeout = WATCHDOG_TIMER_PERIOD; > } 5432d5409 < timeout = getPMRootDomain()->getWatchdogTimeout(); 5434,5442d5410 < fWatchdogDeadline = deadline; < start_watchdog_timer(deadline); < IOLockUnlock(fWatchdogLock); < } < < void IOService::start_watchdog_timer(uint64_t deadline) < { < < IOLockAssert(fWatchdogLock, kIOLockAssertOwned); 5444,5446c5412,5414 < if (!thread_call_isactive(fWatchdogTimer)) { < thread_call_enter_delayed(fWatchdogTimer, deadline); < } --- > retain(); > pending = thread_call_enter_delayed(fWatchdogTimer, deadline); > if (pending) release(); 5451a5420 > // Returns true if watchdog was enabled and stopped now 5454c5423 < void IOService::stop_watchdog_timer( void ) --- > bool IOService::stop_watchdog_timer( void ) 5456,5459c5425 < if (!fWatchdogTimer || (kIOSleepWakeWdogOff & gIOKitDebug)) < return; < < IOLockLock(fWatchdogLock); --- > boolean_t pending; 5461,5462c5427,5428 < thread_call_cancel(fWatchdogTimer); < fWatchdogDeadline = 0; --- > if (!fWatchdogTimer || (kIOSleepWakeWdogOff & gIOKitDebug)) > return false; 5464,5470c5430,5431 < while (fBlockedArray->getCount()) { < IOService *obj = OSDynamicCast(IOService, fBlockedArray->getObject(0)); < if (obj) { < PM_ERROR("WDOG:Object %s unexpected in blocked array\n", obj->fName); < fBlockedArray->removeObject(0); < } < } --- > pending = thread_call_cancel(fWatchdogTimer); > if (pending) release(); 5472c5433 < IOLockUnlock(fWatchdogLock); --- > return pending; 5479c5440 < void IOService::reset_watchdog_timer(IOService *blockedObject, int pendingResponseTimeout) --- > void IOService::reset_watchdog_timer( void ) 5481,5541c5442,5443 < unsigned int i; < uint64_t deadline; < IOService *obj; < < if (!fWatchdogTimer || (kIOSleepWakeWdogOff & gIOKitDebug)) < return; < < < IOLockLock(fWatchdogLock); < if (!fWatchdogDeadline) { < goto exit; < } < < i = fBlockedArray->getNextIndexOfObject(blockedObject, 0); < if (pendingResponseTimeout == 0) { < blockedObject->fPendingResponseDeadline = 0; < if (i == (unsigned int)-1) { < 
goto exit; < } < fBlockedArray->removeObject(i); < } < else { < // Set deadline 2secs after the expected response timeout to allow < // ack timer to handle the timeout. < clock_interval_to_deadline(pendingResponseTimeout+2, kSecondScale, &deadline); < < if (i != (unsigned int)-1) { < PM_ERROR("WDOG:Object %s is already blocked for responses. Ignoring timeout %d\n", < fName, pendingResponseTimeout); < goto exit; < } < < < for (i = 0; i < fBlockedArray->getCount(); i++) { < obj = OSDynamicCast(IOService, fBlockedArray->getObject(i)); < if (obj && (obj->fPendingResponseDeadline < deadline)) { < blockedObject->fPendingResponseDeadline = deadline; < fBlockedArray->setObject(i, blockedObject); < break; < } < } < if (i == fBlockedArray->getCount()) { < blockedObject->fPendingResponseDeadline = deadline; < fBlockedArray->setObject(blockedObject); < } < } < < obj = OSDynamicCast(IOService, fBlockedArray->getObject(0)); < if (!obj) { < int timeout = getPMRootDomain()->getWatchdogTimeout(); < clock_interval_to_deadline(timeout, kSecondScale, &deadline); < } < else { < deadline = obj->fPendingResponseDeadline; < } < < thread_call_cancel(fWatchdogTimer); < start_watchdog_timer(deadline); < < exit: < IOLockUnlock(fWatchdogLock); --- > if (stop_watchdog_timer()) > start_watchdog_timer(); 5593a5496,5499 > // Stop watchdog if ack is delayed by more than a sec > if (interval * scale > kSecondScale) { > stop_watchdog_timer(); > } 5605a5512,5513 > > start_watchdog_timer(); 5629a5538 > me->start_watchdog_timer(); 5926,5928d5834 < if (IS_ROOT_DOMAIN) { < getPMRootDomain()->reset_watchdog_timer(this, 0); < } 6021d5926 < context.enableTracing = isRootDomain; 6070d5974 < getPMRootDomain()->reset_watchdog_timer(this, context.maxTimeRequested/USEC_PER_SEC+1); 6259c6163 < getPMRootDomain()->traceDetail(notifier, true); --- > getPMRootDomain()->traceDetail(notifier); 6266,6271d6169 < if (context->enableTracing && (notifier != NULL)) < { < getPMRootDomain()->traceDetail(notifier, false); < } < < 6478c6376 < getPMRootDomain()->traceDetail(notifier, true); --- > getPMRootDomain()->traceDetail(notifier); 6485,6488d6382 < if (context->enableTracing && (notifier != NULL)) < { < getPMRootDomain()->traceDetail(notifier, false); < } 6599d6492 < context.enableTracing = IS_ROOT_DOMAIN; 6649,6652d6541 < if (context->enableTracing && object) < { < IOService::getPMRootDomain()->traceDetail(object, true); < } 6654,6659d6542 < if (context->enableTracing && object) < { < IOService::getPMRootDomain()->traceDetail(object, false); < } < < 8094d7976 < getPMRootDomain()->reset_watchdog_timer(this, 0); 8230,8256d8111 < bool IOService::getBlockingDriverCall(thread_t *thread, const void **callMethod) < { < const IOPMDriverCallEntry * entry = NULL; < bool blocked = false; < < if (!initialized) { < return false; < } < < if (current_thread() != gIOPMWatchDogThread) { < // Meant to be accessed only from watchdog thread < return false; < } < < PM_LOCK(); < entry = qe_queue_first(&fPMDriverCallQueue, IOPMDriverCallEntry, link); < if (entry) { < *thread = entry->thread; < *callMethod = entry->callMethod; < blocked = true; < } < PM_UNLOCK(); < < return blocked; < } < <
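The IOServicePM.cpp diff above reworks reset_watchdog_timer() so each object blocked on a response is tracked with its own deadline in fBlockedArray (kept ordered by deadline), and the single watchdog timer is re-armed for the earliest pending deadline, falling back to the default timeout when nothing is blocked. A userspace sketch of that bookkeeping with a std::multimap keyed by deadline; arming the timer is reduced to storing the chosen deadline, and all names are illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>

    class Watchdog {
    public:
        explicit Watchdog(uint64_t defaultTimeout) : mDefault(defaultTimeout) {}

        // timeout == 0 clears the entry; otherwise (re)register with a new deadline.
        void reset(const std::string &who, uint64_t now, uint64_t timeout) {
            // Drop any existing entry for this object.
            for (auto it = mBlocked.begin(); it != mBlocked.end(); ++it) {
                if (it->second == who) { mBlocked.erase(it); break; }
            }
            if (timeout != 0) {
                // Small slack so the normal ack timer gets to fire first.
                mBlocked.emplace(now + timeout + 2, who);
            }
            rearm(now);
        }

        uint64_t armedDeadline() const { return mDeadline; }

    private:
        void rearm(uint64_t now) {
            // Earliest pending deadline wins; fall back to the default timeout.
            mDeadline = mBlocked.empty() ? now + mDefault : mBlocked.begin()->first;
        }

        std::multimap<uint64_t, std::string> mBlocked;   // deadline -> blocked object
        uint64_t mDefault;
        uint64_t mDeadline = 0;
    };

    int main() {
        Watchdog wd(/*defaultTimeout=*/180);
        wd.reset("driverA", /*now=*/100, /*timeout=*/30);   // deadline 132
        wd.reset("driverB", /*now=*/100, /*timeout=*/10);   // deadline 112, earliest
        std::printf("armed for %llu\n", (unsigned long long)wd.armedDeadline());  // 112
        wd.reset("driverB", /*now=*/105, /*timeout=*/0);    // driverB answered
        std::printf("armed for %llu\n", (unsigned long long)wd.armedDeadline());  // 132
        return 0;
    }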
./iokit/Kernel/IOFilterInterruptEventSource.cpp differences detected: 35d34 < #include 127,159d125 < < IOFilterInterruptEventSource *IOFilterInterruptEventSource < ::filterInterruptEventSource(OSObject *inOwner, < IOService *inProvider, < int inIntIndex, < ActionBlock inAction, < FilterBlock inFilterAction) < { < IOFilterInterruptEventSource *me = new IOFilterInterruptEventSource; < < FilterBlock filter = Block_copy(inFilterAction); < if (!filter) return 0; < < if (me < && !me->init(inOwner, (Action) NULL, (Filter) filter, inProvider, inIntIndex)) { < me->release(); < Block_release(filter); < return 0; < } < me->flags |= kFilterBlock; < me->setActionBlock((IOEventSource::ActionBlock) inAction); < < return me; < } < < < void IOFilterInterruptEventSource::free( void ) < { < if ((kFilterBlock & flags) && filterActionBlock) Block_release(filterActionBlock); < < super::free(); < } < 181d146 < if (kFilterBlock & flags) return NULL; 185,190c150,151 < IOFilterInterruptEventSource::FilterBlock < IOFilterInterruptEventSource::getFilterActionBlock() const < { < if (kFilterBlock & flags) return filterActionBlock; < return (NULL); < } --- > > 211,212c172 < if (kFilterBlock & flags) filterRes = (filterActionBlock)(this); < else filterRes = (*filterAction)(owner, this); --- > filterRes = (*filterAction)(owner, this); 253,254c213 < if (kFilterBlock & flags) filterRes = (filterActionBlock)(this); < else filterRes = (*filterAction)(owner, this); --- > filterRes = (*filterAction)(owner, this);
./iokit/Kernel/IOLib.cpp differences detected: 551,552c551,552 < < if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) return (0); --- > adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader; > if (adjustedSize < size) return (0); 1195,1228d1194 < void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size, < void (*output)(const char *format, ...)) < { < uint8_t c, chars[17]; < size_t idx; < < output("%s(0x%x):\n", title, size); < if (size > 4096) size = 4096; < chars[16] = idx = 0; < while (true) { < if (!(idx & 15)) { < if (idx) output(" |%s|\n", chars); < if (idx >= size) break; < output("%04x: ", idx); < } < else if (!(idx & 7)) output(" "); < < c = ((char *)buffer)[idx]; < output("%02x ", c); < chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' '; < < idx++; < if ((idx == size) && (idx & 15)) { < chars[idx & 15] = 0; < while (idx & 15) { < idx++; < output(" "); < } < } < } < } < < /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ <
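The IOLib.cpp diff replaces the hand-rolled wrap check (adjustedSize = (2 * size) + header; if (adjustedSize < size) return 0;) with os_mul_and_add_overflow(), which reports overflow from the multiply as well as the add. A standalone sketch of the same guard using the compiler's checked-arithmetic builtins (clang/gcc) as a userspace stand-in for the kernel's os/overflow.h helper:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Compute 2*size + headerSize, refusing the request if either step overflows.
    // Mirrors the shape of os_mul_and_add_overflow(2, size, header, &adjusted).
    static bool adjustedAllocSize(size_t size, size_t headerSize, size_t *out) {
        size_t doubled, total;
        if (__builtin_mul_overflow(size, (size_t)2, &doubled)) return false;
        if (__builtin_add_overflow(doubled, headerSize, &total)) return false;
        *out = total;
        return true;
    }

    int main() {
        size_t adjusted;
        if (adjustedAllocSize(64, 16, &adjusted))
            std::printf("ok: %zu bytes\n", adjusted);          // 144

        // A size for which 2*size wraps; the checked builtins reject it up front.
        if (!adjustedAllocSize(SIZE_MAX / 2 + 8, 16, &adjusted))
            std::printf("rejected oversized request\n");
        return 0;
    }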
./iokit/Kernel/IORangeAllocator.cpp differences detected: 145,146c145 < if (os_add_overflow(capacity, capacityIncrement, &newCapacity)) < return( false ); --- > newCapacity = capacity + capacityIncrement;
./iokit/Kernel/IODataQueue.cpp differences detected: 172c172 < // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers --- > head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); 174d173 < head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE); 242,243c241,242 < // Publish the data we just enqueued < __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE); --- > // Store tail with a release memory barrier > __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE); 245,263c244,252 < if (tail != head) { < // < // The memory barrier below paris with the one in ::dequeue < // so that either our store to the tail cannot be missed by < // the next dequeue attempt, or we will observe the dequeuer < // making the queue empty. < // < // Of course, if we already think the queue is empty, < // there's no point paying this extra cost. < // < __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); < head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); < } < < if (tail == head) { < // Send notification (via mach message) that data is now available. < sendDataAvailableNotification(); < } < return true; --- > // Send notification (via mach message) that data is available. > > if ( ( head == tail ) /* queue was empty prior to enqueue() */ > || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED) ) ) /* queue was emptied during enqueue() */ > { > sendDataAvailableNotification(); > } > > return true; NO DIFFS in ./iokit/Kernel/i386/IOKeyStoreHelper.cpp
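The IODataQueue.cpp diff re-orders the enqueue path: the new tail is published with a release store, and when the queue did not look empty the enqueuer issues a sequentially-consistent fence before re-reading head, so either the dequeuer cannot miss the new tail or the enqueuer observes the queue going empty and sends the data-available notification. A userspace sketch of just that barrier placement with std::atomic (standing in for the kernel's __c11_atomic intrinsics); the ring-buffer copy and the mach notification are reduced to stubs:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    struct QueueControl {
        std::atomic<uint32_t> head{0};   // consumer-owned index
        std::atomic<uint32_t> tail{0};   // producer-owned index
    };

    static void sendDataAvailableNotification() {
        std::puts("notify: data available");     // stand-in for the mach message
    }

    // Producer side: the data bytes are assumed to have already been copied into
    // the ring at [tail, newTail) before this is called.
    void publish(QueueControl &q, uint32_t tail, uint32_t newTail) {
        uint32_t head = q.head.load(std::memory_order_acquire);

        // Publish the data we just enqueued.
        q.tail.store(newTail, std::memory_order_release);

        if (tail != head) {
            // Pairs with a matching fence on the dequeue side: either the consumer
            // sees our tail store, or we see it having emptied the queue and notify.
            std::atomic_thread_fence(std::memory_order_seq_cst);
            head = q.head.load(std::memory_order_relaxed);
        }

        if (tail == head) {
            // Queue was (or just became) empty: wake the consumer.
            sendDataAvailableNotification();
        }
    }

    int main() {
        QueueControl q;
        publish(q, /*tail=*/0, /*newTail=*/16);   // empty queue -> notification fires
        return 0;
    }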

./iokit/Kernel/IOInterruptController.cpp differences detected: 468a469,470 > > reserved = NULL;
./iokit/Kernel/IOEventSource.cpp differences detected: 39d38 < #include 166,167d164 < < if ((kActionBlock & flags) && actionBlock) Block_release(actionBlock); 175,195c172 < void IOEventSource::setRefcon(void *newrefcon) < { < refcon = newrefcon; < } < < void * IOEventSource::getRefcon() const < { < return refcon; < } < < IOEventSource::Action IOEventSource::getAction() const < { < if (kActionBlock & flags) return NULL; < return (action); < } < < IOEventSource::ActionBlock IOEventSource::getActionBlock(ActionBlock) const < { < if (kActionBlock & flags) return actionBlock; < return (NULL); < } --- > IOEventSource::Action IOEventSource::getAction () const { return action; }; 199d175 < if ((kActionBlock & flags) && actionBlock) Block_release(actionBlock); 203,209d178 < void IOEventSource::setActionBlock(ActionBlock block) < { < if ((kActionBlock & flags) && actionBlock) Block_release(actionBlock); < actionBlock = Block_copy(block); < flags |= kActionBlock; < } < NO DIFFS in ./iokit/Kernel/IOStateReporter.cpp
NO DIFFS in ./iokit/Kernel/printPlist
NO DIFFS in ./iokit/Kernel/IOInterleavedMemoryDescriptor.cpp

./iokit/Kernel/IORegistryEntry.cpp differences detected: 36c36 < #include --- > 63,66c63,65 < IORecursiveLock * fLock; < uint64_t fRegistryEntryID; < SInt32 fRegistryEntryGenerationCount; < OSObject **_Atomic fIndexedProperties; --- > IORecursiveLock * fLock; > uint64_t fRegistryEntryID; > SInt32 fRegistryEntryGenerationCount; 408,416c407 < if (reserved->fIndexedProperties) < { < for (int idx = 0; idx < kIORegistryEntryIndexedPropertyCount; idx++) < { < if (reserved->fIndexedProperties[idx]) reserved->fIndexedProperties[idx]->release(); < } < IODelete(reserved->fIndexedProperties, OSObject *, kIORegistryEntryIndexedPropertyCount); < } < if (reserved->fLock) IORecursiveLockFree(reserved->fLock); --- > if (reserved->fLock) IORecursiveLockFree(reserved->fLock); 756,789d746 < OSObject * IORegistryEntry::setIndexedProperty(uint32_t index, OSObject * anObject) < { < OSObject ** array; < OSObject * prior; < < if (index >= kIORegistryEntryIndexedPropertyCount) return (0); < < array = atomic_load_explicit(&reserved->fIndexedProperties, memory_order_acquire); < if (!array) < { < array = IONew(OSObject *, kIORegistryEntryIndexedPropertyCount); < if (!array) return (0); < bzero(array, kIORegistryEntryIndexedPropertyCount * sizeof(array[0])); < if (!OSCompareAndSwapPtr(NULL, array, &reserved->fIndexedProperties)) IODelete(array, OSObject *, kIORegistryEntryIndexedPropertyCount); < } < if (!reserved->fIndexedProperties) return (0); < < prior = reserved->fIndexedProperties[index]; < if (anObject) anObject->retain(); < reserved->fIndexedProperties[index] = anObject; < < return (prior); < } < < OSObject * IORegistryEntry::getIndexedProperty(uint32_t index) const < { < if (index >= kIORegistryEntryIndexedPropertyCount) return (0); < if (!reserved->fIndexedProperties) return (0); < < return (reserved->fIndexedProperties[index]); < } < < /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ < 932c889 < isEqual = (sym && sym->isEqualTo(name)); --- > isEqual = sym->isEqualTo( name ); NO DIFFS in ./iokit/Kernel/IOReportLegend.cpp
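The IORegistryEntry.cpp diff adds setIndexedProperty(), which lazily allocates the fIndexedProperties array and publishes it with an acquire load plus compare-and-swap, discarding the local copy if another thread won the race. A standalone sketch of that once-only publication using std::atomic<T*> and compare_exchange_strong as stand-ins for atomic_load_explicit / OSCompareAndSwapPtr (slot count and names are illustrative):

    #include <atomic>
    #include <cstdio>

    constexpr unsigned kSlotCount = 16;   // illustrative; the kernel has its own constant

    class IndexedSlots {
    public:
        ~IndexedSlots() { delete[] mSlots.load(std::memory_order_relaxed); }

        // Returns the previously stored value (nullptr if none or out of range).
        void *setSlot(unsigned index, void *value) {
            if (index >= kSlotCount) return nullptr;

            void **array = mSlots.load(std::memory_order_acquire);
            if (!array) {
                // Lazily allocate, then try to publish; if another thread already
                // published an array, discard ours and use theirs.
                void **fresh = new void *[kSlotCount]();   // zero-initialized
                void **expected = nullptr;
                if (mSlots.compare_exchange_strong(expected, fresh)) {
                    array = fresh;
                } else {
                    delete[] fresh;
                    array = expected;      // the winner's array
                }
            }

            void *prior = array[index];
            array[index] = value;          // per-slot updates are not synchronized here
            return prior;
        }

    private:
        std::atomic<void **> mSlots{nullptr};
    };

    int main() {
        IndexedSlots slots;
        int a = 1, b = 2;
        slots.setSlot(3, &a);
        void *prior = slots.setSlot(3, &b);
        std::printf("prior == &a: %s\n", prior == &a ? "yes" : "no");
        return 0;
    }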

./iokit/Kernel/IOCommandGate.cpp differences detected: 165,177d164 < < static IOReturn IOCommandGateActionToBlock(OSObject *owner, < void *arg0, void *arg1, < void *arg2, void *arg3) < { < return ((IOEventSource::ActionBlock) arg0)(); < } < < IOReturn IOCommandGate::runActionBlock(ActionBlock action) < { < return (runAction(&IOCommandGateActionToBlock, action)); < } < 291,294c278,279 < if (!workLoop->inGate()) { < /* The equivalent of 'msleep' while not holding the mutex is invalid */ < panic("invalid commandSleep while not holding the gate"); < } --- > if (!workLoop->inGate()) > return kIOReturnNotPermitted; 301,304c286,287 < if (!workLoop->inGate()) { < /* The equivalent of 'msleep' while not holding the mutex is invalid */ < panic("invalid commandSleep while not holding the gate"); < } --- > if (!workLoop->inGate()) > return kIOReturnNotPermitted; NO DIFFS in ./iokit/Kernel/IODMAEventSource.cpp
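The IOCommandGate.cpp diff makes commandSleep() panic when called without the work-loop gate held ("the equivalent of 'msleep' while not holding the mutex is invalid") rather than returning kIOReturnNotPermitted. A userspace sketch of that precondition change using std::condition_variable, where waiting is only legal with the associated lock held and a caller without it is treated as a programming error (class and method names are illustrative):

    #include <condition_variable>
    #include <cstdio>
    #include <cstdlib>
    #include <mutex>

    class Gate {
    public:
        // Sleeping on an event is only meaningful while the gate is held; as in
        // the kernel change, a caller that doesn't hold it is a programming error,
        // not a soft failure to report back.
        void commandSleep(std::unique_lock<std::mutex> &gate) {
            if (!gate.owns_lock() || gate.mutex() != &mMutex) {
                std::fprintf(stderr, "commandSleep called without holding the gate\n");
                std::abort();                       // stand-in for panic()
            }
            mCond.wait(gate, [this] { return mSignaled; });
        }

        void commandWakeup() {
            std::lock_guard<std::mutex> lock(mMutex);
            mSignaled = true;
            mCond.notify_all();
        }

        std::unique_lock<std::mutex> openGate() {
            return std::unique_lock<std::mutex>(mMutex);
        }

    private:
        std::mutex mMutex;
        std::condition_variable mCond;
        bool mSignaled = false;
    };

    int main() {
        Gate gate;
        gate.commandWakeup();                       // pre-signal so the wait returns
        auto held = gate.openGate();
        gate.commandSleep(held);                    // legal: gate held
        std::printf("woke with gate held\n");
        return 0;
    }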

./iokit/Kernel/IOHibernateInternal.h differences detected: 56d55 < uint8_t hwEncrypt; 59,60c58 < size_t volumeCryptKeySize; < uint8_t volumeCryptKey[64]; --- > uint8_t volumeCryptKey[kIOHibernateAESKeySize / 8]; NO DIFFS in ./iokit/Kernel/IOMapper.cpp
NO DIFFS in ./iokit/Kernel/IOHibernateRestoreKernel.c

./iokit/Kernel/IOPolledInterface.cpp differences detected: 47c47 < OSMetaClassDefineReservedUsed(IOPolledInterface, 0); --- > OSMetaClassDefineReservedUnused(IOPolledInterface, 0); 296,327d295 < < /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ < < IOReturn IOPolledInterface::setEncryptionKey(const uint8_t * key, size_t keySize) < { < return (kIOReturnUnsupported); < } < < /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ < < IOReturn < IOPolledFilePollersSetEncryptionKey(IOPolledFileIOVars * filevars, < const uint8_t * key, size_t keySize) < { < IOReturn ret = kIOReturnUnsupported; < IOReturn err; < int32_t idx; < IOPolledFilePollers * vars = filevars->pollers; < IOPolledInterface * poller; < < for (idx = 0; < (poller = (IOPolledInterface *) vars->pollers->getObject(idx)); < idx++) < { < poller = (IOPolledInterface *) vars->pollers->getObject(idx); < err = poller->setEncryptionKey(key, keySize); < if (kIOReturnSuccess == err) ret = err; < } < < return (ret); < } < 506c474 < uint8_t * volumeCryptKey, size_t * keySize) --- > uint8_t * volumeCryptKey, size_t keySize) 514d481 < size_t callerKeySize; 521d487 < callerKeySize = *keySize; 525,533c491,493 < err = part->callPlatformFunction(APFSMEDIA_GETHIBERKEY, false, &volUuid, volumeCryptKey, keySize, keySize); < if (kIOReturnBadArgument == err) < { < // apfs fails on buffer size >32 < *keySize = 32; < err = part->callPlatformFunction(APFSMEDIA_GETHIBERKEY, false, &volUuid, volumeCryptKey, keySize, keySize); < } < if (err != kIOReturnSuccess) *keySize = 0; < else --- > size_t sizeOut = 0; > err = part->callPlatformFunction(APFSMEDIA_GETHIBERKEY, false, &volUuid, volumeCryptKey, &keySize, &sizeOut); > if (err == kIOReturnSuccess) 567,568c527,528 < if (vek.key.keybytecount <= callerKeySize) *keySize = vek.key.keybytecount; < bcopy(&vek.key.keybytes[0], volumeCryptKey, *keySize); --- > if (vek.key.keybytecount < keySize) keySize = vek.key.keybytecount; > bcopy(&vek.key.keybytes[0], volumeCryptKey, keySize); 587d546 < uint32_t flags, 592c551 < uint8_t * volumeCryptKey, size_t * keySize) --- > uint8_t * volumeCryptKey, size_t keySize) 618c577 < flags, --- > (write_file_addr != NULL) || (0 != setFileSize), NO DIFFS in ./iokit/Kernel/IOSimpleReporter.cpp

./iokit/Kernel/IOPMrootDomain.cpp differences detected: 2c2 < * Copyright (c) 1998-2017 Apple Inc. All rights reserved. --- > * Copyright (c) 1998-2008 Apple Inc. All rights reserved. 62d61 < #include 68,69d66 < #include < 188d184 < extern "C" boolean_t kdp_has_polled_corefile(); 205,206c201 < #define kIOSleepWakeFailureString "SleepWakeFailureString" < #define kIOOSWatchdogFailureString "OSWatchdogFailureString" --- > #define kIOSleepWakeDebugKey "Persistent-memory-note" 339,342d333 < static uint32_t gSwdPanic = 0; < static uint32_t gSwdSleepTimeout = 0; < static uint32_t gSwdWakeTimeout = 0; < static uint32_t gSwdSleepWakeTimeout = 0; 345d335 < 359,360d348 < static AbsoluteTime gUserActiveAbsTime; < static AbsoluteTime gUserInactiveAbsTime; 366,370d353 < z_stream swd_zs; < vm_offset_t swd_zs_zmem; < //size_t swd_zs_zsize; < size_t swd_zs_zoffset; < 660c643 < millis = nano / NSEC_PER_MSEC; --- > millis = nano / 1000000ULL; 679c662 < extern "C" void IOSystemShutdownNotification(int stage) --- > extern "C" void IOSystemShutdownNotification(void) 683,700d665 < if (kIOSystemShutdownNotificationStageRootUnmount == stage) < { < #if !CONFIG_EMBEDDED < uint64_t nano, millis; < startTime = mach_absolute_time(); < IOService::getPlatform()->waitQuiet(30 * NSEC_PER_SEC); < absolutetime_to_nanoseconds(mach_absolute_time() - startTime, &nano); < millis = nano / NSEC_PER_MSEC; < if (gHaltTimeMaxLog && (millis >= gHaltTimeMaxLog)) < { < printf("waitQuiet() for unmount %qd ms\n", millis); < } < #endif < return; < } < < assert(kIOSystemShutdownNotificationStageProcessExit == stage); < 715a681 > gRootDomain->swdDebugTeardown(); 806a773,842 > static void swdDebugSetupCallout( thread_call_param_t p0, thread_call_param_t p1 ) > { > IOPMrootDomain * rootDomain = (IOPMrootDomain *) p0; > uint32_t notifyRef = (uint32_t)(uintptr_t) p1; > > rootDomain->swdDebugSetup(); > > if (p1) { > rootDomain->allowPowerChange(notifyRef); > } > DLOG("swdDebugSetupCallout finish\n"); > } > > void IOPMrootDomain::swdDebugSetup( ) > { > #if HIBERNATION > static int32_t noDebugFile = -1; > if (noDebugFile == -1) { > if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) > noDebugFile = 1; > else if (PE_parse_boot_argn("swd_mem_only", &noDebugFile, sizeof(noDebugFile)) == false) > noDebugFile = 0; > } > > if ((noDebugFile == 1) || (gRootDomain->sleepWakeDebugIsWdogEnabled() == false)) { > return; > } > DLOG("swdDebugSetup state:%d\n", swd_DebugImageSetup); > if (swd_DebugImageSetup == FALSE) { > swd_DebugImageSetup = TRUE; > if (CAP_GAIN(kIOPMSystemCapabilityGraphics) || > (CAP_LOSS(kIOPMSystemCapabilityGraphics))) { > IOHibernateSystemPostWake(true); > IOCloseDebugDataFile(); > } > IOOpenDebugDataFile(kSleepWakeStackBinFilename, SWD_BUF_SIZE); > } > #endif > > > } > > static void swdDebugTeardownCallout( thread_call_param_t p0, thread_call_param_t p1 ) > { > IOPMrootDomain * rootDomain = (IOPMrootDomain *) p0; > uint32_t notifyRef = (uint32_t)(uintptr_t) p1; > > rootDomain->swdDebugTeardown(); > if (p1) { > rootDomain->allowPowerChange(notifyRef); > } > DLOG("swdDebugTeardownCallout finish\n"); > } > > void IOPMrootDomain::swdDebugTeardown( ) > { > > #if HIBERNATION > DLOG("swdDebugTeardown state:%d\n", swd_DebugImageSetup); > if (swd_DebugImageSetup == TRUE) { > swd_DebugImageSetup = FALSE; > IOCloseDebugDataFile(); > } > #endif > > > } > //****************************************************************************** > > 822a859 > swdDebugSetupCallout(p0, NULL); 826a864 > swdDebugTeardownCallout(p0, NULL); 853c891 < return 
(UInt32)(nano / NSEC_PER_MSEC); --- > return (UInt32)(nano / 1000000ULL); 889,890d926 < SYSCTL_QUAD(_kern, OID_AUTO, useractive_abs_time, CTLFLAG_RD|CTLFLAG_LOCKED, &gUserActiveAbsTime, ""); < SYSCTL_QUAD(_kern, OID_AUTO, userinactive_abs_time, CTLFLAG_RD|CTLFLAG_LOCKED, &gUserInactiveAbsTime, ""); 1029,1033d1064 < static SYSCTL_INT(_debug, OID_AUTO, swd_sleep_timeout, CTLFLAG_RW, &gSwdSleepTimeout, 0, ""); < static SYSCTL_INT(_debug, OID_AUTO, swd_wake_timeout, CTLFLAG_RW, &gSwdWakeTimeout, 0, ""); < static SYSCTL_INT(_debug, OID_AUTO, swd_timeout, CTLFLAG_RW, &gSwdSleepWakeTimeout, 0, ""); < static SYSCTL_INT(_debug, OID_AUTO, swd_panic, CTLFLAG_RW, &gSwdPanic, 0, ""); < 1055a1087,1089 > #if defined(__i386__) || defined(__x86_64__) > IONotifier * notifier; > #endif 1101,1103d1134 < PE_parse_boot_argn("swd_sleeptimeout", &gSwdSleepTimeout, sizeof(gSwdSleepTimeout)); < PE_parse_boot_argn("swd_waketimeout", &gSwdWakeTimeout, sizeof(gSwdWakeTimeout)); < PE_parse_boot_argn("swd_timeout", &gSwdSleepWakeTimeout, sizeof(gSwdSleepWakeTimeout)); 1124a1156,1161 > swdDebugSetupEntry = thread_call_allocate( > &swdDebugSetupCallout, > (thread_call_param_t) this); > swdDebugTearDownEntry = thread_call_allocate( > &swdDebugTeardownCallout, > (thread_call_param_t) this); 1161d1197 < clock_get_uptime(&gUserActiveAbsTime); 1248a1285,1293 > if ((tmpDict = serviceMatching("IODTNVRAM"))) > { > notifier = addMatchingNotification( > gIOFirstPublishNotification, tmpDict, > (IOServiceMatchingNotificationHandler) &IONVRAMMatchPublished, > this, 0); > tmpDict->release(); > } > 2262d2306 < notifierThread = current_thread(); 2314a2359 > ((IOService *)this)->stop_watchdog_timer(); //14456299 2340a2386 > ((IOService *)this)->start_watchdog_timer(); //14456299 2548d2593 < notifierThread = NULL; 3020c3065,3077 < if (messageType == kIOMessageSystemCapabilityChange) --- > if (messageType == kIOMessageSystemWillSleep) > { > #if HIBERNATION > IOPowerStateChangeNotification *notify = > (IOPowerStateChangeNotification *)messageArgs; > > notify->returnValue = 30 * 1000 * 1000; > thread_call_enter1( > gRootDomain->swdDebugSetupEntry, > (thread_call_param_t)(uintptr_t) notify->powerRef); > #endif > } > else if (messageType == kIOMessageSystemCapabilityChange) 3086a3144,3162 > else if (CAP_WILL_CHANGE_TO_OFF(params, kIOPMSystemCapabilityGraphics) || > CAP_WILL_CHANGE_TO_ON(params, kIOPMSystemCapabilityGraphics)) > { > // WillChange for Full wake -> Darkwake > params->maxWaitForReply = 30 * 1000 * 1000; > thread_call_enter1( > gRootDomain->swdDebugSetupEntry, > (thread_call_param_t)(uintptr_t) params->notifyRef); > } > else if (CAP_DID_CHANGE_TO_OFF(params, kIOPMSystemCapabilityGraphics) || > CAP_DID_CHANGE_TO_ON(params, kIOPMSystemCapabilityGraphics)) > { > // DidChange for Full wake -> Darkwake > params->maxWaitForReply = 30 * 1000 * 1000; > thread_call_enter1( > gRootDomain->swdDebugTearDownEntry, > (thread_call_param_t)(uintptr_t) params->notifyRef); > > } 3185,3218d3260 < // IOPMGetSleepWakeUUIDKey < // < // Return the truth value of gSleepWakeUUIDIsSet and optionally copy the key. < // To get the full key -- a C string -- the buffer must large enough for < // the end-of-string character. 
< // The key is expected to be an UUID string < //****************************************************************************** < < extern "C" bool < IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len) < { < if (!gSleepWakeUUIDIsSet) { < return (false); < } < < if (buffer != NULL) { < OSString *string; < < string = (OSString *) < gRootDomain->copyProperty(kIOPMSleepWakeUUIDKey); < < if (string == NULL) { < *buffer = '\0'; < } else { < strlcpy(buffer, string->getCStringNoCopy(), buf_len); < < string->release(); < } < } < < return (true); < } < < //****************************************************************************** 3309d3350 < notifierThread = current_thread(); 3339d3379 < notifierThread = NULL; 4433,4434c4473 < && (!((kIOPMSleepFactorStandbyForced|kIOPMSleepFactorAutoPowerOffForced|kIOPMSleepFactorHibernateForced) < & gSleepPolicyVars->sleepFactors))) --- > && (!(kIOPMSleepFactorStandbyForced & gSleepPolicyVars->sleepFactors))) 4875,4879d4913 < // found the display wrangler, check for any display assertions already created < if (pmAssertions->getActivatedAssertions() & kIOPMDriverAssertionPreventDisplaySleepBit) { < DLOG("wrangler setIgnoreIdleTimer\(1) due to pre-existing assertion\n"); < wrangler->setIgnoreIdleTimer( true ); < } 4905c4939 < if (parent->metaCast("IOPCIDevice") || --- > if ((parent == pciHostBridgeDriver) || 4912d4945 < DLOG("delayChildNotification for 0x%llx\n", conn->getRegistryEntryID()); 5614c5647 < ((int)((nsec) / NSEC_PER_MSEC))); --- > ((int)((nsec) / 1000000ULL))); 6159a6193,6194 > // found the display wrangler, check for any display assertions already created > gRootDomain->evaluateWranglerAssertions(); 6169a6205,6248 > #if defined(__i386__) || defined(__x86_64__) > > bool IOPMrootDomain::IONVRAMMatchPublished( > void * target, > void * refCon, > IOService * newService, > IONotifier * notifier) > { > unsigned int len = 0; > IOPMrootDomain *rd = (IOPMrootDomain *)target; > OSNumber *statusCode = NULL; > > if (PEReadNVRAMProperty(kIOSleepWakeDebugKey, NULL, &len)) > { > statusCode = OSDynamicCast(OSNumber, rd->getProperty(kIOPMSleepWakeFailureCodeKey)); > if (statusCode != NULL) { > if (statusCode->unsigned64BitValue() != 0) { > rd->swd_flags |= SWD_BOOT_BY_SW_WDOG; > MSG("System was rebooted due to Sleep/Wake failure\n"); > } > else { > rd->swd_flags |= SWD_BOOT_BY_OSX_WDOG; > MSG("System was non-responsive and was rebooted by watchdog\n"); > } > } > > rd->swd_logBufMap = rd->sleepWakeDebugRetrieve(); > } > if (notifier) notifier->remove(); > return true; > } > > #else > bool IOPMrootDomain::IONVRAMMatchPublished( > void * target, > void * refCon, > IOService * newService, > IONotifier * notifier __unused) > { > return false; > } > > #endif > 6527,6529c6606,6618 < sleepWakeDebugMemAlloc(); < saveFailureData2File(); < --- > if (swd_flags & SWD_VALID_LOGS) { > if (swd_flags & SWD_LOGS_IN_MEM) { > sleepWakeDebugDumpFromMem(swd_logBufMap); > swd_logBufMap->release(); > swd_logBufMap = 0; > } > else if (swd_flags & SWD_LOGS_IN_FILE) > sleepWakeDebugDumpFromFile(); > } > else if (swd_flags & (SWD_BOOT_BY_SW_WDOG|SWD_BOOT_BY_OSX_WDOG)) { > // If logs are invalid, write the failure code > sleepWakeDebugDumpFromMem(NULL); > } 6858,6866c6947,6951 < if (clamshellClosed && clamshellExists) { < DLOG("Ignoring redundant Clamshell close event\n"); < } < else { < DLOG("Clamshell closed\n"); < // Received clamshel open message from clamshell controlling driver < // Update our internal state and tell general interest clients < clamshellClosed = true; < clamshellExists 
= true; --- > DLOG("Clamshell closed\n"); > // Received clamshel open message from clamshell controlling driver > // Update our internal state and tell general interest clients > clamshellClosed = true; > clamshellExists = true; 6868,6869c6953,6954 < // Tell PMCPU < informCPUStateChange(kInformLid, 1); --- > // Tell PMCPU > informCPUStateChange(kInformLid, 1); 6871,6872c6956,6957 < // Tell general interest clients < sendClientClamshellNotification(); --- > // Tell general interest clients > sendClientClamshellNotification(); 6874,6876c6959,6960 < // And set eval_clamshell = so we can attempt < eval_clamshell = true; < } --- > // And set eval_clamshell = so we can attempt > eval_clamshell = true; 7049d7132 < clock_get_uptime(&gUserActiveAbsTime); 7069d7151 < clock_get_uptime(&gUserInactiveAbsTime); 7449c7531 < fullWakeReason, ((int)((nsec) / NSEC_PER_MSEC))); --- > fullWakeReason, ((int)((nsec) / 1000000ULL))); 7558a7641,7656 > void IOPMrootDomain::evaluateWranglerAssertions() > { > if (gIOPMWorkLoop->inGate() == false) { > gIOPMWorkLoop->runAction( > OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::evaluateWranglerAssertions), > (OSObject *)this); > > return; > } > > if (pmAssertions->getActivatedAssertions() & kIOPMDriverAssertionPreventDisplaySleepBit) { > DLOG("wrangler setIgnoreIdleTimer\(1) on matching\n"); > wrangler->setIgnoreIdleTimer( true ); > } > } > 7590c7688 < IOLog("PMStats: Hibernate write took %qd ms\n", delta/NSEC_PER_MSEC); --- > IOLog("PMStats: Hibernate write took %qd ms\n", delta/1000000ULL); 7601c7699 < IOLog("PMStats: Hibernate read took %qd ms\n", delta/NSEC_PER_MSEC); --- > IOLog("PMStats: Hibernate read took %qd ms\n", delta/1000000ULL); 7848c7946 < void IOPMrootDomain::traceDetail(OSObject *object, bool start) --- > void IOPMrootDomain::traceDetail(OSObject *object) 7850,7856c7948 < IOPMServiceInterestNotifier *notifier; < < if (systemBooting) { < return; < } < < notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); --- > IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); 7857a7950 > DLOG("Unknown notifier\n"); 7861c7954 < if (start) { --- > if (!systemBooting) { 7871,7878d7963 < notifierThread = current_thread(); < notifierObject = notifier; < notifier->retain(); < } < else { < notifierThread = NULL; < notifierObject = NULL; < notifier->release(); 7879a7965 > 9635,9648d9720 < uint32_t IOPMrootDomain::getWatchdogTimeout() < { < if (gSwdSleepWakeTimeout) { < gSwdSleepTimeout = gSwdWakeTimeout = gSwdSleepWakeTimeout; < } < if ((pmTracer->getTracePhase() < kIOPMTracePointSystemSleep) || < (pmTracer->getTracePhase() == kIOPMTracePointDarkWakeEntry)) { < return gSwdSleepTimeout ? gSwdSleepTimeout : WATCHDOG_SLEEP_TIMEOUT; < } < else { < return gSwdWakeTimeout ? 
gSwdWakeTimeout : WATCHDOG_WAKE_TIMEOUT; < } < } < 9652a9725,9727 > if ((swd_flags & SWD_WDOG_ENABLED) == 0) > return kIOReturnError; > 9663c9738 < void IOPMrootDomain::tracePhase2String(uint32_t tracePhase, const char **phaseString, const char **description) --- > void IOPMrootDomain::takeStackshot(bool wdogTrigger, bool isOSXWatchdog, bool isSpinDump) 9665,9670c9740,9747 < switch (tracePhase) { < < case kIOPMTracePointSleepStarted: < *phaseString = "kIOPMTracePointSleepStarted"; < *description = "starting sleep"; < break; --- > swd_hdr * hdr = NULL; > addr64_t data[3]; > int wdog_panic = -1; > int stress_rack = -1; > int cnt = 0; > pid_t pid = 0; > kern_return_t kr = KERN_SUCCESS; > uint32_t flags; 9672,9675c9749,9757 < case kIOPMTracePointSleepApplications: < *phaseString = "kIOPMTracePointSleepApplications"; < *description = "notifying applications"; < break; --- > char * dstAddr; > uint32_t size; > uint32_t bytesRemaining; > unsigned bytesWritten = 0; > unsigned totalBytes = 0; > unsigned int len; > OSString * UUIDstring = NULL; > uint64_t code; > IOMemoryMap * logBufMap = NULL; 9677,9680d9758 < case kIOPMTracePointSleepPriorityClients: < *phaseString = "kIOPMTracePointSleepPriorityClients"; < *description = "notifying clients about upcoming system capability changes"; < break; 9682,9685c9760,9761 < case kIOPMTracePointSleepWillChangeInterests: < *phaseString = "kIOPMTracePointSleepWillChangeInterests"; < *description = "creating hibernation file or while calling rootDomain's clients about upcoming rootDomain's state changes"; < break; --- > uint32_t bufSize; > uint32_t initialStackSize; 9687,9690c9763,9770 < case kIOPMTracePointSleepPowerPlaneDrivers: < *phaseString = "kIOPMTracePointSleepPowerPlaneDrivers"; < *description = "calling power state change callbacks"; < break; --- > if (isSpinDump) { > if (_systemTransitionType != kSystemTransitionSleep && > _systemTransitionType != kSystemTransitionWake) > return; > } else { > if ( kIOSleepWakeWdogOff & gIOKitDebug ) > return; > } 9692,9695c9772,9792 < case kIOPMTracePointSleepDidChangeInterests: < *phaseString = "kIOPMTracePointSleepDidChangeInterests"; < *description = "calling rootDomain's clients about rootDomain's state changes"; < break; --- > if (wdogTrigger) { > PE_parse_boot_argn("swd_panic", &wdog_panic, sizeof(wdog_panic)); > PE_parse_boot_argn("stress-rack", &stress_rack, sizeof(stress_rack)); > if ((wdog_panic == 1) || (stress_rack == 1) || (PEGetCoprocessorVersion() >= kCoprocessorVersion2)) { > // If boot-arg specifies to panic then panic. > panic("Sleep/Wake hang detected"); > return; > } > else if (swd_flags & SWD_BOOT_BY_SW_WDOG) { > // If current boot is due to this watch dog trigger restart in previous boot, > // then don't trigger again until at least 1 successful sleep & wake. 
> if (!(sleepCnt && (displayWakeCnt || darkWakeCnt))) { > IOLog("Shutting down due to repeated Sleep/Wake failures\n"); > if (!tasksSuspended) { > tasksSuspended = TRUE; > tasks_system_suspend(true); > } > PEHaltRestart(kPEHaltCPU); > return; > } > } 9697,9700c9794 < case kIOPMTracePointSleepCapabilityClients: < *phaseString = "kIOPMTracePointSleepCapabilityClients"; < *description = "notifying clients about current system capabilities"; < break; --- > } 9702,9705c9796,9802 < case kIOPMTracePointSleepPlatformActions: < *phaseString = "kIOPMTracePointSleepPlatformActions"; < *description = "calling Quiesce/Sleep action callbacks"; < break; --- > if (isSpinDump) { > if (gSpinDumpBufferFull) > return; > if (swd_spindump_buffer == NULL) { > sleepWakeDebugSpinDumpMemAlloc(); > if (swd_spindump_buffer == NULL) return; > } 9707,9710c9804,9808 < case kIOPMTracePointSleepCPUs: < *phaseString = "kIOPMTracePointSleepCPUs"; < *description = "halting all non-boot CPUs"; < break; --- > bufSize = SWD_SPINDUMP_SIZE; > initialStackSize = SWD_INITIAL_SPINDUMP_SIZE; > } else { > if (sleepWakeDebugIsWdogEnabled() == false) > return; 9712,9715c9810,9813 < case kIOPMTracePointSleepPlatformDriver: < *phaseString = "kIOPMTracePointSleepPlatformDriver"; < *description = "executing platform specific code"; < break; --- > if (swd_buffer == NULL) { > sleepWakeDebugMemAlloc(); > if (swd_buffer == NULL) return; > } 9717,9720c9815,9817 < case kIOPMTracePointHibernate: < *phaseString = "kIOPMTracePointHibernate"; < *description = "writing the hibernation image"; < break; --- > bufSize = SWD_BUF_SIZE; > initialStackSize = SWD_INITIAL_STACK_SIZE; > } 9722,9725c9819,9820 < case kIOPMTracePointSystemSleep: < *phaseString = "kIOPMTracePointSystemSleep"; < *description = "in EFI/Bootrom after last point of entry to sleep"; < break; --- > if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) > return; 9727,9730c9822,9827 < case kIOPMTracePointWakePlatformDriver: < *phaseString = "kIOPMTracePointWakePlatformDriver"; < *description = "executing platform specific code"; < break; --- > if (isSpinDump) { > hdr = (swd_hdr *)swd_spindump_buffer; > } > else { > hdr = (swd_hdr *)swd_buffer; > } 9731a9829,9830 > memset(hdr->UUID, 0x20, sizeof(hdr->UUID)); > if ((UUIDstring = OSDynamicCast(OSString, getProperty(kIOPMSleepWakeUUIDKey))) != NULL ) { 9733,9736c9832,9840 < case kIOPMTracePointWakePlatformActions: < *phaseString = "kIOPMTracePointWakePlatformActions"; < *description = "calling Wake action callbacks"; < break; --- > if (wdogTrigger || (!UUIDstring->isEqualTo(hdr->UUID))) { > const char *str = UUIDstring->getCStringNoCopy(); > snprintf(hdr->UUID, sizeof(hdr->UUID), "UUID: %s", str); > } > else { > DLOG("Data for current UUID already exists\n"); > goto exit; > } > } 9738,9741c9842,9843 < case kIOPMTracePointWakeCPUs: < *phaseString = "kIOPMTracePointWakeCPUs"; < *description = "starting non-boot CPUs"; < break; --- > dstAddr = (char*)hdr + hdr->spindump_offset; > bytesRemaining = bufSize - hdr->spindump_offset; 9743,9746c9845,9846 < case kIOPMTracePointWakeWillPowerOnClients: < *phaseString = "kIOPMTracePointWakeWillPowerOnClients"; < *description = "sending kIOMessageSystemWillPowerOn message to kernel and userspace clients"; < break; --- > /* if AppleOSXWatchdog triggered the stackshot, set the flag in the heaer */ > hdr->is_osx_watchdog = isOSXWatchdog; 9748,9751c9848 < case kIOPMTracePointWakeWillChangeInterests: < *phaseString = "kIOPMTracePointWakeWillChangeInterests"; < *description = "calling rootDomain's clients about 
upcoming rootDomain's state changes"; < break; --- > DLOG("Taking snapshot. bytesRemaining: %d\n", bytesRemaining); 9753,9756c9850,9851 < case kIOPMTracePointWakeDidChangeInterests: < *phaseString = "kIOPMTracePointWakeDidChangeInterests"; < *description = "calling rootDomain's clients about completed rootDomain's state changes"; < break; --- > flags = STACKSHOT_KCDATA_FORMAT|STACKSHOT_NO_IO_STATS|STACKSHOT_SAVE_KEXT_LOADINFO; > while (kr == KERN_SUCCESS) { 9758,10222c9853,9866 < case kIOPMTracePointWakePowerPlaneDrivers: < *phaseString = "kIOPMTracePointWakePowerPlaneDrivers"; < *description = "calling power state change callbacks"; < break; < < case kIOPMTracePointWakeCapabilityClients: < *phaseString = "kIOPMTracePointWakeCapabilityClients"; < *description = "informing clients about current system capabilities"; < break; < < case kIOPMTracePointWakeApplications: < *phaseString = "kIOPMTracePointWakeApplications"; < *description = "sending asynchronous kIOMessageSystemHasPoweredOn message to userspace clients"; < break; < < case kIOPMTracePointDarkWakeEntry: < *phaseString = "kIOPMTracePointDarkWakeEntry"; < *description = "entering darkwake on way to sleep"; < break; < < case kIOPMTracePointDarkWakeExit: < *phaseString = "kIOPMTracePointDarkWakeExit"; < *description = "entering fullwake from darkwake"; < break; < < default: < *phaseString = NULL; < *description = NULL; < } < < } < < void IOPMrootDomain::saveFailureData2File( ) < { < unsigned int len = 0; < char failureStr[512]; < errno_t error; < char *outbuf; < bool oswatchdog = false; < < if (!PEReadNVRAMProperty(kIOSleepWakeFailureString, NULL, &len) && < !PEReadNVRAMProperty(kIOOSWatchdogFailureString, NULL, &len) ) { < DLOG("No SleepWake failure or OSWatchdog failure string to read\n"); < return; < } < < if (len == 0) { < DLOG("Ignoring zero byte SleepWake failure string\n"); < goto exit; < } < < if (len > sizeof(failureStr)) { < len = sizeof(failureStr); < } < failureStr[0] = 0; < if (PEReadNVRAMProperty(kIOSleepWakeFailureString, failureStr, &len) == false) { < if (PEReadNVRAMProperty(kIOOSWatchdogFailureString, failureStr, &len)) { < oswatchdog = true; < } < } < if (failureStr[0] != 0) { < error = sleepWakeDebugSaveFile(oswatchdog ? kOSWatchdogFailureStringFile : kSleepWakeFailureStringFile, < failureStr, len); < if (error) { < DLOG("Failed to save SleepWake failure string to file. error:%d\n", error); < } < else { < DLOG("Saved SleepWake failure string to file.\n"); < } < if (!oswatchdog) { < swd_flags |= SWD_BOOT_BY_SW_WDOG; < } < } < < if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) < goto exit; < < if (swd_buffer) { < unsigned int len = 0; < errno_t error; < char nvram_var_name_buffer[20]; < unsigned int concat_len = 0; < swd_hdr *hdr = NULL; < < < hdr = (swd_hdr *)swd_buffer; < outbuf = (char *)hdr + hdr->spindump_offset; < < for (int i=0; i < 8; i++) { < snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, i+1); < if (!PEReadNVRAMProperty(nvram_var_name_buffer, NULL, &len)) { < LOG("No SleepWake blob to read beyond chunk %d\n", i); < break; < } < if (PEReadNVRAMProperty(nvram_var_name_buffer, outbuf+concat_len, &len) == FALSE) { < PERemoveNVRAMProperty(nvram_var_name_buffer); < LOG("Could not read the property :-(\n"); < break; < } < PERemoveNVRAMProperty(nvram_var_name_buffer); < concat_len += len; < } < LOG("Concatenated length for the SWD blob %d\n", concat_len); < < if (concat_len) { < error = sleepWakeDebugSaveFile(oswatchdog ? 
kOSWatchdogStacksFilename : kSleepWakeStacksFilename, < outbuf, concat_len); < if (error) { < LOG("Failed to save SleepWake zipped data to file. error:%d\n", error); < } else { < LOG("Saved SleepWake zipped data to file.\n"); < } < } < < } < else { < LOG("No buffer allocated to save failure stackshot\n"); < } < < < gRootDomain->swd_lock = 0; < exit: < PERemoveNVRAMProperty(oswatchdog ? kIOOSWatchdogFailureString : kIOSleepWakeFailureString); < return; < } < < < void IOPMrootDomain::getFailureData(thread_t *thread, char *failureStr, size_t strLen) < { < IORegistryIterator * iter; < IORegistryEntry * entry; < IOService * node; < bool nodeFound = false; < < const void * callMethod = NULL; < const char * objectName = NULL; < uint32_t timeout = getWatchdogTimeout(); < const char * phaseString = NULL; < const char * phaseDescription = NULL; < < IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, notifierObject); < uint32_t tracePhase = pmTracer->getTracePhase(); < < *thread = NULL; < if ((tracePhase < kIOPMTracePointSystemSleep) || (tracePhase == kIOPMTracePointDarkWakeEntry)) { < snprintf(failureStr, strLen, "%sSleep transition timed out after %d seconds", failureStr, timeout); < } < else { < snprintf(failureStr, strLen, "%sWake transition timed out after %d seconds", failureStr,timeout); < } < tracePhase2String(tracePhase, &phaseString, &phaseDescription); < < if (notifierThread) { < if (notifier && (notifier->identifier)) { < objectName = notifier->identifier->getCStringNoCopy(); < } < *thread = notifierThread; < } < else { < < iter = IORegistryIterator::iterateOver( < getPMRootDomain(), gIOPowerPlane, kIORegistryIterateRecursively); < < if (iter) < { < while ((entry = iter->getNextObject())) < { < node = OSDynamicCast(IOService, entry); < if (!node) < continue; < if (OSDynamicCast(IOPowerConnection, node)) { < continue; < } < < if(node->getBlockingDriverCall(thread, &callMethod)) { < nodeFound = true; < break; < } < } < iter->release(); < } < if (nodeFound) { < OSKext *kext = OSKext::lookupKextWithAddress((vm_address_t)callMethod); < if (kext) { < objectName = kext->getIdentifierCString(); < } < } < } < if (phaseDescription) { < snprintf(failureStr, strLen, "%s while %s.", failureStr, phaseDescription); < } < if (objectName) { < snprintf(failureStr, strLen, "%s Suspected bundle: %s.", failureStr, objectName); < } < if (*thread) { < snprintf(failureStr, strLen, "%s Thread 0x%llx.", failureStr, thread_tid(*thread)); < } < < DLOG("%s\n", failureStr); < } < < struct swd_stackshot_compressed_data < { < z_output_func zoutput; < size_t zipped; < uint64_t totalbytes; < uint64_t lastpercent; < IOReturn error; < unsigned outremain; < unsigned outlen; < unsigned writes; < Bytef * outbuf; < }; < struct swd_stackshot_compressed_data swd_zip_var = { }; < < static void *swd_zs_alloc(void *__unused ref, u_int items, u_int size) < { < void *result; < LOG("Alloc in zipping %d items of size %d\n", items, size); < < result = (void *)(swd_zs_zmem + swd_zs_zoffset); < swd_zs_zoffset += ~31L & (31 + (items * size)); // 32b align for vector crc < LOG("Offset %zu\n", swd_zs_zoffset); < return (result); < } < < static int swd_zinput(z_streamp strm, Bytef *buf, unsigned size) < { < unsigned len; < < len = strm->avail_in; < < if (len > size) < len = size; < if (len == 0) < return 0; < < if (strm->next_in != (Bytef *) strm) < memcpy(buf, strm->next_in, len); < else < bzero(buf, len); < < strm->adler = z_crc32(strm->adler, buf, len); < < strm->avail_in -= len; < strm->next_in += len; < 
strm->total_in += len; < < return (int)len; < } < < static int swd_zoutput(z_streamp strm, Bytef *buf, unsigned len) < { < unsigned int i = 0; < // if outlen > max size don't add to the buffer < if (strm && buf) { < if (swd_zip_var.outlen + len > SWD_COMPRESSED_BUFSIZE) { < LOG("No space to GZIP... not writing to NVRAM\n"); < return (len); < } < } < for (i = 0; i < len; i++) { < *(swd_zip_var.outbuf + swd_zip_var.outlen + i) = *(buf +i); < } < swd_zip_var.outlen += len; < return (len); < } < static void swd_zs_free(void * __unused ref, void * __unused ptr) {} < < static int swd_compress(char *inPtr, char *outPtr, size_t numBytes) < { < int wbits = 12; < int memlevel = 3; < < if (!swd_zs.zalloc) { < swd_zs.zalloc = swd_zs_alloc; < swd_zs.zfree = swd_zs_free; < if (deflateInit2(&swd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits + 16, memlevel, Z_DEFAULT_STRATEGY)) { < // allocation failed < bzero(&swd_zs, sizeof(swd_zs)); < // swd_zs_zoffset = 0; < } else { < LOG("PMRD inited the zlib allocation routines\n"); < } < } < < < < swd_zip_var.zipped = 0; < swd_zip_var.totalbytes = 0; // should this be the max that we have? < swd_zip_var.lastpercent = 0; < swd_zip_var.error = kIOReturnSuccess; < swd_zip_var.outremain = 0; < swd_zip_var.outlen = 0; < swd_zip_var.writes = 0; < swd_zip_var.outbuf = (Bytef *)outPtr; < < swd_zip_var.totalbytes = numBytes; < < swd_zs.avail_in = 0; < swd_zs.next_in = NULL; < swd_zs.avail_out = 0; < swd_zs.next_out = NULL; < < deflateResetWithIO(&swd_zs, swd_zinput, swd_zoutput); < < z_stream *zs; < int zr; < zs = &swd_zs; < < zr = Z_OK; < < while (swd_zip_var.error >= 0) { < if (!zs->avail_in) { < zs->next_in = (unsigned char *)inPtr ? (Bytef *)inPtr : (Bytef *)zs; /* zero marker? */ < zs->avail_in = numBytes; < } < if (!zs->avail_out) { < zs->next_out = (Bytef *)zs; < zs->avail_out = UINT32_MAX; < } < zr = deflate(zs, Z_NO_FLUSH); < if (Z_STREAM_END == zr) < break; < if (zr != Z_OK) { < LOG("ZERR %d\n", zr); < swd_zip_var.error = zr; < } else { < if (zs->total_in == numBytes) { < break; < } < } < } < zr = Z_OK; < //now flush the stream < while (swd_zip_var.error >= 0) { < if (!zs->avail_out) { < zs->next_out = (Bytef *)zs; < zs->avail_out = UINT32_MAX; < } < zr = deflate(zs, Z_FINISH); < if (Z_STREAM_END == zr) { < break; < } < if (zr != Z_OK) { < LOG("ZERR %d\n", zr); < swd_zip_var.error = zr; < } else { < if (zs->total_in == numBytes) { < LOG("Total output size %d\n", swd_zip_var.outlen); < break; < } < } < } < < return swd_zip_var.outlen; < } < < void IOPMrootDomain::takeStackshot(bool wdogTrigger, bool isOSXWatchdog, bool isSpinDump) < { < swd_hdr * hdr = NULL; < int wdog_panic = -1; < int cnt = 0; < pid_t pid = 0; < kern_return_t kr = KERN_SUCCESS; < uint32_t flags; < < char * dstAddr; < uint32_t size; < uint32_t bytesRemaining; < unsigned bytesWritten = 0; < unsigned totalBytes = 0; < OSString * UUIDstring = NULL; < < char failureStr[512]; < thread_t thread = NULL; < const char * uuid; < < < uint32_t bufSize; < uint32_t initialStackSize; < < < < failureStr[0] = 0; < if (isSpinDump) { < if (_systemTransitionType != kSystemTransitionSleep && < _systemTransitionType != kSystemTransitionWake) < return; < < if (gSpinDumpBufferFull) < return; < if (swd_spindump_buffer == NULL) { < sleepWakeDebugSpinDumpMemAlloc(); < if (swd_spindump_buffer == NULL) return; < } < < bufSize = SWD_SPINDUMP_SIZE; < initialStackSize = SWD_INITIAL_SPINDUMP_SIZE; < hdr = (swd_hdr *)swd_spindump_buffer; < < } else { < if ( (kIOSleepWakeWdogOff & gIOKitDebug) || systemBooting || systemShutdown || 
gWillShutdown) < return; < < if (isOSXWatchdog) { < snprintf(failureStr, sizeof(failureStr), "Stackshot Reason: "); < snprintf(failureStr, sizeof(failureStr), "%smacOS watchdog triggered failure\n", failureStr); < } < else if (wdogTrigger) { < if ((UUIDstring = OSDynamicCast(OSString, getProperty(kIOPMSleepWakeUUIDKey))) != NULL ) { < uuid = UUIDstring->getCStringNoCopy(); < snprintf(failureStr, sizeof(failureStr), "UUID: %s\n", uuid); < } < < snprintf(failureStr, sizeof(failureStr), "%sStackshot Reason: ", failureStr); < getFailureData(&thread, failureStr, sizeof(failureStr)); < if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) { < goto skip_stackshot; < } < < } < else { < snprintf(failureStr, sizeof(failureStr), "%sStackshot triggered for debugging stackshot collection.\n", failureStr); < } < // Take only one stackshot in this case. < cnt = SWD_MAX_STACKSHOTS-1; < < if (swd_buffer == NULL) { < sleepWakeDebugMemAlloc(); < if (swd_buffer == NULL) return; < } < hdr = (swd_hdr *)swd_buffer; < < bufSize = hdr->alloc_size;; < initialStackSize = bufSize; < < } < < < if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) < return; < < < dstAddr = (char*)hdr + hdr->spindump_offset; < bytesRemaining = bufSize - hdr->spindump_offset; < < DLOG("Taking snapshot. bytesRemaining: %d\n", bytesRemaining); < < flags = STACKSHOT_KCDATA_FORMAT|STACKSHOT_NO_IO_STATS|STACKSHOT_SAVE_KEXT_LOADINFO|STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY|STACKSHOT_THREAD_WAITINFO; < while (kr == KERN_SUCCESS) { < < if (cnt == 0) { < /* < * Take stackshot of all process on first sample. Size is restricted < * to SWD_INITIAL_STACK_SIZE < */ < pid = -1; < size = (bytesRemaining > initialStackSize) ? initialStackSize : bytesRemaining; < } < else { < /* Take sample of kernel threads only */ < pid = 0; < size = bytesRemaining; < } --- > if (cnt == 0) { > /* > * Take stackshot of all process on first sample. Size is restricted > * to SWD_INITIAL_STACK_SIZE > */ > pid = -1; > size = (bytesRemaining > initialStackSize) ? 
initialStackSize : bytesRemaining; > flags |= STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY; > } > else { > /* Take sample of kernel threads only */ > pid = 0; > size = bytesRemaining; > } 10244c9888 < if (++cnt == SWD_MAX_STACKSHOTS) { --- > if (++cnt == 10) { 10251a9896,9901 > > memset(hdr->spindump_status, 0x20, sizeof(hdr->spindump_status)); > code = pmTracer->getPMStatusCode(); > memset(hdr->PMStatusCode, 0x20, sizeof(hdr->PMStatusCode)); > snprintf(hdr->PMStatusCode, sizeof(hdr->PMStatusCode), "\nCode: %08x %08x", > (uint32_t)((code >> 32) & 0xffffffff), (uint32_t)(code & 0xffffffff)); 10254c9904 < snprintf(hdr->reason, sizeof(hdr->reason), "\nStackshot reason: Power State Change Delay\n\n"); --- > snprintf(hdr->reason, sizeof(hdr->reason), "\nStackshot reason: PSC Delay\n\n"); 10258a9909 > snprintf(hdr->reason, sizeof(hdr->reason), "\nStackshot reason: Watchdog\n\n"); 10260,10294d9910 < // Compress stackshot and save to NVRAM < { < char *outbuf = (char *)swd_compressed_buffer; < int outlen = 0; < int num_chunks = 0; < int max_chunks = 0; < int leftover = 0; < char nvram_var_name_buffer[20]; < < outlen = swd_compress((char*)hdr + hdr->spindump_offset, outbuf, bytesWritten); < < if (outlen) { < max_chunks = outlen / (2096 - 200); < leftover = outlen % (2096 - 200); < < if (max_chunks < 8) { < for (num_chunks = 0; num_chunks < max_chunks; num_chunks++) { < snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks+1); < if (PEWriteNVRAMProperty(nvram_var_name_buffer, (outbuf + (num_chunks * (2096-200))), (2096 - 200)) == FALSE) { < LOG("Failed to update NVRAM %d\n", num_chunks); < break; < } < } < if (leftover) { < snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks+1); < if (PEWriteNVRAMProperty(nvram_var_name_buffer, (outbuf + (num_chunks * (2096-200))), leftover) == FALSE) { < LOG("Failed to update NVRAM with leftovers\n"); < } < } < } < else { < LOG("Compressed failure stackshot is too large. 
size=%d bytes\n", outlen); < } < } < } 10296c9912,9918 < if (failureStr[0]) { --- > data[0] = round_page(sizeof(swd_hdr) + hdr->spindump_size); > /* Header & rootdomain log is constantly changing and is not covered by CRC */ > data[1] = hdr->crc = crc32(0, ((char*)swd_buffer+hdr->spindump_offset), hdr->spindump_size); > data[2] = kvtophys((vm_offset_t)swd_buffer); > len = sizeof(addr64_t)*3; > DLOG("bytes: 0x%llx crc:0x%llx paddr:0x%llx\n", > data[0], data[1], data[2]); 10298,10310c9920,9923 < if (!isOSXWatchdog) { < // append sleep-wake failure code < snprintf(failureStr, sizeof(failureStr), "%s\nFailure code:: 0x%08x %08x\n", < failureStr, pmTracer->getTraceData(), pmTracer->getTracePhase()); < if (PEWriteNVRAMProperty(kIOSleepWakeFailureString, failureStr, strlen(failureStr)) == false) { < DLOG("Failed to write SleepWake failure string\n"); < } < } < else { < if (PEWriteNVRAMProperty(kIOOSWatchdogFailureString, failureStr, strlen(failureStr)) == false) { < DLOG("Failed to write OSWatchdog failure string\n"); < } < } --- > if (PEWriteNVRAMProperty(kIOSleepWakeDebugKey, data, len) == false) > { > DLOG("Failed to update nvram boot-args\n"); > goto exit; 10312,10316d9924 < gRootDomain->swd_lock = 0; < < skip_stackshot: < if (wdogTrigger) { < PE_parse_boot_argn("swd_panic", &wdog_panic, sizeof(wdog_panic)); 10318,10340c9926 < if ((wdog_panic == 1) || (PEGetCoprocessorVersion() >= kCoprocessorVersion2)) { < if (thread) { < panic_with_thread_context(0, NULL, DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT, thread, "%s", failureStr); < } < else { < panic_with_options(0, NULL, DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT, "%s", failureStr); < } < return; < } < else if (swd_flags & SWD_BOOT_BY_SW_WDOG) { < // If current boot is due to this watch dog trigger restart in previous boot, < // then don't trigger again until at least 1 successful sleep & wake. < if (!(sleepCnt && (displayWakeCnt || darkWakeCnt))) { < LOG("Shutting down due to repeated Sleep/Wake failures\n"); < if (!tasksSuspended) { < tasksSuspended = TRUE; < tasks_system_suspend(true); < } < PEHaltRestart(kPEHaltCPU); < return; < } < } < } --- > exit: 10341a9928 > gRootDomain->swd_lock = 0; 10344c9931 < LOG("Restarting to collect Sleep wake debug logs\n"); --- > IOLog("Restarting to collect Sleep wake debug logs\n"); 10353c9940,9945 < saveFailureData2File(); --- > logBufMap = sleepWakeDebugRetrieve(); > if (logBufMap) { > sleepWakeDebugDumpFromMem(logBufMap); > logBufMap->release(); > logBufMap = 0; > } 10359c9951 < vm_size_t size = SWD_STACKSHOT_SIZE + SWD_COMPRESSED_BUFSIZE + SWD_ZLIB_BUFSIZE; --- > vm_size_t size = SWD_BUF_SIZE; 10362d9953 < void *bufPtr = NULL; 10376,10378c9967,9976 < memDesc = IOBufferMemoryDescriptor::inTaskWithOptions( < kernel_task, kIODirectionIn|kIOMemoryMapperNone, < size); --- > // Try allocating above 4GB. 
If that fails, try at 2GB > memDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask( > kernel_task, kIOMemoryPhysicallyContiguous|kIOMemoryMapperNone, > size, 0xFFFFFFFF00000000ULL); > if (!memDesc) { > memDesc = IOBufferMemoryDescriptor::inTaskWithPhysicalMask( > kernel_task, kIOMemoryPhysicallyContiguous|kIOMemoryMapperNone, > size, 0xFFFFFFFF10000000ULL); > } > 10385d9982 < bufPtr = memDesc->getBytesNoCopy(); 10387,10396c9984 < // Carve out memory for zlib routines < swd_zs_zmem = (vm_offset_t)bufPtr; < bufPtr = (char *)bufPtr + SWD_ZLIB_BUFSIZE; < < // Carve out memory for compressed stackshots < swd_compressed_buffer = bufPtr; < bufPtr = (char *)bufPtr + SWD_COMPRESSED_BUFSIZE; < < // Remaining is used for holding stackshot < hdr = (swd_hdr *)bufPtr; --- > hdr = (swd_hdr *)memDesc->getBytesNoCopy(); 10400c9988 < hdr->alloc_size = SWD_STACKSHOT_SIZE; --- > hdr->alloc_size = size; 10447a10036,10038 > swd_flags |= SWD_WDOG_ENABLED; > if (!swd_buffer) > sleepWakeDebugMemAlloc(); 10452c10043,10044 < return (!systemBooting && !systemShutdown && !gWillShutdown); --- > return ((swd_flags & SWD_WDOG_ENABLED) && > !systemBooting && !systemShutdown && !gWillShutdown); 10487c10079 < LOG("Failed to open the file %s\n", name); --- > IOLog("Failed to open the file %s\n", name); 10496c10088 < LOG("Bailing as this is not a regular file\n"); --- > IOLog("Bailing as this is not a regular file\n"); 10507c10099 < UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL, vfs_context_proc(ctx)); --- > UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, vfs_context_proc(ctx)); 10509c10101 < LOG("Failed to save sleep wake log. err 0x%x\n", error); --- > IOLog("Failed to save sleep wake log. err 0x%x\n", error); 10524a10117,10625 > errno_t IOPMrootDomain::sleepWakeDebugCopyFile( > struct vnode *srcVp, > vfs_context_t srcCtx, > char *tmpBuf, uint64_t tmpBufSize, > uint64_t srcOffset, > const char *dstFname, > uint64_t numBytes, > uint32_t crc) > { > struct vnode *vp = NULL; > vfs_context_t ctx = vfs_context_create(vfs_context_current()); > struct vnode_attr va; > errno_t error = EIO; > uint64_t bytesToRead, bytesToWrite; > uint64_t readFileOffset, writeFileOffset, srcDataOffset; > uint32_t newcrc = 0; > > if (vnode_open(dstFname, (O_CREAT | FWRITE | O_NOFOLLOW), > S_IRUSR|S_IRGRP|S_IROTH, VNODE_LOOKUP_NOFOLLOW, &vp, ctx) != 0) > { > IOLog("Failed to open the file %s\n", dstFname); > swd_flags |= SWD_FILEOP_ERROR; > goto exit; > } > VATTR_INIT(&va); > VATTR_WANTED(&va, va_nlink); > /* Don't dump to non-regular files or files with links. */ > if (vp->v_type != VREG || > vnode_getattr(vp, &va, ctx) || va.va_nlink != 1) { > IOLog("Bailing as this is not a regular file\n"); > swd_flags |= SWD_FILEOP_ERROR; > goto exit; > } > VATTR_INIT(&va); > VATTR_SET(&va, va_data_size, 0); > vnode_setattr(vp, &va, ctx); > > writeFileOffset = 0; > while(numBytes) { > bytesToRead = (round_page(numBytes) > tmpBufSize) ? 
tmpBufSize : round_page(numBytes); > readFileOffset = trunc_page(srcOffset); > > DLOG("Read file (numBytes:0x%llx offset:0x%llx)\n", bytesToRead, readFileOffset); > error = vn_rdwr(UIO_READ, srcVp, tmpBuf, bytesToRead, readFileOffset, > UIO_SYSSPACE, IO_SKIP_ENCRYPTION|IO_SYNC|IO_NODELOCKED|IO_UNIT|IO_NOCACHE, > vfs_context_ucred(srcCtx), (int *) 0, > vfs_context_proc(srcCtx)); > if (error) { > IOLog("Failed to read file(numBytes:0x%llx)\n", bytesToRead); > swd_flags |= SWD_FILEOP_ERROR; > break; > } > > srcDataOffset = (uint64_t)tmpBuf + (srcOffset - readFileOffset); > bytesToWrite = bytesToRead - (srcOffset - readFileOffset); > if (bytesToWrite > numBytes) bytesToWrite = numBytes; > > if (crc) { > newcrc = crc32(newcrc, (void *)srcDataOffset, bytesToWrite); > } > DLOG("Write file (numBytes:0x%llx offset:0x%llx)\n", bytesToWrite, writeFileOffset); > error = vn_rdwr(UIO_WRITE, vp, (char *)srcDataOffset, bytesToWrite, writeFileOffset, > UIO_SYSSPACE, IO_SYNC|IO_NODELOCKED|IO_UNIT, > vfs_context_ucred(ctx), (int *) 0, > vfs_context_proc(ctx)); > if (error) { > IOLog("Failed to write file(numBytes:0x%llx)\n", bytesToWrite); > swd_flags |= SWD_FILEOP_ERROR; > break; > } > > writeFileOffset += bytesToWrite; > numBytes -= bytesToWrite; > srcOffset += bytesToWrite; > > } > if (crc != newcrc) { > /* Set stackshot size to 0 if crc doesn't match */ > VATTR_INIT(&va); > VATTR_SET(&va, va_data_size, 0); > vnode_setattr(vp, &va, ctx); > > IOLog("CRC check failed. expected:0x%x actual:0x%x\n", crc, newcrc); > swd_flags |= SWD_DATA_CRC_ERROR; > error = EFAULT; > } > exit: > if (vp) { > error = vnode_close(vp, FWRITE, ctx); > DLOG("vnode_close on file %s returned 0x%x\n",dstFname, error); > } > if (ctx) vfs_context_rele(ctx); > > return error; > > > > } > uint32_t IOPMrootDomain::checkForValidDebugData(const char *fname, vfs_context_t *ctx, > void *tmpBuf, struct vnode **vp) > { > int rc; > uint64_t hdrOffset; > uint32_t error = 0; > > struct vnode_attr va; > IOHibernateImageHeader *imageHdr; > > *vp = NULL; > if (vnode_open(fname, (FREAD | O_NOFOLLOW), 0, > VNODE_LOOKUP_NOFOLLOW, vp, *ctx) != 0) > { > DMSG("sleepWakeDebugDumpFromFile: Failed to open the file %s\n", fname); > goto err; > } > VATTR_INIT(&va); > VATTR_WANTED(&va, va_nlink); > VATTR_WANTED(&va, va_data_alloc); > if ((*vp)->v_type != VREG || > vnode_getattr((*vp), &va, *ctx) || va.va_nlink != 1) { > IOLog("sleepWakeDebugDumpFromFile: Bailing as %s is not a regular file\n", fname); > error = SWD_FILEOP_ERROR; > goto err; > } > > /* Read the sleepimage file header */ > rc = vn_rdwr(UIO_READ, *vp, (char *)tmpBuf, round_page(sizeof(IOHibernateImageHeader)), 0, > UIO_SYSSPACE, IO_SKIP_ENCRYPTION|IO_SYNC|IO_NODELOCKED|IO_UNIT|IO_NOCACHE, > vfs_context_ucred(*ctx), (int *) 0, > vfs_context_proc(*ctx)); > if (rc != 0) { > IOLog("sleepWakeDebugDumpFromFile: Failed to read header size %llu(rc=%d) from %s\n", > mach_vm_round_page(sizeof(IOHibernateImageHeader)), rc, fname); > error = SWD_FILEOP_ERROR; > goto err; > } > > imageHdr = ((IOHibernateImageHeader *)tmpBuf); > if (imageHdr->signature != kIOHibernateHeaderDebugDataSignature) { > IOLog("sleepWakeDebugDumpFromFile: File %s header has unexpected value 0x%x\n", > fname, imageHdr->signature); > error = SWD_HDR_SIGNATURE_ERROR; > goto err; > } > > /* Sleep/Wake debug header(swd_hdr) is at the beggining of the second block */ > hdrOffset = imageHdr->deviceBlockSize; > if (hdrOffset + sizeof(swd_hdr) >= va.va_data_alloc) { > IOLog("sleepWakeDebugDumpFromFile: header is crossing file size(0x%llx) in file 
%s\n", > va.va_data_alloc, fname); > error = SWD_HDR_SIZE_ERROR; > goto err; > } > > return 0; > > err: > if (*vp) vnode_close(*vp, FREAD, *ctx); > *vp = NULL; > > return error; > } > > void IOPMrootDomain::sleepWakeDebugDumpFromFile( ) > { > #if HIBERNATION > int rc; > char hibernateFilename[MAXPATHLEN+1]; > void *tmpBuf; > swd_hdr *hdr = NULL; > uint32_t stacksSize, logSize; > uint64_t tmpBufSize; > uint64_t hdrOffset, stacksOffset, logOffset; > errno_t error = EIO; > OSObject *obj = NULL; > OSString *str = NULL; > OSNumber *failStat = NULL; > struct vnode *vp = NULL; > vfs_context_t ctx = NULL; > const char *stacksFname, *logFname; > > IOBufferMemoryDescriptor *tmpBufDesc = NULL; > > DLOG("sleepWakeDebugDumpFromFile\n"); > if ((swd_flags & SWD_LOGS_IN_FILE) == 0) > return; > > if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) > return; > > > /* Allocate a temp buffer to copy data between files */ > tmpBufSize = 2*4096; > tmpBufDesc = IOBufferMemoryDescriptor:: > inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryMapperNone, > tmpBufSize, PAGE_SIZE); > > if (!tmpBufDesc) { > DMSG("sleepWakeDebugDumpFromFile: Fail to allocate temp buf\n"); > goto exit; > } > > tmpBuf = tmpBufDesc->getBytesNoCopy(); > > ctx = vfs_context_create(vfs_context_current()); > > /* First check if 'kSleepWakeStackBinFilename' has valid data */ > swd_flags |= checkForValidDebugData(kSleepWakeStackBinFilename, &ctx, tmpBuf, &vp); > if (vp == NULL) { > /* Check if the debug data is saved to hibernation file */ > hibernateFilename[0] = 0; > if ((obj = copyProperty(kIOHibernateFileKey))) > { > if ((str = OSDynamicCast(OSString, obj))) > strlcpy(hibernateFilename, str->getCStringNoCopy(), > sizeof(hibernateFilename)); > obj->release(); > } > if (!hibernateFilename[0]) { > DMSG("sleepWakeDebugDumpFromFile: Failed to get hibernation file name\n"); > goto exit; > } > > swd_flags |= checkForValidDebugData(hibernateFilename, &ctx, tmpBuf, &vp); > if (vp == NULL) { > DMSG("sleepWakeDebugDumpFromFile: No valid debug data is found\n"); > goto exit; > } > DLOG("Getting SW Stacks image from file %s\n", hibernateFilename); > } > else { > DLOG("Getting SW Stacks image from file %s\n", kSleepWakeStackBinFilename); > } > > hdrOffset = ((IOHibernateImageHeader *)tmpBuf)->deviceBlockSize; > > DLOG("Reading swd_hdr len 0x%llx offset 0x%lx\n", mach_vm_round_page(sizeof(swd_hdr)), trunc_page(hdrOffset)); > /* Read the sleep/wake debug header(swd_hdr) */ > rc = vn_rdwr(UIO_READ, vp, (char *)tmpBuf, round_page(sizeof(swd_hdr)), trunc_page(hdrOffset), > UIO_SYSSPACE, IO_SKIP_ENCRYPTION|IO_SYNC|IO_NODELOCKED|IO_UNIT|IO_NOCACHE, > vfs_context_ucred(ctx), (int *) 0, > vfs_context_proc(ctx)); > if (rc != 0) { > DMSG("sleepWakeDebugDumpFromFile: Failed to debug read header size %llu. rc=%d\n", > mach_vm_round_page(sizeof(swd_hdr)), rc); > swd_flags |= SWD_FILEOP_ERROR; > goto exit; > } > > hdr = (swd_hdr *)((char *)tmpBuf + (hdrOffset - trunc_page(hdrOffset))); > if ((hdr->signature != SWD_HDR_SIGNATURE) || (hdr->alloc_size > SWD_BUF_SIZE) || > (hdr->spindump_offset > SWD_BUF_SIZE) || (hdr->spindump_size > SWD_BUF_SIZE)) { > DMSG("sleepWakeDebugDumpFromFile: Invalid data in debug header. 
sign:0x%x size:0x%x spindump_offset:0x%x spindump_size:0x%x\n", > hdr->signature, hdr->alloc_size, hdr->spindump_offset, hdr->spindump_size); > swd_flags |= SWD_BUF_SIZE_ERROR; > goto exit; > } > stacksSize = hdr->spindump_size; > > /* Get stacks & log offsets in the image file */ > stacksOffset = hdrOffset + hdr->spindump_offset; > logOffset = hdrOffset + offsetof(swd_hdr, UUID); > logSize = sizeof(swd_hdr)-offsetof(swd_hdr, UUID); > stacksFname = getDumpStackFilename(hdr); > logFname = getDumpLogFilename(hdr); > > error = sleepWakeDebugCopyFile(vp, ctx, (char *)tmpBuf, tmpBufSize, stacksOffset, > stacksFname, stacksSize, hdr->crc); > if (error == EFAULT) { > DMSG("sleepWakeDebugDumpFromFile: Stackshot CRC doesn't match\n"); > goto exit; > } > error = sleepWakeDebugCopyFile(vp, ctx, (char *)tmpBuf, tmpBufSize, logOffset, > logFname, logSize, 0); > if (error) { > DMSG("sleepWakeDebugDumpFromFile: Failed to write the log file(0x%x)\n", error); > goto exit; > } > exit: > if (error) { > // Write just the SleepWakeLog.dump with failure code > uint64_t fcode = 0; > const char *fname; > swd_hdr hdrCopy; > char *offset = NULL; > int size; > > hdr = &hdrCopy; > if (swd_flags & SWD_BOOT_BY_SW_WDOG) { > failStat = OSDynamicCast(OSNumber, getProperty(kIOPMSleepWakeFailureCodeKey)); > fcode = failStat->unsigned64BitValue(); > fname = kSleepWakeLogFilename; > } > else { > fname = kAppleOSXWatchdogLogFilename; > } > > offset = (char*)hdr+offsetof(swd_hdr, UUID); > size = sizeof(swd_hdr)-offsetof(swd_hdr, UUID); > memset(offset, 0x20, size); // Fill with spaces > > > snprintf(hdr->spindump_status, sizeof(hdr->spindump_status), "\nstatus: 0x%x", swd_flags); > snprintf(hdr->PMStatusCode, sizeof(hdr->PMStatusCode), "\nCode: 0x%llx", fcode); > snprintf(hdr->reason, sizeof(hdr->reason), "\nStackshot reason: Watchdog\n\n"); > sleepWakeDebugSaveFile(fname, offset, size); > > } > gRootDomain->swd_lock = 0; > > if (vp) vnode_close(vp, FREAD, ctx); > if (ctx) vfs_context_rele(ctx); > if (tmpBufDesc) tmpBufDesc->release(); > #endif /* HIBERNATION */ > } > > void IOPMrootDomain::sleepWakeDebugDumpFromMem(IOMemoryMap *logBufMap) > { > IOVirtualAddress srcBuf = NULL; > char *stackBuf = NULL, *logOffset = NULL; > int logSize = 0; > > errno_t error = EIO; > uint64_t bufSize = 0; > swd_hdr *hdr = NULL; > OSNumber *failStat = NULL; > > if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) > return; > > if ((logBufMap == 0) || ( (srcBuf = logBufMap->getVirtualAddress()) == 0) ) > { > DLOG("Nothing saved to dump to file\n"); > goto exit; > } > > hdr = (swd_hdr *)srcBuf; > bufSize = logBufMap->getLength(); > if (bufSize <= sizeof(swd_hdr)) > { > IOLog("SleepWake log buffer size is invalid\n"); > swd_flags |= SWD_BUF_SIZE_ERROR; > goto exit; > } > > stackBuf = (char*)hdr+hdr->spindump_offset; > > error = sleepWakeDebugSaveFile(getDumpStackFilename(hdr), stackBuf, hdr->spindump_size); > if (error) goto exit; > > logOffset = (char*)hdr+offsetof(swd_hdr, UUID); > logSize = sizeof(swd_hdr)-offsetof(swd_hdr, UUID); > > error = sleepWakeDebugSaveFile(getDumpLogFilename(hdr), logOffset, logSize); > if (error) goto exit; > > hdr->spindump_size = 0; > error = 0; > > exit: > if (error) { > // Write just the SleepWakeLog.dump with failure code > uint64_t fcode = 0; > const char *sname, *lname; > swd_hdr hdrCopy; > > /* Try writing an empty stacks file */ > hdr = &hdrCopy; > if (swd_flags & SWD_BOOT_BY_SW_WDOG) { > failStat = OSDynamicCast(OSNumber, getProperty(kIOPMSleepWakeFailureCodeKey)); > fcode = failStat->unsigned64BitValue(); > 
lname = kSleepWakeLogFilename; > sname = kSleepWakeStackFilename; > } > else { > lname = kAppleOSXWatchdogLogFilename; > sname= kAppleOSXWatchdogStackFilename; > } > > sleepWakeDebugSaveFile(sname, NULL, 0); > > logOffset = (char*)hdr+offsetof(swd_hdr, UUID); > logSize = sizeof(swd_hdr)-offsetof(swd_hdr, UUID); > memset(logOffset, 0x20, logSize); // Fill with spaces > > > snprintf(hdr->spindump_status, sizeof(hdr->spindump_status), "\nstatus: 0x%x", swd_flags); > snprintf(hdr->PMStatusCode, sizeof(hdr->PMStatusCode), "\nCode: 0x%llx", fcode); > snprintf(hdr->reason, sizeof(hdr->reason), "\nStackshot reason: Watchdog\n\n"); > sleepWakeDebugSaveFile(lname, logOffset, logSize); > } > > gRootDomain->swd_lock = 0; > } > > IOMemoryMap *IOPMrootDomain::sleepWakeDebugRetrieve( ) > { > IOVirtualAddress vaddr = NULL; > IOMemoryDescriptor * desc = NULL; > IOMemoryMap * logBufMap = NULL; > > uint32_t len = INT_MAX; > addr64_t data[3]; > uint64_t bufSize = 0; > uint64_t crc = 0; > uint64_t newcrc = 0; > uint64_t paddr = 0; > swd_hdr *hdr = NULL; > bool ret = false; > char str[20]; > > > if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) > return NULL; > > if (!PEReadNVRAMProperty(kIOSleepWakeDebugKey, 0, &len)) { > DLOG("No sleepWakeDebug note to read\n"); > goto exit; > } > > if (len == strlen("sleepimage")) { > str[0] = 0; > PEReadNVRAMProperty(kIOSleepWakeDebugKey, str, &len); > > if (!strncmp((char*)str, "sleepimage", strlen("sleepimage"))) { > DLOG("sleepWakeDebugRetrieve: in file logs\n"); > swd_flags |= SWD_LOGS_IN_FILE|SWD_VALID_LOGS; > goto exit; > } > } > else if (len == sizeof(addr64_t)*3) { > PEReadNVRAMProperty(kIOSleepWakeDebugKey, data, &len); > } > else { > DLOG("Invalid sleepWakeDebug note length(%d)\n", len); > goto exit; > } > > > > DLOG("sleepWakeDebugRetrieve: data[0]:0x%llx data[1]:0x%llx data[2]:0x%llx\n", > data[0], data[1], data[2]); > DLOG("sleepWakeDebugRetrieve: in mem logs\n"); > bufSize = data[0]; > crc = data[1]; > paddr = data[2]; > if ( (bufSize <= sizeof(swd_hdr)) ||(bufSize > SWD_BUF_SIZE) || (crc == 0) ) > { > IOLog("SleepWake log buffer size is invalid\n"); > swd_flags |= SWD_BUF_SIZE_ERROR; > return NULL; > } > > DLOG("size:0x%llx crc:0x%llx paddr:0x%llx\n", > bufSize, crc, paddr); > > > desc = IOMemoryDescriptor::withAddressRange( paddr, bufSize, > kIODirectionOutIn | kIOMemoryMapperNone, NULL); > if (desc == NULL) > { > IOLog("Fail to map SleepWake log buffer\n"); > swd_flags |= SWD_INTERNAL_FAILURE; > goto exit; > } > > logBufMap = desc->map(); > > vaddr = logBufMap->getVirtualAddress(); > > > if ( (logBufMap->getLength() <= sizeof(swd_hdr)) || (vaddr == NULL) ) { > IOLog("Fail to map SleepWake log buffer\n"); > swd_flags |= SWD_INTERNAL_FAILURE; > goto exit; > } > > hdr = (swd_hdr *)vaddr; > if (hdr->spindump_offset+hdr->spindump_size > bufSize) > { > IOLog("SleepWake log header size is invalid\n"); > swd_flags |= SWD_HDR_SIZE_ERROR; > goto exit; > } > > hdr->crc = crc; > newcrc = crc32(0, (void *)((char*)vaddr+hdr->spindump_offset), > hdr->spindump_size); > if (newcrc != crc) { > IOLog("SleepWake log buffer contents are invalid\n"); > swd_flags |= SWD_DATA_CRC_ERROR; > goto exit; > } > > ret = true; > swd_flags |= SWD_LOGS_IN_MEM | SWD_VALID_LOGS; > > > exit: > PERemoveNVRAMProperty(kIOSleepWakeDebugKey); > if (!ret) { > if (logBufMap) logBufMap->release(); > logBufMap = 0; > } > if (desc) desc->release(); > gRootDomain->swd_lock = 0; > > return logBufMap; > } 10551c10652,10671 < void IOPMrootDomain::saveFailureData2File( ) --- > void 
IOPMrootDomain::sleepWakeDebugDumpFromMem(IOMemoryMap *map) > { > } > errno_t IOPMrootDomain::sleepWakeDebugCopyFile( > struct vnode *srcVp, > vfs_context_t srcCtx, > char *tmpBuf, uint64_t tmpBufSize, > uint64_t srcOffset, > const char *dstFname, > uint64_t numBytes, > uint32_t crc) > { > return EIO; > } > > void IOPMrootDomain::sleepWakeDebugDumpFromFile() > { > } > > IOMemoryMap *IOPMrootDomain::sleepWakeDebugRetrieve( ) 10552a10673 > return NULL; NO DIFFS in ./iokit/Kernel/RootDomainUserClient.cpp
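The IOPMrootDomain.cpp hunks above (the "<" side) replace the old save-by-physical-address NVRAM scheme with one that gzip-compresses the sleep/wake failure stackshot with zlib (swd_compress, deflateInit2 with wbits + 16) and then spreads the compressed blob across at most eight numbered NVRAM variables of 2096 - 200 payload bytes each, which saveFailureData2File() reassembles on the next boot. The user-space sketch below reproduces only that compress-and-chunk arithmetic; writeChunk() and the "swd_stks" prefix are hypothetical stand-ins for PEWriteNVRAMProperty() and SWD_STACKSHOT_VAR_PREFIX, whose definitions are not shown in the diff. Build with -lz.

    // Sketch of the compress-and-chunk scheme used for the failure stackshot.
    // Assumptions: writeChunk() and the "swd_stks" prefix are hypothetical
    // stand-ins for PEWriteNVRAMProperty() and SWD_STACKSHOT_VAR_PREFIX.
    #include <zlib.h>
    #include <cstdio>
    #include <cstring>
    #include <vector>

    static const size_t kChunkPayload = 2096 - 200; // per-variable payload in the kernel code
    static const int    kMaxChunks    = 8;          // the kernel stores at most 8 chunks

    // Hypothetical stand-in for PEWriteNVRAMProperty(name, buf, len).
    static bool writeChunk(const char *name, const void *buf, size_t len)
    {
        std::printf("%s <- %zu bytes\n", name, len);
        (void)buf;
        return true;
    }

    // gzip-compress a buffer with zlib.  windowBits 15 + 16 selects the gzip
    // wrapper, mirroring the wbits + 16 passed to deflateInit2() in
    // swd_compress(); the kernel also shrinks wbits/memLevel to cap memory.
    static std::vector<Bytef> gzipCompress(const Bytef *in, uLong inLen)
    {
        z_stream zs;
        std::memset(&zs, 0, sizeof(zs));
        if (deflateInit2(&zs, Z_BEST_SPEED, Z_DEFLATED, 15 + 16, 8,
                         Z_DEFAULT_STRATEGY) != Z_OK)
            return std::vector<Bytef>();

        std::vector<Bytef> out(deflateBound(&zs, inLen));
        zs.next_in   = const_cast<Bytef *>(in);
        zs.avail_in  = (uInt)inLen;
        zs.next_out  = out.data();
        zs.avail_out = (uInt)out.size();

        int rc = deflate(&zs, Z_FINISH);   // one-shot: whole input, big-enough output
        out.resize(zs.total_out);
        deflateEnd(&zs);
        return (rc == Z_STREAM_END) ? out : std::vector<Bytef>();
    }

    int main()
    {
        std::vector<Bytef> stackshot(64 * 1024, 0xAB);   // pretend stackshot payload
        std::vector<Bytef> zipped = gzipCompress(stackshot.data(), stackshot.size());

        size_t fullChunks = zipped.size() / kChunkPayload;
        size_t leftover   = zipped.size() % kChunkPayload;
        if (fullChunks >= (size_t)kMaxChunks) {          // same guard as "max_chunks < 8"
            std::fprintf(stderr, "compressed stackshot too large (%zu bytes)\n", zipped.size());
            return 1;
        }

        char name[20];
        size_t chunk = 0;
        for (; chunk < fullChunks; chunk++) {            // full chunks: prefix01, prefix02, ...
            std::snprintf(name, sizeof(name), "%s%02d", "swd_stks", (int)(chunk + 1));
            writeChunk(name, zipped.data() + chunk * kChunkPayload, kChunkPayload);
        }
        if (leftover) {                                  // trailing partial chunk
            std::snprintf(name, sizeof(name), "%s%02d", "swd_stks", (int)(chunk + 1));
            writeChunk(name, zipped.data() + chunk * kChunkPayload, leftover);
        }
        return 0;
    }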
NO DIFFS in ./iokit/Kernel/IOPMPowerStateQueue.cpp
NO DIFFS in ./iokit/Kernel/IODMAController.cpp
NO DIFFS in ./iokit/Kernel/IODeviceMemory.cpp

./iokit/Kernel/IOServicePrivate.h differences detected:
68d67
< kIOServiceFinalized = 0x00010000,
74,75c73
< kIOServiceNotifyWaiter = 0x00000002,
< kIOServiceNotifyBlock = 0x00000004
---
> kIOServiceNotifyWaiter = 0x00000002
229,235d226
<
< #define _interruptSourcesPrivate(service) \
<   ((IOInterruptSourcePrivate *)(&(service)->_interruptSources[(service)->_numInterruptSources]))
<
< #define sizeofAllIOInterruptSource \
<   (sizeof(IOInterruptSourcePrivate) + sizeof(IOInterruptSource))
<
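The _interruptSourcesPrivate() and sizeofAllIOInterruptSource macros on the "<" side of the IOServicePrivate.h hunk pack one IOInterruptSourcePrivate record behind every IOInterruptSource record in a single allocation, recovering the second array by indexing one past _numInterruptSources. Below is a minimal user-space sketch of that layout trick; Source, SourcePrivate and Service are made-up stand-ins, and the allocation size (count * per-source bytes) is an assumption.

    // Sketch of the "two parallel arrays in one allocation" layout implied by
    // _interruptSourcesPrivate().  Source, SourcePrivate and Service are
    // hypothetical stand-ins for the IOKit structures.
    #include <cstdio>
    #include <cstdlib>

    struct Source        { void *interruptController; void *vectorData; };
    struct SourcePrivate { bool  noThreadCall; };

    struct Service {
        Source *_interruptSources;
        int     _numInterruptSources;
    };

    // Counterpart of sizeofAllIOInterruptSource: one public + one private record.
    static const size_t kPerSourceBytes = sizeof(SourcePrivate) + sizeof(Source);

    // Counterpart of _interruptSourcesPrivate(service): the private records
    // start immediately after the last public record in the same block.
    static SourcePrivate *privateSources(Service *s)
    {
        return reinterpret_cast<SourcePrivate *>(
            &s->_interruptSources[s->_numInterruptSources]);
    }

    int main()
    {
        Service svc;
        svc._numInterruptSources = 4;
        // One allocation: 4 Source records followed by 4 SourcePrivate records.
        svc._interruptSources = static_cast<Source *>(
            std::calloc(svc._numInterruptSources, kPerSourceBytes));

        privateSources(&svc)[2].noThreadCall = true;   // index into the trailing array
        std::printf("private[2].noThreadCall = %d\n", privateSources(&svc)[2].noThreadCall);

        std::free(svc._interruptSources);
        return 0;
    }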
./iokit/Kernel/IOPlatformExpert.cpp differences detected: 62d61 < #define APPLE_SECURE_BOOT_VARIABLE_GUID "94b73556-2197-4702-82a8-3e1337dafbfb" 873,876c872 < #if !CONFIG_EMBEDDED < if (coprocessor_cross_panic_enabled) < #endif < IOCPURunPlatformPanicActions(kPEPanicEnd); --- > IOCPURunPlatformPanicActions(kPEPanicEnd); 1199c1195 < data = OSDynamicCast( OSData, entry->getProperty( APPLE_SECURE_BOOT_VARIABLE_GUID":EffectiveProductionStatus" ) ); --- > data = OSDynamicCast( OSData, entry->getProperty( "EffectiveProductionStatus" ) ); 1625a1622 > reserved = NULL; 1639a1637,1678 > OSDictionary * dictionary; > OSObject * object; > IOReturn status; > > status = super::setProperties( properties ); > if ( status != kIOReturnUnsupported ) return status; > > status = IOUserClient::clientHasPrivilege( current_task( ), kIOClientPrivilegeAdministrator ); > if ( status != kIOReturnSuccess ) return status; > > dictionary = OSDynamicCast( OSDictionary, properties ); > if ( dictionary == 0 ) return kIOReturnBadArgument; > > object = dictionary->getObject( kIOPlatformUUIDKey ); > if ( object ) > { > IORegistryEntry * entry; > OSString * string; > uuid_t uuid; > > string = ( OSString * ) getProperty( kIOPlatformUUIDKey ); > if ( string ) return kIOReturnNotPermitted; > > string = OSDynamicCast( OSString, object ); > if ( string == 0 ) return kIOReturnBadArgument; > > status = uuid_parse( string->getCStringNoCopy( ), uuid ); > if ( status != 0 ) return kIOReturnBadArgument; > > entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); > if ( entry ) > { > entry->setProperty( "platform-uuid", uuid, sizeof( uuid_t ) ); > entry->release( ); > } > > setProperty( kIOPlatformUUIDKey, string ); > publishResource( kIOPlatformUUIDKey, string ); > > return kIOReturnSuccess; > } > NO DIFFS in ./iokit/Kernel/IORTC.cpp
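The setProperties() code visible on the ">" side of the IOPlatformExpert.cpp hunk accepts a platform UUID only after an administrator privilege check and a successful uuid_parse(), then writes the 16 raw bytes to the /options registry entry as "platform-uuid". The privilege check and registry write are kernel-only; the sketch below exercises just the uuid_parse()/uuid_unparse() validation step from user space (link against libuuid on non-Apple platforms).

    // Sketch of the UUID validation step in that setProperties() handler,
    // using the same libc uuid_parse()/uuid_unparse() calls; the privilege
    // check and the IORegistry write are kernel-only and omitted here.
    #include <uuid/uuid.h>
    #include <cstdio>

    int main()
    {
        const char *candidate = "01234567-89AB-CDEF-0123-456789ABCDEF";
        uuid_t uuid;

        // uuid_parse() returns 0 only for a well-formed
        // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx string; the kernel path maps a
        // failure to kIOReturnBadArgument.
        if (uuid_parse(candidate, uuid) != 0) {
            std::fprintf(stderr, "malformed platform UUID rejected\n");
            return 1;
        }

        char canonical[37];                 // 36 characters plus the terminating NUL
        uuid_unparse(uuid, canonical);
        std::printf("accepted %s; the kernel writes the 16 raw bytes to /options\n",
                    canonical);
        return 0;
    }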
NO DIFFS in ./iokit/Kernel/IOLocks.cpp
NO DIFFS in ./iokit/Kernel/IOBufferMemoryDescriptor.cpp
NO DIFFS in ./iokit/Kernel/IOPowerConnection.cpp
NO DIFFS in ./iokit/Kernel/IOCatalogue.cpp
NO DIFFS in ./iokit/Kernel/IOSubMemoryDescriptor.cpp
NO DIFFS in ./iokit/Kernel/IOKitDebug.cpp

./iokit/Kernel/IOWorkLoop.cpp differences detected:
419,420c419
< closeGate();
< thread_t thread = workThread;
---
> thread_t thread = workThread;
422,423d420
< openGate();
<
426c423
< thread_deallocate(thread);
---
> thread_deallocate(thread);
500,511d496
< static IOReturn IOWorkLoopActionToBlock(OSObject *owner,
<     void *arg0, void *arg1,
<     void *arg2, void *arg3)
< {
<     return ((IOWorkLoop::ActionBlock) arg0)();
< }
<
< IOReturn IOWorkLoop::runActionBlock(ActionBlock action)
< {
<     return (runAction(&IOWorkLoopActionToBlock, this, action));
< }
<
NO DIFFS in ./iokit/Kernel/IOPMinformeeList.cpp
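runActionBlock() on the "<" side of the IOWorkLoop.cpp hunk retrofits closure support onto the existing C-style runAction() interface: the block is smuggled through one of the void * arguments and a static trampoline (IOWorkLoopActionToBlock) casts it back and invokes it. Below is a user-space analogue of the same pattern, with std::function standing in for an Apple block and WorkLoop/runAction as hypothetical stand-ins for the IOKit class.

    // User-space analogue of the runActionBlock() trampoline: a closure is
    // passed through a void * argument of a C-style callback API and invoked
    // by a static trampoline.  WorkLoop and runAction() here are hypothetical
    // stand-ins, not the IOKit classes.
    #include <cstdio>
    #include <functional>

    using Action = int (*)(void *owner, void *arg0);

    struct WorkLoop {
        // Existing C-style entry point: a function pointer plus opaque arguments.
        int runAction(Action action, void *owner, void *arg0)
        {
            return action(owner, arg0);   // a real work loop would hold its gate here
        }

        // New closure-friendly entry point layered on top of runAction(),
        // like IOWorkLoop::runActionBlock().
        int runActionBlock(std::function<int ()> block)
        {
            return runAction(&actionToBlock, this, &block);
        }

    private:
        // Trampoline: recover the closure from arg0 and call it, just as
        // IOWorkLoopActionToBlock() casts arg0 back to an ActionBlock.
        static int actionToBlock(void * /*owner*/, void *arg0)
        {
            return (*static_cast<std::function<int ()> *>(arg0))();
        }
    };

    int main()
    {
        WorkLoop wl;
        int value = 41;
        std::printf("runActionBlock returned %d\n",
                    wl.runActionBlock([&] { return value + 1; }));
        return 0;
    }

The trampoline keeps the existing runAction() signature intact while letting callers pass capturing closures, which is the same trade-off the IOKit change makes for blocks.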

./iokit/Kernel/IOTimerEventSource.cpp differences detected:
48,50d47
< #include
<
<
102,103c99,100
< __inline__ void
< IOTimerEventSource::invokeAction(IOTimerEventSource::Action action, IOTimerEventSource * ts,
---
> static __inline__ void
> InvokeAction(IOTimerEventSource::Action action, IOTimerEventSource * ts,
112,113c109
<     if (kActionBlock & flags) ((IOTimerEventSource::ActionBlock) actionBlock)(ts);
<     else (*action)(owner, ts);
---
>     (*action)(owner, ts);
142c138
<     me->invokeAction(doit, me, me->owner, me->workLoop);
---
>     InvokeAction(doit, me, me->owner, me->workLoop);
171c167
<     me->invokeAction(doit, me, me->owner, me->workLoop);
---
>     InvokeAction(doit, me, me->owner, me->workLoop);
193c189
<     invokeAction(doit, this, owner, workLoop);
---
>     InvokeAction(doit, this, owner, workLoop);
310,319d305
< IOTimerEventSource *
< IOTimerEventSource::timerEventSource(uint32_t options, OSObject *inOwner, ActionBlock action)
< {
<     IOTimerEventSource * tes;
<     tes = IOTimerEventSource::timerEventSource(options, inOwner, (Action) NULL);
<     if (tes) tes->setActionBlock((IOEventSource::ActionBlock) action);
<
<     return tes;
< }
<
429,433c415
<     if (options & kIOTimeOptionsContinuous)
<         clock_continuoustime_interval_to_deadline(abstime, &end);
<     else
<         clock_absolutetime_interval_to_deadline(abstime, &end);
<
---
>     clock_continuoustime_interval_to_deadline(abstime, &end);
434a417
>
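The kIOTimeOptionsContinuous branch added to the timeout path above picks clock_continuoustime_interval_to_deadline() instead of clock_absolutetime_interval_to_deadline(): the continuous timebase keeps advancing while the machine is asleep and the absolute timebase does not, so a continuous-time timer can still fire at the right moment after a sleep/wake cycle. A small macOS-only user-space probe of that distinction, using mach_absolute_time() and mach_continuous_time():

    // macOS-only probe of the two timebases behind kIOTimeOptionsContinuous:
    // mach_absolute_time() pauses across system sleep, mach_continuous_time()
    // keeps counting.  Run it, sleep the machine, run it again and compare.
    #include <mach/mach_time.h>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        mach_timebase_info_data_t tb;
        mach_timebase_info(&tb);                       // tick -> nanosecond conversion

        uint64_t abs_ns  = mach_absolute_time()   * tb.numer / tb.denom;
        uint64_t cont_ns = mach_continuous_time() * tb.numer / tb.denom;

        std::printf("absolute   : %llu ns (excludes time asleep)\n",
                    (unsigned long long)abs_ns);
        std::printf("continuous : %llu ns (includes time asleep)\n",
                    (unsigned long long)cont_ns);
        std::printf("asleep so far: ~%llu ns\n",
                    (unsigned long long)(cont_ns - abs_ns));
        return 0;
    }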
./iokit/Kernel/IOHibernateIO.cpp differences detected: 35c35 < - IOHibernateSystemSleep opens the hibernation file (or partition) at the bsd level, --- > - IOHibernateSystemSleep opens the hibernation file (or partition) at the bsd level, 38c38 < the disk, and other ioctls to get the transfer constraints --- > the disk, and other ioctls to get the transfer constraints 48c48 < - Regular sleep progresses - some drivers may inspect the root domain property --- > - Regular sleep progresses - some drivers may inspect the root domain property 60c60 < The kernel segment "__HIB" is written uncompressed to the image. This segment of code and data --- > The kernel segment "__HIB" is written uncompressed to the image. This segment of code and data 64c64 < More areas are removed from the bitmaps (after they have been written to the image) - the --- > More areas are removed from the bitmaps (after they have been written to the image) - the 66c66 < Each wired page is compressed and written and then each non-wired page. Compression and --- > Each wired page is compressed and written and then each non-wired page. Compression and 70c70 < --- > 79,80c79,80 < hibernate_kernel_entrypoint(), passing the location of the image in memory. Translation is off, < only code & data in that section is safe to call since all the other wired pages are still --- > hibernate_kernel_entrypoint(), passing the location of the image in memory. Translation is off, > only code & data in that section is safe to call since all the other wired pages are still 90c90 < are removed from the software strutures, and the hash table is reinitialized. --- > are removed from the software strutures, and the hash table is reinitialized. 94c94 < for the remaining non wired pages. --- > for the remaining non wired pages. 109c109 < There is an Open/Close pair of calls made to each of the interfaces at various stages since there are --- > There is an Open/Close pair of calls made to each of the interfaces at various stages since there are 119c119 < from the low level bits (motherboard I/O etc). There is only one thread running. The close can be --- > from the low level bits (motherboard I/O etc). There is only one thread running. 
The close can be 149d148 < #include 193,195d191 < < static uuid_string_t gIOHibernateBridgeBootSessionUUIDString; < 205,206d200 < static const OSSymbol * gIOHibernateBootSignatureKey; < static const OSSymbol * gIOBridgeBootSessionUUIDKey; 216a211,213 > static IOPolledFileIOVars * gDebugImageFileVars; > static IOLock * gDebugImageLock; > 230c227 < enum --- > enum 416c413 < gIOHibernateMode ^= (kIOHibernateModeDiscardCleanInactive --- > gIOHibernateMode ^= (kIOHibernateModeDiscardCleanInactive 460c457 < vars->handoffBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn, --- > vars->handoffBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn, 490c487 < err = hibernate_alloc_page_lists(&vars->page_list, --- > err = hibernate_alloc_page_lists(&vars->page_list, 518,519c515,516 < < HIBLOG("hibernate_page_list_setall preflight pageCount %d est comp %qd setfile %qd min %qd\n", --- > > HIBLOG("hibernate_page_list_setall preflight pageCount %d est comp %qd setfile %qd min %qd\n", 525c522 < { --- > { 528a526,535 > > // Invalidate the image file > if (gDebugImageLock) { > IOLockLock(gDebugImageLock); > if (gDebugImageFileVars != 0) { > IOSetBootImageNVRAM(0); > IOPolledFileClose(&gDebugImageFileVars, 0, 0, 0, 0, 0); > } > IOLockUnlock(gDebugImageLock); > } 530,536c537,540 < vars->volumeCryptKeySize = sizeof(vars->volumeCryptKey); < err = IOPolledFileOpen(gIOHibernateFilename, < (kIOPolledFileCreate | kIOPolledFileHibernate), < setFileSize, 0, < gIOHibernateCurrentHeader, sizeof(gIOHibernateCurrentHeader), < &vars->fileVars, &nvramData, < &vars->volumeCryptKey[0], &vars->volumeCryptKeySize); --- > err = IOPolledFileOpen(gIOHibernateFilename, setFileSize, 0, > gIOHibernateCurrentHeader, sizeof(gIOHibernateCurrentHeader), > &vars->fileVars, &nvramData, > &vars->volumeCryptKey[0], sizeof(vars->volumeCryptKey)); 558c562 < err = hibernate_setup(gIOHibernateCurrentHeader, --- > err = hibernate_setup(gIOHibernateCurrentHeader, 585c589 < if (vars->volumeCryptKeySize && --- > if (!uuid_is_null(vars->volumeCryptKey) && 589c593 < smcVars[0] = vars->volumeCryptKeySize; --- > smcVars[0] = sizeof(vars->volumeCryptKey); 598,599c602,603 < if (encryptedswap || vars->volumeCryptKeySize) < gIOHibernateMode ^= kIOHibernateModeEncrypt; --- > if (encryptedswap || !uuid_is_null(vars->volumeCryptKey)) > gIOHibernateMode ^= kIOHibernateModeEncrypt; 639c643 < && (data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(gIOHibernateBootSignatureKey))) --- > && (data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(kIOHibernateBootSignatureKey))) 670,674d673 < #if DEBUG || DEVELOPMENT < if (kIOLogHibernate & gIOKitDebug) IOKitKernelLogBuffer("H> rtc:", < &rtcVars, sizeof(rtcVars), &kprintf); < #endif /* DEBUG || DEVELOPMENT */ < 677c676 < { --- > { 691c690 < if (data && data->getLength() >= 4) fileData = OSDynamicCast(OSData, gIOChosenEntry->getProperty("boot-file-path")); --- > if (data->getLength() >= 4) fileData = OSDynamicCast(OSData, gIOChosenEntry->getProperty("boot-file-path")); 726,730d724 < < #if DEBUG || DEVELOPMENT < if (kIOLogHibernate & gIOKitDebug) IOKitKernelLogBuffer("H> bootnext:", < gIOHibernateBoot0082Data->getBytesNoCopy(), gIOHibernateBoot0082Data->getLength(), &kprintf); < #endif /* DEBUG || DEVELOPMENT */ 761,773d754 < < #if DEBUG || DEVELOPMENT < if (kIOLogHibernate & gIOKitDebug) < { < OSData * data = OSDynamicCast(OSData, IOService::getPMRootDomain()->getProperty(kIOHibernateSMCVariablesKey)); < if (data) < { < uintptr_t * smcVars = (typeof(smcVars)) data->getBytesNoCopy(); < 
IOKitKernelLogBuffer("H> smc:", < (const void *)smcVars[1], smcVars[0], &kprintf); < } < } < #endif /* DEBUG || DEVELOPMENT */ 811,818c792 < if (data) < { < gIOOptionsEntry->setProperty(gIOHibernateBootImageKey, data); < #if DEBUG || DEVELOPMENT < if (kIOLogHibernate & gIOKitDebug) IOKitKernelLogBuffer("H> boot-image:", < data->getBytesNoCopy(), data->getLength(), &kprintf); < #endif /* DEBUG || DEVELOPMENT */ < } --- > if (data) gIOOptionsEntry->setProperty(gIOHibernateBootImageKey, data); 828c802 < /* --- > /* 859,860c833,834 < rc = kern_write_file(vars->fileRef, vars->blockSize, < (caddr_t)(((uint8_t *)fileExtents) + sizeof(hdr.fileExtentMap)), --- > rc = kern_write_file(vars->fileRef, vars->blockSize, > (caddr_t)(((uint8_t *)fileExtents) + sizeof(hdr.fileExtentMap)), 866c840 < } --- > } 881a856,924 > extern "C" boolean_t root_is_CF_drive; > > void > IOOpenDebugDataFile(const char *fname, uint64_t size) > { > IOReturn err; > OSData * imagePath = NULL; > uint64_t padding; > > if (!gDebugImageLock) { > gDebugImageLock = IOLockAlloc(); > } > > if (root_is_CF_drive) return; > > // Try to get a lock, but don't block for getting lock > if (!IOLockTryLock(gDebugImageLock)) { > HIBLOG("IOOpenDebugDataFile: Failed to get lock\n"); > return; > } > > if (gDebugImageFileVars || !fname || !size) { > HIBLOG("IOOpenDebugDataFile: conditions failed\n"); > goto exit; > } > > padding = (PAGE_SIZE*2); // allocate couple more pages for header and fileextents > err = IOPolledFileOpen(fname, size+padding, 32ULL*1024*1024*1024, > NULL, 0, > &gDebugImageFileVars, &imagePath, NULL, 0); > > if ((kIOReturnSuccess == err) && imagePath) > { > if ((gDebugImageFileVars->fileSize < (size+padding)) || > (gDebugImageFileVars->fileExtents->getLength() > PAGE_SIZE)) { > // Can't use the file > IOPolledFileClose(&gDebugImageFileVars, 0, 0, 0, 0, 0); > HIBLOG("IOOpenDebugDataFile: too many file extents\n"); > goto exit; > } > > // write extents for debug data usage in EFI > IOWriteExtentsToFile(gDebugImageFileVars, kIOHibernateHeaderOpenSignature); > IOSetBootImageNVRAM(imagePath); > } > > exit: > IOLockUnlock(gDebugImageLock); > > if (imagePath) imagePath->release(); > return; > } > > void > IOCloseDebugDataFile() > { > IOSetBootImageNVRAM(0); > > if (gDebugImageLock) { > IOLockLock(gDebugImageLock); > if (gDebugImageFileVars != 0) { > IOPolledFileClose(&gDebugImageFileVars, 0, 0, 0, 0, 0); > } > IOLockUnlock(gDebugImageLock); > } > > > } > 899c942 < --- > 903c946 < --- > 960c1003 < screen += ((display->width --- > screen += ((display->width 1048,1049c1091,1092 < && vars->previewBuffer < && (data = OSDynamicCast(OSData, --- > && vars->previewBuffer > && (data = OSDynamicCast(OSData, 1071c1114 < vars->consoleMapping, graphicsInfo->depth, --- > vars->consoleMapping, graphicsInfo->depth, 1138,1140d1180 < IOReturn err; < OSData * data; < 1161c1201 < IOService::getPMRootDomain()->setProperty(kIOHibernateOptionsKey, --- > IOService::getPMRootDomain()->setProperty(kIOHibernateOptionsKey, 1172c1212 < IOService::getPMRootDomain()->setProperty(kIOHibernateGfxStatusKey, --- > IOService::getPMRootDomain()->setProperty(kIOHibernateGfxStatusKey, 1237c1277 < --- > 1250c1290 < --- > 1254c1294 < } --- > } 1256,1267d1295 < #if defined(__i386__) || defined(__x86_64__) < if (vars->volumeCryptKeySize) < { < IOBufferMemoryDescriptor * < bmd = IOBufferMemoryDescriptor::withBytes(&vars->volumeCryptKey[0], < vars->volumeCryptKeySize, kIODirectionOutIn); < if (!bmd) panic("IOBufferMemoryDescriptor"); < IOSetAPFSKeyStoreData(bmd); < 
bzero(&vars->volumeCryptKey[0], sizeof(vars->volumeCryptKey)); < } < #endif < 1272,1285d1299 < if (gIOChosenEntry < && (data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(gIOBridgeBootSessionUUIDKey))) < && (sizeof(gIOHibernateBridgeBootSessionUUIDString) <= data->getLength())) < { < bcopy(data->getBytesNoCopy(), &gIOHibernateBridgeBootSessionUUIDString[0], < sizeof(gIOHibernateBridgeBootSessionUUIDString)); < } < < if (vars->hwEncrypt) < { < err = IOPolledFilePollersSetEncryptionKey(vars->fileVars, NULL, 0); < HIBLOG("IOPolledFilePollersSetEncryptionKey(0,%x)\n", err); < } < 1317,1318d1330 < IOSetBootImageNVRAM(0); < 1366c1378 < SYSCTL_STRING(_kern, OID_AUTO, hibernatefile, --- > SYSCTL_STRING(_kern, OID_AUTO, hibernatefile, 1369c1381 < SYSCTL_STRING(_kern, OID_AUTO, bootsignature, --- > SYSCTL_STRING(_kern, OID_AUTO, bootsignature, 1372c1384 < SYSCTL_UINT(_kern, OID_AUTO, hibernatemode, --- > SYSCTL_UINT(_kern, OID_AUTO, hibernatemode, 1378,1380d1389 < SYSCTL_STRING(_kern_bridge, OID_AUTO, bootsessionuuid, < CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, < gIOHibernateBridgeBootSessionUUIDString, sizeof(gIOHibernateBridgeBootSessionUUIDString), ""); 1394a1404 > 1398,1400c1408 < gIOHibernateBootImageKey = OSSymbol::withCStringNoCopy(kIOHibernateBootImageKey); < gIOHibernateBootSignatureKey = OSSymbol::withCStringNoCopy(kIOHibernateBootSignatureKey); < gIOBridgeBootSessionUUIDKey = OSSymbol::withCStringNoCopy(kIOBridgeBootSessionUUIDKey); --- > gIOHibernateBootImageKey = OSSymbol::withCStringNoCopy(kIOHibernateBootImageKey); 1432,1439d1439 < if (gIOChosenEntry < && (data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(gIOBridgeBootSessionUUIDKey))) < && (sizeof(gIOHibernateBridgeBootSessionUUIDString) <= data->getLength())) < { < sysctl_register_oid(&sysctl__kern_bridge_bootsessionuuid); < bcopy(data->getBytesNoCopy(), &gIOHibernateBridgeBootSessionUUIDString[0], sizeof(gIOHibernateBridgeBootSessionUUIDString)); < } < 1445c1445 < static IOReturn --- > static IOReturn 1534c1534 < --- > 1547a1548 > bzero(&vars->volumeCryptKey[0], sizeof(vars->volumeCryptKey)); 1563c1564 < ((0 == (kIOHibernateModeSleep & gIOHibernateMode)) --- > ((0 == (kIOHibernateModeSleep & gIOHibernateMode)) 1575c1576 < HIBLOG("fileExtents[%d] %qx, %qx (%qx)\n", page, --- > HIBLOG("fileExtents[%d] %qx, %qx (%qx)\n", page, 1586c1587 < IOService::getPMRootDomain()->pmStatsRecordEvent( --- > IOService::getPMRootDomain()->pmStatsRecordEvent( 1588c1589 < do --- > do 1596,1597c1597,1598 < < HIBLOG("IOHibernatePollerOpen, ml_get_interrupts_enabled %d\n", --- > > HIBLOG("IOHibernatePollerOpen, ml_get_interrupts_enabled %d\n", 1606,1615c1607 < < if (vars->volumeCryptKeySize) < { < err = IOPolledFilePollersSetEncryptionKey(vars->fileVars, &vars->volumeCryptKey[0], vars->volumeCryptKeySize); < HIBLOG("IOPolledFilePollersSetEncryptionKey(%x)\n", err); < vars->hwEncrypt = (kIOReturnSuccess == err); < bzero(&vars->volumeCryptKey[0], sizeof(vars->volumeCryptKey)); < if (vars->hwEncrypt) header->options |= kIOHibernateOptionHWEncrypt; < } < --- > 1617c1609 < --- > 1651,1655d1642 < if (uuid_parse(&gIOHibernateBridgeBootSessionUUIDString[0], &header->bridgeBootSessionUUID[0])) < { < bzero(&header->bridgeBootSessionUUID[0], sizeof(header->bridgeBootSessionUUID)); < } < 1667c1654 < --- > 1678c1665 < err = IOHibernatePolledFileWrite(vars->fileVars, --- > err = IOHibernatePolledFileWrite(vars->fileVars, 1693c1680 < if (!vars->hwEncrypt && (kIOHibernateModeEncrypt & gIOHibernateMode)) --- > if (kIOHibernateModeEncrypt & 
gIOHibernateMode) 1711,1712c1698,1699 < err = IOHibernatePolledFileWrite(vars->fileVars, < (const uint8_t *) &pageAndCount, sizeof(pageAndCount), --- > err = IOHibernatePolledFileWrite(vars->fileVars, > (const uint8_t *) &pageAndCount, sizeof(pageAndCount), 1749c1736 < hibernate_set_page_state(vars->page_list, vars->page_list_wired, --- > hibernate_set_page_state(vars->page_list, vars->page_list_wired, 1754c1741 < --- > 1759c1746 < hibernate_set_page_state(vars->page_list, vars->page_list_wired, --- > hibernate_set_page_state(vars->page_list, vars->page_list_wired, 1766c1753 < --- > 1773c1760 < // mark more areas for no save, but these are not available --- > // mark more areas for no save, but these are not available 1777c1764 < --- > 1790c1777 < hibernate_set_page_state(vars->page_list, vars->page_list_wired, --- > hibernate_set_page_state(vars->page_list, vars->page_list_wired, 1800c1787 < hibernate_set_page_state(vars->page_list, vars->page_list_wired, --- > hibernate_set_page_state(vars->page_list, vars->page_list_wired, 1822,1823c1809,1810 < < HIBLOG("bitmap_size 0x%x, previewSize 0x%x, writing %d pages @ 0x%llx\n", --- > > HIBLOG("bitmap_size 0x%x, previewSize 0x%x, writing %d pages @ 0x%llx\n", 1829c1816 < { --- > { 1837a1825 > #define _pmap_is_noencrypt(x) (cpuAES ? false : pmap_is_noencrypt((x))) 1842c1830 < { --- > { 1844c1832 < if (!vars->hwEncrypt && (kIOHibernateModeEncrypt & gIOHibernateMode)) --- > if (kIOHibernateModeEncrypt & gIOHibernateMode) 1850,1851c1838,1839 < bcopy(&cryptvars->aes_iv[0], < &gIOHibernateCryptWakeContext.aes_iv[0], --- > bcopy(&cryptvars->aes_iv[0], > &gIOHibernateCryptWakeContext.aes_iv[0], 1857,1865c1845,1847 < if (cpuAES && (pageType == kWiredClear)) < { < count = 0; < } < else < { < count = hibernate_page_list_iterate((kWired & pageType) ? vars->page_list_wired : vars->page_list, < &ppnum); < } --- > count = hibernate_page_list_iterate((kWired & pageType) > ? 
vars->page_list_wired : vars->page_list, > &ppnum); 1869,1886c1851,1865 < if (!cpuAES) < { < if (count && (kWired & pageType) && needEncrypt) < { < uint32_t checkIndex; < for (checkIndex = 0; < (checkIndex < count) < && (((kEncrypt & pageType) == 0) == pmap_is_noencrypt(ppnum + checkIndex)); < checkIndex++) < {} < if (!checkIndex) < { < ppnum++; < continue; < } < count = checkIndex; < } < } --- > if (count && (kWired & pageType) && needEncrypt) > { > uint32_t checkIndex; > for (checkIndex = 0; > (checkIndex < count) > && (((kEncrypt & pageType) == 0) == _pmap_is_noencrypt(ppnum + checkIndex)); > checkIndex++) > {} > if (!checkIndex) > { > ppnum++; > continue; > } > count = checkIndex; > } 1894c1873 < --- > 1900,1901c1879,1880 < err = IOHibernatePolledFileWrite(vars->fileVars, < (const uint8_t *) &pageAndCount, sizeof(pageAndCount), --- > err = IOHibernatePolledFileWrite(vars->fileVars, > (const uint8_t *) &pageAndCount, sizeof(pageAndCount), 1906c1885 < --- > 1915c1894 < --- > 1921c1900 < --- > 1924c1903 < (WK_word*) compressed, --- > (WK_word*) compressed, 1935c1914 < if (pageCompressedSize == 0) --- > if (pageCompressedSize == 0) 1945c1924 < else --- > else 1952c1931 < --- > 1957c1936 < --- > 1961c1940 < --- > 1965c1944 < --- > 2036c2015 < --- > 2041c2020 < --- > 2049c2028 < --- > 2062c2041 < --- > 2065c2044 < (uint8_t *) header, sizeof(IOHibernateImageHeader), --- > (uint8_t *) header, sizeof(IOHibernateImageHeader), 2072c2051 < --- > 2075c2054 < IOService::getPMRootDomain()->pmStatsRecordEvent( --- > IOService::getPMRootDomain()->pmStatsRecordEvent( 2083,2084c2062,2063 < HIBLOG("comp bytes: %qd time: %qd ms %qd Mb/s, ", < compBytes, --- > HIBLOG("comp bytes: %qd time: %qd ms %qd Mb/s, ", > compBytes, 2089,2091c2068,2070 < HIBLOG("crypt bytes: %qd time: %qd ms %qd Mb/s, ", < vars->fileVars->cryptBytes, < nsec / 1000000ULL, --- > HIBLOG("crypt bytes: %qd time: %qd ms %qd Mb/s, ", > vars->fileVars->cryptBytes, > nsec / 1000000ULL, 2094c2073 < HIBLOG("\nimage %qd (%lld%%), uncompressed %qd (%d), compressed %qd (%d%%), sum1 %x, sum2 %x\n", --- > HIBLOG("\nimage %qd (%lld%%), uncompressed %qd (%d), compressed %qd (%d%%), sum1 %x, sum2 %x\n", 2100c2079 < HIBLOG("svPageCount %d, zvPageCount %d, wiredPagesEncrypted %d, wiredPagesClear %d, dirtyPagesEncrypted %d\n", --- > HIBLOG("svPageCount %d, zvPageCount %d, wiredPagesEncrypted %d, wiredPagesClear %d, dirtyPagesEncrypted %d\n", 2107c2086 < ProgressUpdate(gIOHibernateGraphicsInfo, --- > ProgressUpdate(gIOHibernateGraphicsInfo, 2147c2126 < extern "C" void --- > extern "C" void 2179c2158 < gIOHibernateCurrentHeader->diag[0], gIOHibernateCurrentHeader->diag[1], --- > gIOHibernateCurrentHeader->diag[0], gIOHibernateCurrentHeader->diag[1], 2203c2182 < HIBLOG("booter start at %d ms smc %d ms, [%d, %d, %d] total %d ms, dsply %d, %d ms, tramp %d ms\n", --- > HIBLOG("booter start at %d ms smc %d ms, [%d, %d, %d] total %d ms, dsply %d, %d ms, tramp %d ms\n", 2218c2197 < if ((0 != (kIOHibernateModeSleep & gIOHibernateMode)) --- > if ((0 != (kIOHibernateModeSleep & gIOHibernateMode)) 2230,2232c2209,2210 < bool done = false; < bool foundCryptData = false; < bool foundVolumeEncryptData = false; --- > bool done = false; > bool foundCryptData = false; 2264,2272d2241 < case kIOHibernateHandoffTypeVolumeCryptKey: < if (handoff->bytecount == vars->volumeCryptKeySize) < { < bcopy(data, &vars->volumeCryptKey[0], vars->volumeCryptKeySize); < foundVolumeEncryptData = true; < } < else panic("kIOHibernateHandoffTypeVolumeCryptKey(%d)", handoff->bytecount); < break; < 
2277c2246 < hibernate_newruntime_map(data, handoff->bytecount, --- > hibernate_newruntime_map(data, handoff->bytecount, 2281c2250 < --- > 2284c2253 < --- > 2299c2268 < } --- > } 2301,2305c2270,2271 < < if (vars->hwEncrypt && !foundVolumeEncryptData) < panic("no volumeCryptKey"); < else if (cryptvars && !foundCryptData) < panic("hibernate handoff"); --- > if (cryptvars && !foundCryptData) > panic("hibernate handoff"); 2308,2309c2274,2275 < gIOHibernateGraphicsInfo->physicalAddress, gIOHibernateGraphicsInfo->depth, < gIOHibernateGraphicsInfo->width, gIOHibernateGraphicsInfo->height, gIOHibernateGraphicsInfo->gfxStatus); --- > gIOHibernateGraphicsInfo->physicalAddress, gIOHibernateGraphicsInfo->depth, > gIOHibernateGraphicsInfo->width, gIOHibernateGraphicsInfo->height, gIOHibernateGraphicsInfo->gfxStatus); 2313c2279 < vars->videoMapSize = round_page(gIOHibernateGraphicsInfo->height --- > vars->videoMapSize = round_page(gIOHibernateGraphicsInfo->height 2318c2284 < IOMapPages(kernel_map, --- > IOMapPages(kernel_map, 2325c2291 < ProgressUpdate(gIOHibernateGraphicsInfo, --- > ProgressUpdate(gIOHibernateGraphicsInfo, 2345,2353d2310 < if (vars->hwEncrypt) < { < err = IOPolledFilePollersSetEncryptionKey(vars->fileVars, < &vars->volumeCryptKey[0], vars->volumeCryptKeySize); < HIBLOG("IOPolledFilePollersSetEncryptionKey(%x) %ld\n", err, vars->volumeCryptKeySize); < if (kIOReturnSuccess != err) panic("IOPolledFilePollersSetEncryptionKey(0x%x)", err); < cryptvars = 0; < } < 2417c2374 < --- > 2424c2381 < else --- > else 2454c2411 < HIBPRINT("pages %d (%d%%)\n", pagesDone, --- > HIBPRINT("pages %d (%d%%)\n", pagesDone, 2475c2432 < IOService::getPMRootDomain()->pmStatsRecordEvent( --- > IOService::getPMRootDomain()->pmStatsRecordEvent( 2477c2434 < IOService::getPMRootDomain()->pmStatsRecordEvent( --- > IOService::getPMRootDomain()->pmStatsRecordEvent( 2489c2446 < HIBLOG("hibernate_machine_init pagesDone %d sum2 %x, time: %d ms, disk(0x%x) %qd Mb/s, ", --- > HIBLOG("hibernate_machine_init pagesDone %d sum2 %x, time: %d ms, disk(0x%x) %qd Mb/s, ", 2494,2495c2451,2452 < HIBLOG("comp bytes: %qd time: %qd ms %qd Mb/s, ", < compBytes, --- > HIBLOG("comp bytes: %qd time: %qd ms %qd Mb/s, ", > compBytes, 2500,2502c2457,2459 < HIBLOG("crypt bytes: %qd time: %qd ms %qd Mb/s\n", < vars->fileVars->cryptBytes, < nsec / 1000000ULL, --- > HIBLOG("crypt bytes: %qd time: %qd ms %qd Mb/s\n", > vars->fileVars->cryptBytes, > nsec / 1000000ULL, 2568a2526,2528 > > >
./iokit/Kernel/IOMemoryDescriptor.cpp differences detected: 45d44 < #include 71a71,74 > // osfmk/device/iokit_rpc.c > unsigned int IODefaultCacheBits(addr64_t pa); > unsigned int IOTranslateCacheBits(struct phys_entry *pp); > 571,578c574 < if (kIOMemoryBufferPurgeable & _flags) < { < prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY); < if (VM_KERN_MEMORY_SKYWALK == tag) < { < prot |= MAP_MEM_LEDGER_TAG_NETWORK; < } < } --- > if (kIOMemoryBufferPurgeable & _flags) prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY); 615c611 < err = mach_make_memory_entry_internal(map, --- > err = mach_make_memory_entry_64(map, 877c873 < if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) --- > if ((reserved != NULL) && (reserved->dp.devicePager) && (_memoryEntries == NULL) && (_wireCount != 0)) 1708d1703 < if (_kernelTag == gIOSurfaceTag) _userTag = VM_MEMORY_IOSURFACE; 1967,1969d1961 < #if defined(__x86_64__) < // copypv does not cppvFsnk on intel < #else 1971d1962 < #endif 2087,2088d2077 < if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockLock(_prepareLock); < 2113,2114d2101 < < if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockUnlock(_prepareLock); 3651d3637 < && (!(kIOMapUnique & options)) 4545,4546c4531,4533 < IOMemoryDescriptor * mapDesc = 0; < __block IOMemoryMap * result = 0; --- > IOMemoryDescriptor * mapDesc = 0; > IOMemoryMap * result = 0; > OSIterator * iter; 4591c4578 < if (_mappings) _mappings->iterateObjects(^(OSObject * object) --- > if( (iter = OSCollectionIterator::withCollection(_mappings))) 4593,4594c4580,4581 < IOMemoryMap * lookMapping = (IOMemoryMap *) object; < if ((result = lookMapping->copyCompatible(mapping))) --- > IOMemoryMap * lookMapping; > while ((lookMapping = (IOMemoryMap *) iter->getNextObject())) 4596,4598c4583,4588 < addMapping(result); < result->setMemoryDescriptor(this, offset); < return (true); --- > if ((result = lookMapping->copyCompatible(mapping))) > { > addMapping(result); > result->setMemoryDescriptor(this, offset); > break; > } 4600,4601c4590,4591 < return (false); < }); --- > iter->release(); > } 4721,4722c4711,4712 < OSSymbol const *keys[2] = {0}; < OSObject *values[2] = {0}; --- > OSSymbol const *keys[2]; > OSObject *values[2]; 4724d4713 < vm_size_t vcopy_size; 4729c4718 < } *vcopy = NULL; --- > } *vcopy; 4731c4720 < bool result = false; --- > bool result; 4741,4749c4730,4731 < if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) { < result = false; < goto bail; < } < vcopy = (SerData *) IOMalloc(vcopy_size); < if (vcopy == 0) { < result = false; < goto bail; < } --- > vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges); > if (vcopy == 0) return false; 4753a4736,4740 > result = false; > values[0] = values[1] = 0; > > // From this point on we can go to bail. > 4813c4800 < IOFree(vcopy, vcopy_size); --- > IOFree(vcopy, sizeof(SerData) * nRanges);
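The serialization hunk above replaces the unchecked IOMalloc(sizeof(SerData) * nRanges) with a size computed through os_mul_overflow, so a large nRanges can no longer wrap the multiplication and under-allocate. A minimal userspace sketch of the same pattern, assuming only the compiler builtin that os_mul_overflow wraps (the helper names here are mine, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Overflow-checked array sizing, mirroring the vcopy_size change above.
     * Assumption: __builtin_mul_overflow has the same true-on-overflow
     * semantics as the kernel's os_mul_overflow. */
    static void *alloc_array_checked(size_t elem_size, size_t count, size_t *out_size)
    {
        size_t total;
        if (__builtin_mul_overflow(elem_size, count, &total))
            return NULL;        /* would wrap: refuse instead of under-allocating */
        *out_size = total;      /* keep the size so the free path can use it */
        return malloc(total);
    }

    int main(void)
    {
        size_t sz = 0;
        void *p = alloc_array_checked(sizeof(int), 1u << 20, &sz);
        printf("allocated %zu bytes\n", p ? sz : 0);
        free(p);
        return 0;
    }

Recording the computed size and handing it back on the free path is what the matching IOFree(vcopy, vcopy_size) change above does.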
./iokit/Kernel/IOSharedDataQueue.cpp differences detected: 169d168 < // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers 215c214 < // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers --- > head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); 217d215 < head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE); 288,309c286,297 < // Publish the data we just enqueued < __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE); < < if (tail != head) { < // < // The memory barrier below paris with the one in ::dequeue < // so that either our store to the tail cannot be missed by < // the next dequeue attempt, or we will observe the dequeuer < // making the queue empty. < // < // Of course, if we already think the queue is empty, < // there's no point paying this extra cost. < // < __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); < head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); < } < < if (tail == head) { < // Send notification (via mach message) that data is now available. < sendDataAvailableNotification(); < } < return true; --- > // Update tail with release barrier > __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE); > > // Send notification (via mach message) that data is available. > > if ( ( tail == head ) /* queue was empty prior to enqueue() */ > || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED) ) ) /* queue was emptied during enqueue() */ > { > sendDataAvailableNotification(); > } > > return true; 321c309 < if (!dataQueue || (data && !dataSize)) { --- > if (!dataQueue) { 326,328c314,315 < // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers < headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); < tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE); --- > tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED); > headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE); 369,392c356,379 < } else { < // empty queue < return false; < } < < if (data) { < if (entrySize > *dataSize) { < // not enough space < return false; < } < memcpy(data, &(entry->data), entrySize); < *dataSize = entrySize; < } < < __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE); < < if (newHeadOffset == tailOffset) { < // < // If we are making the queue empty, then we need to make sure < // that either the enqueuer notices, or we notice the enqueue < // that raced with our making of the queue empty. < // < __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); < } --- > } > > if (entry) { > if (data) { > if (dataSize) { > if (entrySize <= *dataSize) { > memcpy(data, &(entry->data), entrySize); > __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE); > } else { > retVal = FALSE; > } > } else { > retVal = FALSE; > } > } else { > __c11_atomic_store((_Atomic UInt32 *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE); > } > > if (dataSize) { > *dataSize = entrySize; > } > } else { > retVal = FALSE; > }
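The IOSharedDataQueue.cpp hunk above moves enqueue/dequeue to a release store of the published index, an acquire load of the opposite index, and a __ATOMIC_SEQ_CST fence that pairs across the two sides whenever the queue transitions to empty, so a wake-up cannot be lost. A small single-producer/single-consumer sketch of that ordering in plain C11 atomics (the counting indices and fixed-size slots are mine for illustration; the real queue uses byte offsets into a shared buffer):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define QCAP 64u                        /* illustrative capacity */

    struct spsc {
        int slots[QCAP];
        _Atomic unsigned head;              /* advanced only by the consumer */
        _Atomic unsigned tail;              /* advanced only by the producer */
    };

    /* Producer: publish the slot, then decide whether the consumer may be idle. */
    static bool spsc_enqueue(struct spsc *q, int v)
    {
        unsigned tail = atomic_load_explicit(&q->tail, memory_order_relaxed);
        unsigned head = atomic_load_explicit(&q->head, memory_order_acquire);
        if (tail - head == QCAP) return false;              /* full */

        q->slots[tail % QCAP] = v;
        atomic_store_explicit(&q->tail, tail + 1, memory_order_release);

        if (tail != head) {
            /* Pairs with the fence in spsc_dequeue: either the consumer sees
             * the new tail, or we observe it having emptied the queue. */
            atomic_thread_fence(memory_order_seq_cst);
            head = atomic_load_explicit(&q->head, memory_order_relaxed);
        }
        if (tail == head) {
            /* Queue looked empty to the consumer: send the wake-up here. */
        }
        return true;
    }

    /* Consumer: take the slot, then fence if this dequeue emptied the queue. */
    static bool spsc_dequeue(struct spsc *q, int *out)
    {
        unsigned head = atomic_load_explicit(&q->head, memory_order_relaxed);
        unsigned tail = atomic_load_explicit(&q->tail, memory_order_acquire);
        if (head == tail) return false;                     /* empty */

        *out = q->slots[head % QCAP];
        atomic_store_explicit(&q->head, head + 1, memory_order_release);

        if (head + 1 == tail) {
            /* Pairs with the enqueue fence so a racing producer reloading
             * head is guaranteed to observe the store above. */
            atomic_thread_fence(memory_order_seq_cst);
        }
        return true;
    }

    int main(void)
    {
        static struct spsc q;               /* zero-initialized: empty */
        int v = 0;
        spsc_enqueue(&q, 42);
        return (spsc_dequeue(&q, &v) && v == 42) ? 0 : 1;
    }

The pre-enqueue tail is deliberately compared against the reloaded head: if the consumer has drained down to it, the consumer may believe the queue is empty, so the producer must notify.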
./iokit/Kernel/IOService.cpp differences detected: 36d35 < #include 470,474d468 < const char *getCpuInterruptDelayHolderName(void); < const char *getCpuInterruptDelayHolderName(void) { < return sCPULatencyHolderName[kCpuDelayInterrupt]; < } < 617,622c611 < for (i = 0; i < _numInterruptSources; i++) { < void * block = _interruptSourcesPrivate(this)[i].vectorBlock; < if (block) Block_release(block); < } < IOFree(_interruptSources, < _numInterruptSources * sizeofAllIOInterruptSource); --- > IOFree(_interruptSources, _numInterruptSources * sizeof(IOInterruptSource)); 733c722 < if( newProvider && adjParent) { --- > if( newProvider) { 1819,1844d1807 < < < static IOReturn < IOServiceInterestHandlerToBlock( void * target __unused, void * refCon, < UInt32 messageType, IOService * provider, < void * messageArgument, vm_size_t argSize ) < { < return ((IOServiceInterestHandlerBlock) refCon)(messageType, provider, messageArgument, argSize); < } < < IONotifier * IOService::registerInterest(const OSSymbol * typeOfInterest, < IOServiceInterestHandlerBlock handler) < { < IONotifier * notify; < void * block; < < block = Block_copy(handler); < if (!block) return (NULL); < < notify = registerInterest(typeOfInterest, &IOServiceInterestHandlerToBlock, NULL, block); < < if (!notify) Block_release(block); < < return (notify); < } < 1851,1857d1813 < if (!svcNotify || !(notify = OSDynamicCast(_IOServiceInterestNotifier, svcNotify))) < return( kIOReturnBadArgument ); < < notify->handler = handler; < notify->target = target; < notify->ref = ref; < 1864a1821,1823 > if (!svcNotify || !(notify = OSDynamicCast(_IOServiceInterestNotifier, svcNotify))) > return( kIOReturnBadArgument ); > 1867a1827,1829 > notify->handler = handler; > notify->target = target; > notify->ref = ref; 1978,1980d1939 < < if (handler == &IOServiceInterestHandlerToBlock) Block_release(ref); < 2322c2281 < /* wait for the victim to go non-busy */ --- > // wait for the victim to go non-busy 2327,2329d2285 < /* let others do work while we wait */ < gIOTerminateThread = 0; < IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); 2332c2288 < if (__improbable(waitResult == THREAD_TIMED_OUT)) { --- > if(__improbable(waitResult == THREAD_TIMED_OUT)) { 2334,2335c2290 < } < waitToBecomeTerminateThread(); --- > } 2720,2721c2675 < _workLoopAction( (IOWorkLoop::Action) &actionWillTerminate, < victim, (void *)(uintptr_t) options, (void *)(uintptr_t) doPhase2List ); --- > if( 0 == victim->getClient()) { 2722a2677,2683 > // no clients - will go to finalize > victim->scheduleFinalize(false); > > } else { > _workLoopAction( (IOWorkLoop::Action) &actionWillTerminate, > victim, (void *)(uintptr_t) options, (void *)(uintptr_t) doPhase2List ); > } 2734,2735c2695,2696 < bool scheduleFinalize = false; < if( victim->lockForArbitration( true )) { --- > > if( victim->lockForArbitration( true )) { 2737d2697 < scheduleFinalize = (0 == victim->getClient()); 2746,2747d2705 < // no clients - will go to finalize < if (scheduleFinalize) victim->scheduleFinalize(false); 2758c2716 < bool sendFinal = false; --- > 2760,2766c2718 < if (victim->lockForArbitration(true)) { < sendFinal = (0 == (victim->__state[1] & kIOServiceFinalized)); < if (sendFinal) victim->__state[1] |= kIOServiceFinalized; < victim->unlockForArbitration(); < } < if (sendFinal) { < _workLoopAction( (IOWorkLoop::Action) &actionFinalize, --- > _workLoopAction( (IOWorkLoop::Action) &actionFinalize, 2768d2719 < } 2797,2815c2748,2760 < bool deferStop = (0 != (kIOServiceInactiveState & 
client->__state[0])); < IOLockUnlock( gJobsLock ); < if (deferStop && client->lockForArbitration(true)) { < deferStop = (0 == (client->__state[1] & kIOServiceFinalized)); < //deferStop = (!deferStop && (0 != client->getClient())); < //deferStop = (0 != client->getClient()); < client->unlockForArbitration(); < if (deferStop) { < TLOG("%s[0x%qx]::defer stop()\n", client->getName(), regID2); < IOServiceTrace(IOSERVICE_TERMINATE_STOP_DEFER, < (uintptr_t) regID1, < (uintptr_t) (regID1 >> 32), < (uintptr_t) regID2, < (uintptr_t) (regID2 >> 32)); < < idx++; < IOLockLock( gJobsLock ); < continue; < } --- > if( (kIOServiceInactiveState & client->__state[0]) && client->getClient()) { > TLOG("%s[0x%qx]::defer stop(%s[0x%qx])\n", > client->getName(), regID2, > client->getClient()->getName(), client->getClient()->getRegistryEntryID()); > IOServiceTrace( > IOSERVICE_TERMINATE_STOP_DEFER, > (uintptr_t) regID1, > (uintptr_t) (regID1 >> 32), > (uintptr_t) regID2, > (uintptr_t) (regID2 >> 32)); > > idx++; > continue; 2816a2762,2763 > > IOLockUnlock( gJobsLock ); 4719,4746d4665 < static bool < IOServiceMatchingNotificationHandlerToBlock( void * target __unused, void * refCon, < IOService * newService, < IONotifier * notifier ) < { < return ((IOServiceMatchingNotificationHandlerBlock) refCon)(newService, notifier); < } < < IONotifier * IOService::addMatchingNotification( < const OSSymbol * type, OSDictionary * matching, < SInt32 priority, < IOServiceMatchingNotificationHandlerBlock handler) < { < IONotifier * notify; < void * block; < < block = Block_copy(handler); < if (!block) return (NULL); < < notify = addMatchingNotification(type, matching, < &IOServiceMatchingNotificationHandlerToBlock, NULL, block, priority); < < if (!notify) Block_release(block); < < return (notify); < } < < 5048,5050d4966 < < if (handler == &IOServiceMatchingNotificationHandlerToBlock) Block_release(ref); < 6373,6374c6289 < interruptSources = (IOInterruptSource *)IOMalloc( < numSources * sizeofAllIOInterruptSource); --- > interruptSources = (IOInterruptSource *)IOMalloc(numSources * sizeof(IOInterruptSource)); 6377c6292 < bzero(interruptSources, numSources * sizeofAllIOInterruptSource); --- > bzero(interruptSources, numSources * sizeof(IOInterruptSource)); 6424c6339 < /* Try to resolve the interrupt */ --- > /* Try to reslove the interrupt */ 6450,6474d6364 < static void IOServiceInterruptActionToBlock( OSObject * target, void * refCon, < IOService * nub, int source ) < { < ((IOInterruptActionBlock)(refCon))(nub, source); < } < < IOReturn IOService::registerInterruptBlock(int source, OSObject *target, < IOInterruptActionBlock handler) < { < IOReturn ret; < void * block; < < block = Block_copy(handler); < if (!block) return (kIOReturnNoMemory); < < ret = registerInterrupt(source, target, &IOServiceInterruptActionToBlock, block); < if (kIOReturnSuccess != ret) { < Block_release(block); < return (ret); < } < _interruptSourcesPrivate(this)[source].vectorBlock = block; < < return (ret); < } < 6477d6366 < IOReturn ret; 6479c6368 < void *block; --- > IOReturn ret; 6485,6492c6374 < block = _interruptSourcesPrivate(this)[source].vectorBlock; < ret = interruptController->unregisterInterrupt(this, source); < if ((kIOReturnSuccess == ret) && (block = _interruptSourcesPrivate(this)[source].vectorBlock)) { < _interruptSourcesPrivate(this)[source].vectorBlock = NULL; < Block_release(block); < } < < return ret; --- > return interruptController->unregisterInterrupt(this, source); NO DIFFS in ./iokit/Kernel/IOStartIOKit.cpp

./iokit/Kernel/IOStringFuncs.c differences detected: 478,480c478,479 < if (n != 0) { < char *d = s1; < const char *s = s2; --- > char *os1; > int i = n; 482,491c481,490 < while (*d != 0) < d++; < do { < if ((*d = *s++) == '\0') < break; < d++; < } while (--n != 0); < *d = '\0'; < } < return (s1); --- > os1 = s1; > while (*s1++) > ; > --s1; > while ((*s1++ = *s2++)) > if (--i < 0) { > *--s1 = '\0'; > break; > } > return(os1);
./iokit/Kernel/IOCPU.cpp differences detected: 50,51d49 < extern "C" void sched_override_recommended_cores_for_sleep(void); < extern "C" void sched_restore_recommended_cores_after_sleep(void); 357c355 < IOCPU *targetCPU = (IOCPU *)target; --- > IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); 359,360c357,358 < if (targetCPU == NULL) return KERN_FAILURE; < return targetCPU->startCPU(start_paddr, arg_paddr); --- > if (targetCPU == 0) return KERN_FAILURE; > return targetCPU->startCPU(start_paddr, arg_paddr); 365c363 < IOCPU *targetCPU = (IOCPU *)target; --- > IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); 367c365 < targetCPU->haltCPU(); --- > if (targetCPU) targetCPU->haltCPU(); 372,373c370,371 < IOCPU *sourceCPU = (IOCPU *)source; < IOCPU *targetCPU = (IOCPU *)target; --- > IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source); > IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); 375c373 < sourceCPU->signalCPU(targetCPU); --- > if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU); 380,381c378,379 < IOCPU *sourceCPU = (IOCPU *)source; < IOCPU *targetCPU = (IOCPU *)target; --- > IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source); > IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); 383c381 < sourceCPU->signalCPUDeferred(targetCPU); --- > if (sourceCPU && targetCPU) sourceCPU->signalCPUDeferred(targetCPU); 388,389c386,387 < IOCPU *sourceCPU = (IOCPU *)source; < IOCPU *targetCPU = (IOCPU *)target; --- > IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source); > IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); 391c389 < sourceCPU->signalCPUCancel(targetCPU); --- > if (sourceCPU && targetCPU) sourceCPU->signalCPUCancel(targetCPU); 396,401c394,397 < IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); < < if (targetCPU == NULL) < panic("%s: invalid target CPU %p", __func__, target); < < targetCPU->initCPU(bootb); --- > IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); > > if (targetCPU) { > targetCPU->initCPU(bootb); 403c399 < if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false); --- > if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false); 404a401 > } 409c406,407 < IOCPU *targetCPU = (IOCPU*)target; --- > IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); > if (targetCPU) { 411c409 < if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true); --- > if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true); 413c411,412 < targetCPU->quiesceCPU(); --- > targetCPU->quiesceCPU(); > } 428,432c427 < IOCPU *targetCPU = (IOCPU*)target; < < if (targetCPU == nullptr) { < return; < } --- > IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); 434,438c429,435 < if (enable) { < targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0); < targetCPU->getProvider()->enableInterrupt(1); < } else { < targetCPU->getProvider()->disableInterrupt(1); --- > if (targetCPU) { > if (enable) { > targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0); > targetCPU->getProvider()->enableInterrupt(1); > } else { > targetCPU->getProvider()->disableInterrupt(1); > } 467,469d463 < #if defined(__arm64__) < sched_override_recommended_cores_for_sleep(); < #endif 535d528 < rootDomain->stop_watchdog_timer(); 540d532 < rootDomain->start_watchdog_timer(); 575,578d566 < < #if defined(__arm64__) < 
sched_restore_recommended_cores_after_sleep(); < #endif NO DIFFS in ./iokit/Kernel/IOHistogramReporter.cpp
NO DIFFS in ./iokit/Kernel/IOReporterDefs.h
NO DIFFS in ./iokit/Kernel/x86_64/IOAsmSupport.s
NO DIFFS in ./iokit/Kernel/x86_64/IOSharedLock.s
NO DIFFS in ./iokit/Kernel/IOReporter.cpp
NO DIFFS in ./iokit/Kernel/IOPMinformee.cpp
NO DIFFS in ./iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp
NO DIFFS in ./iokit/Families/IONVRAM/IONVRAMController.cpp
NO DIFFS in ./SETUP/setup.sh
NO DIFFS in ./SETUP/config/externs.c
NO DIFFS in ./SETUP/config/searchp.c
NO DIFFS in ./SETUP/config/config.h
NO DIFFS in ./SETUP/config/openp.c
NO DIFFS in ./SETUP/config/Makefile
NO DIFFS in ./SETUP/config/lexer.l
NO DIFFS in ./SETUP/config/mkioconf.c
NO DIFFS in ./SETUP/config/main.c
NO DIFFS in ./SETUP/config/mkmakefile.c
NO DIFFS in ./SETUP/config/parser.y
NO DIFFS in ./SETUP/config/mkheaders.c
NO DIFFS in ./SETUP/config/doconf
NO DIFFS in ./SETUP/installfile/Makefile
NO DIFFS in ./SETUP/installfile/installfile.c
NO DIFFS in ./SETUP/Makefile
NO DIFFS in ./SETUP/setsegname/Makefile
NO DIFFS in ./SETUP/setsegname/setsegname.c
NO DIFFS in ./SETUP/replacecontents/Makefile
NO DIFFS in ./SETUP/replacecontents/replacecontents.c

./SETUP/kextsymboltool/kextsymboltool.c differences detected: 477,480d476 < { "armv7", 12 /* CPU_TYPE_ARM */, 9 /* CPU_SUBTYPE_ARM_V7 */, NX_LittleEndian, NULL }, < { "armv7s", 12 /* CPU_TYPE_ARM */, 11 /* CPU_SUBTYPE_ARM_V7S */, NX_LittleEndian, NULL }, < { "armv7k", 12 /* CPU_TYPE_ARM */, 12 /* CPU_SUBTYPE_ARM_V7K */, NX_LittleEndian, NULL }, < { "arm64", 0x0100000c /* CPU_TYPE_ARM64 */, 0 /* CPU_SUBTYPE_ARM64_ALL */, NX_LittleEndian, NULL }, NO DIFFS in ./SETUP/kextsymboltool/Makefile
NO DIFFS in ./SETUP/json_compilation_db/Makefile
NO DIFFS in ./SETUP/json_compilation_db/json_compilation_db.c
NO DIFFS in ./SETUP/decomment/Makefile
NO DIFFS in ./SETUP/decomment/decomment.c
NO DIFFS in ./SETUP/newvers
NO DIFFS in ./bsd/man/man2/sigaction.2
NO DIFFS in ./bsd/man/man2/readlinkat.2
NO DIFFS in ./bsd/man/man2/pread.2
NO DIFFS in ./bsd/man/man2/utimes.2

./bsd/man/man2/fs_snapshot_create.2 differences detected: 1c1 < .\" Copyright (c) 2017-2018 Apple Computer, Inc. All rights reserved. --- > .\" Copyright (c) 2017 Apple Computer, Inc. All rights reserved. 23c23 < .Nm fs_snapshot_create --- > .Nm fs_snasphot_create 51c51 < of the filesystem frozen at a point in time. The Filesystem is identified by the --- > of the filesystem frozen at a point in time. The Filesystem is identified by the the 53c53 < parameter which should be a file descriptor associated with the root directory of the filesystem for which the snapshot is to be created. --- > parameter which should be a file descriptor associated with the root directory of the filesystem for the snapshot is to be created. 97c97 < and --- > , 99c99 < returns 0. Otherwise, a value of -1 is returned and errno is set to indicate the error. --- > and 101,103c101 < returns the number of entries successfully read. A return value of 0 indicates there are no more entries. < Otherwise, a value of -1 is returned and errno is set to indicate the error. Return values are the same as < .Xr getattrlistbulk 2 . --- > returns 0. Otherwise, a value of -1 is returned and errno is set to indicate the error. NO DIFFS in ./bsd/man/man2/i386_set_ldt.2
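A short caller for the fs_snapshot_create.2 page edited above, assuming the <sys/snapshot.h> declaration and that the call is entitlement-gated (without the VFS snapshot entitlement it fails, typically with EPERM); the volume path and snapshot name are illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/snapshot.h>

    /* Create a snapshot named "backup-1" of the filesystem mounted at "/".
     * The descriptor must reference the root directory of that filesystem. */
    int main(void)
    {
        int dirfd = open("/", O_RDONLY);
        if (dirfd < 0) { perror("open"); return 1; }

        if (fs_snapshot_create(dirfd, "backup-1", 0) != 0)
            perror("fs_snapshot_create");   /* e.g. EPERM without the entitlement */
        return 0;
    }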
NO DIFFS in ./bsd/man/man2/setauid.2
NO DIFFS in ./bsd/man/man2/sem_open.2
NO DIFFS in ./bsd/man/man2/getrusage.2
NO DIFFS in ./bsd/man/man2/ftruncate.2
NO DIFFS in ./bsd/man/man2/flock.2
NO DIFFS in ./bsd/man/man2/fstatat.2
NO DIFFS in ./bsd/man/man2/wait3.2
NO DIFFS in ./bsd/man/man2/munlock.2
NO DIFFS in ./bsd/man/man2/fstat.2
NO DIFFS in ./bsd/man/man2/faccessat.2
NO DIFFS in ./bsd/man/man2/getaudit.2
NO DIFFS in ./bsd/man/man2/getgroups.2
NO DIFFS in ./bsd/man/man2/fsetxattr.2
NO DIFFS in ./bsd/man/man2/aio_read.2
NO DIFFS in ./bsd/man/man2/getpriority.2
NO DIFFS in ./bsd/man/man2/recvfrom.2
NO DIFFS in ./bsd/man/man2/semctl.2
NO DIFFS in ./bsd/man/man2/ptrace.2
NO DIFFS in ./bsd/man/man2/getpeername.2
NO DIFFS in ./bsd/man/man2/access.2
NO DIFFS in ./bsd/man/man2/fork.2
NO DIFFS in ./bsd/man/man2/rename.2
NO DIFFS in ./bsd/man/man2/getattrlistat.2
NO DIFFS in ./bsd/man/man2/sem_wait.2
NO DIFFS in ./bsd/man/man2/writev.2
NO DIFFS in ./bsd/man/man2/revoke.2
NO DIFFS in ./bsd/man/man2/execve.2
NO DIFFS in ./bsd/man/man2/quotactl.2
NO DIFFS in ./bsd/man/man2/kevent.2
NO DIFFS in ./bsd/man/man2/pathconf.2
NO DIFFS in ./bsd/man/man2/undelete.2
NO DIFFS in ./bsd/man/man2/getrlimit.2
NO DIFFS in ./bsd/man/man2/shmget.2
NO DIFFS in ./bsd/man/man2/gettimeofday.2
NO DIFFS in ./bsd/man/man2/connectx.2
NO DIFFS in ./bsd/man/man2/auditon.2
NO DIFFS in ./bsd/man/man2/getauid.2
NO DIFFS in ./bsd/man/man2/i386_get_ldt.2
NO DIFFS in ./bsd/man/man2/pipe.2
NO DIFFS in ./bsd/man/man2/intro.2
NO DIFFS in ./bsd/man/man2/fgetxattr.2
NO DIFFS in ./bsd/man/man2/mkfifo.2
NO DIFFS in ./bsd/man/man2/setaudit.2
NO DIFFS in ./bsd/man/man2/mincore.2
NO DIFFS in ./bsd/man/man2/auditctl.2

./bsd/man/man2/send.2 differences detected: 233,235d232 < .\" ========== < .It Bq Er EADDRNOTAVAIL < The specified address is not available or no longer available on this machine.
./bsd/man/man2/getattrlist.2 differences detected: 1701,1704d1700 < .It Bq Er ERANGE < .Fa attrBufSize < is too small to hold a u_int32_t. < . NO DIFFS in ./bsd/man/man2/unlinkat.2
NO DIFFS in ./bsd/man/man2/fs_snapshot_rename.2
NO DIFFS in ./bsd/man/man2/poll.2
NO DIFFS in ./bsd/man/man2/openat.2
NO DIFFS in ./bsd/man/man2/listen.2

./bsd/man/man2/Makefile differences detected: 44d43 < errno.2 \ NO DIFFS in ./bsd/man/man2/errno.2
NO DIFFS in ./bsd/man/man2/mount.2
NO DIFFS in ./bsd/man/man2/kevent_qos.2
NO DIFFS in ./bsd/man/man2/fstatfs64.2
NO DIFFS in ./bsd/man/man2/pselect.2

./bsd/man/man2/getdirentries.2 differences detected: 71,75c71 < .Xr dir 5) < The order of the directory entries vended out via < .Fn getdirentries < is not specified. Some filesystems may return entries in lexicographic sort order < and others may not. --- > .Xr dir 5 )
./bsd/man/man2/searchfs.2 differences detected: 19c19 < .Dd November 16, 2017 --- > .Dd October 13, 2008 29c29 < .Fn searchfs "const char* path" "struct fssearchblock* searchBlock" "unsigned long* numMatches" "unsigned int scriptCode" "unsigned int options" "struct searchstate* state" --- > .Fn searchfs "const char* path" "struct fssearchblock* searchBlock" "unsigned int* numMatches" "unsigned int scriptCode" "unsigned int options" "struct searchstate* state" 821,822c821,822 < unsigned long matchCount; < unsigned long matchIndex; --- > unsigned int matchCount; > unsigned int matchIndex; NO DIFFS in ./bsd/man/man2/getaudit_addr.2
NO DIFFS in ./bsd/man/man2/fchmodat.2
NO DIFFS in ./bsd/man/man2/setgroups.2
NO DIFFS in ./bsd/man/man2/mkdirat.2
NO DIFFS in ./bsd/man/man2/wait4.2
NO DIFFS in ./bsd/man/man2/fsgetpath.2

./bsd/man/man2/exchangedata.2 differences detected: 76,85d75 < WARNING: This system call is largely supported only by HFS and AFP file systems. Many other < file systems, including APFS, do not support it. Further, it is not supported on iOS, tvOS, or watchOS. < It is recommended that callers refer < instead to < .Fn rename < or < .Fn renamex_np < to conduct safe-save operations instead. < .Pp < . 128d117 < This includes APFS volumes. NO DIFFS in ./bsd/man/man2/link.2
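Since the exchangedata.2 update above points safe-save callers at rename/renamex_np instead, here is a minimal swap-based sketch; it assumes the Darwin renamex_np declaration and RENAME_SWAP flag from <stdio.h>, and the paths and fallback errno are placeholders:

    #include <errno.h>
    #include <stdio.h>      /* renamex_np() and RENAME_SWAP on Darwin */

    /* Safe-save: write the new contents to a temporary file first, then
     * atomically swap it with the original so readers never see a torn file. */
    static int safe_save_swap(const char *temp_path, const char *final_path)
    {
        if (renamex_np(temp_path, final_path, RENAME_SWAP) == 0)
            return 0;           /* temp_path now holds the previous contents */
        if (errno == ENOTSUP)   /* assumption: filesystem cannot swap in place */
            return rename(temp_path, final_path);
        return -1;
    }

    int main(void)
    {
        return safe_save_swap("/tmp/document.new", "/tmp/document.txt");
    }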
NO DIFFS in ./bsd/man/man2/bind.2
NO DIFFS in ./bsd/man/man2/mmap.2
NO DIFFS in ./bsd/man/man2/fsetattrlist.2
NO DIFFS in ./bsd/man/man2/removexattr.2
NO DIFFS in ./bsd/man/man2/aio_cancel.2
NO DIFFS in ./bsd/man/man2/getdtablesize.2
NO DIFFS in ./bsd/man/man2/vfork.2
NO DIFFS in ./bsd/man/man2/getdirentriesattr.2
NO DIFFS in ./bsd/man/man2/syscall.2
NO DIFFS in ./bsd/man/man2/read.2
NO DIFFS in ./bsd/man/man2/posix_madvise.2
NO DIFFS in ./bsd/man/man2/adjtime.2
NO DIFFS in ./bsd/man/man2/chflags.2
NO DIFFS in ./bsd/man/man2/reboot.2
NO DIFFS in ./bsd/man/man2/acct.2
NO DIFFS in ./bsd/man/man2/sigsuspend.2
NO DIFFS in ./bsd/man/man2/munmap.2
NO DIFFS in ./bsd/man/man2/socket.2
NO DIFFS in ./bsd/man/man2/settimeofday.2
NO DIFFS in ./bsd/man/man2/select.2
NO DIFFS in ./bsd/man/man2/sem_post.2
NO DIFFS in ./bsd/man/man2/recvmsg.2
NO DIFFS in ./bsd/man/man2/getfh.2
NO DIFFS in ./bsd/man/man2/fsync.2
NO DIFFS in ./bsd/man/man2/futimes.2
NO DIFFS in ./bsd/man/man2/setrlimit.2
NO DIFFS in ./bsd/man/man2/clonefileat.2
NO DIFFS in ./bsd/man/man2/fclonefileat.2
NO DIFFS in ./bsd/man/man2/lseek.2
NO DIFFS in ./bsd/man/man2/posix_spawn.2
NO DIFFS in ./bsd/man/man2/setxattr.2
NO DIFFS in ./bsd/man/man2/sendmsg.2
NO DIFFS in ./bsd/man/man2/setitimer.2
NO DIFFS in ./bsd/man/man2/fchflags.2
NO DIFFS in ./bsd/man/man2/chdir.2
NO DIFFS in ./bsd/man/man2/fsctl.2
NO DIFFS in ./bsd/man/man2/_exit.2
NO DIFFS in ./bsd/man/man2/fchdir.2
NO DIFFS in ./bsd/man/man2/mlock.2
NO DIFFS in ./bsd/man/man2/nfsclnt.2
NO DIFFS in ./bsd/man/man2/umask.2
NO DIFFS in ./bsd/man/man2/audit.2
NO DIFFS in ./bsd/man/man2/sendfile.2
NO DIFFS in ./bsd/man/man2/shmctl.2
NO DIFFS in ./bsd/man/man2/FD_ISSET.2
NO DIFFS in ./bsd/man/man2/aio_suspend.2
NO DIFFS in ./bsd/man/man2/sem_unlink.2
NO DIFFS in ./bsd/man/man2/sigaltstack.2
NO DIFFS in ./bsd/man/man2/ioctl.2
NO DIFFS in ./bsd/man/man2/chroot.2
NO DIFFS in ./bsd/man/man2/unlink.2
NO DIFFS in ./bsd/man/man2/truncate.2
NO DIFFS in ./bsd/man/man2/renamex_np.2
NO DIFFS in ./bsd/man/man2/fstat64.2
NO DIFFS in ./bsd/man/man2/madvise.2
NO DIFFS in ./bsd/man/man2/disconnectx.2

./bsd/man/man2/getsockname.2 differences detected: 66,76d65 < .Pp < Note: For the UNIX domain, the address length returned is the < .Fa address_len < parameter passed to the previous < .Xr bind 2 < system call and not the < .Va sa_len < field of the < .Fa address < parameter passed to < .Xr bind 2 . NO DIFFS in ./bsd/man/man2/getfsstat.2
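The note added to getsockname.2 above says that for AF_UNIX sockets the reported length is the address_len previously passed to bind, not the sa_len of the returned address. A quick check (socket path is illustrative):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>
    #include <unistd.h>

    int main(void)
    {
        int s = socket(AF_UNIX, SOCK_DGRAM, 0);
        struct sockaddr_un addr;
        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        strcpy(addr.sun_path, "/tmp/gsn_demo.sock");
        unlink(addr.sun_path);

        /* Bind with the exact in-use length rather than sizeof(addr). */
        socklen_t bound_len = (socklen_t)(offsetof(struct sockaddr_un, sun_path)
                                          + strlen(addr.sun_path) + 1);
        if (bind(s, (struct sockaddr *)&addr, bound_len) != 0) {
            perror("bind");
            return 1;
        }

        struct sockaddr_un out;
        socklen_t out_len = sizeof(out);
        getsockname(s, (struct sockaddr *)&out, &out_len);

        /* Per the note, out_len reports bound_len, not out.sun_len. */
        printf("bound_len=%u returned_len=%u\n",
               (unsigned)bound_len, (unsigned)out_len);
        close(s);
        return 0;
    }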
NO DIFFS in ./bsd/man/man2/accept.2
NO DIFFS in ./bsd/man/man2/kevent64.2
NO DIFFS in ./bsd/man/man2/getpgid.2

./bsd/man/man2/setuid.2 differences detected: 107c107 < of the real group ID or the saved set-group-ID. --- > of the real group ID or the saved set-user-ID. NO DIFFS in ./bsd/man/man2/getppid.2
NO DIFFS in ./bsd/man/man2/mkdir.2
NO DIFFS in ./bsd/man/man2/getegid.2
NO DIFFS in ./bsd/man/man2/semget.2
NO DIFFS in ./bsd/man/man2/getuid.2
NO DIFFS in ./bsd/man/man2/fremovexattr.2
NO DIFFS in ./bsd/man/man2/seteuid.2
NO DIFFS in ./bsd/man/man2/shm_unlink.2
NO DIFFS in ./bsd/man/man2/recv.2
NO DIFFS in ./bsd/man/man2/setattrlist.2
NO DIFFS in ./bsd/man/man2/setpgid.2
NO DIFFS in ./bsd/man/man2/fstatfs.2
NO DIFFS in ./bsd/man/man2/socketpair.2
NO DIFFS in ./bsd/man/man2/symlink.2
NO DIFFS in ./bsd/man/man2/setaudit_addr.2
NO DIFFS in ./bsd/man/man2/futimens.2
NO DIFFS in ./bsd/man/man2/aio_write.2
NO DIFFS in ./bsd/man/man2/aio_return.2
NO DIFFS in ./bsd/man/man2/renameatx_np.2
NO DIFFS in ./bsd/man/man2/chown.2
NO DIFFS in ./bsd/man/man2/setgid.2
NO DIFFS in ./bsd/man/man2/readlink.2
NO DIFFS in ./bsd/man/man2/fchown.2
NO DIFFS in ./bsd/man/man2/setsockopt.2
NO DIFFS in ./bsd/man/man2/semop.2
NO DIFFS in ./bsd/man/man2/geteuid.2
NO DIFFS in ./bsd/man/man2/getgid.2
NO DIFFS in ./bsd/man/man2/getpid.2
NO DIFFS in ./bsd/man/man2/getxattr.2
NO DIFFS in ./bsd/man/man2/setegid.2
NO DIFFS in ./bsd/man/man2/sigprocmask.2
NO DIFFS in ./bsd/man/man2/mknod.2
NO DIFFS in ./bsd/man/man2/chmod.2
NO DIFFS in ./bsd/man/man2/fs_snapshot_delete.2
NO DIFFS in ./bsd/man/man2/dup2.2
NO DIFFS in ./bsd/man/man2/sync.2
NO DIFFS in ./bsd/man/man2/sendto.2
NO DIFFS in ./bsd/man/man2/kill.2
NO DIFFS in ./bsd/man/man2/getentropy.2
NO DIFFS in ./bsd/man/man2/fchownat.2
NO DIFFS in ./bsd/man/man2/setsid.2
NO DIFFS in ./bsd/man/man2/linkat.2
NO DIFFS in ./bsd/man/man2/shutdown.2
NO DIFFS in ./bsd/man/man2/sigstack.2
NO DIFFS in ./bsd/man/man2/fpathconf.2
NO DIFFS in ./bsd/man/man2/waitpid.2
NO DIFFS in ./bsd/man/man2/fgetattrlist.2
NO DIFFS in ./bsd/man/man2/getsid.2
NO DIFFS in ./bsd/man/man2/fchmod.2
NO DIFFS in ./bsd/man/man2/renameat.2
NO DIFFS in ./bsd/man/man2/flistxattr.2
NO DIFFS in ./bsd/man/man2/getpgrp.2
NO DIFFS in ./bsd/man/man2/nfssvc.2
NO DIFFS in ./bsd/man/man2/sem_close.2
NO DIFFS in ./bsd/man/man2/setpriority.2

./bsd/man/man2/getattrlistbulk.2 differences detected: 42,44d41 < The order of the directory entries (and their associated metadata) vended by < .Fn getattrlistbulk < is not specified. Some file systems may return entries in lexicographic sort order and others may not. NO DIFFS in ./bsd/man/man2/FD_SET.2
NO DIFFS in ./bsd/man/man2/setlogin.2
NO DIFFS in ./bsd/man/man2/wait.2
NO DIFFS in ./bsd/man/man2/issetugid.2
NO DIFFS in ./bsd/man/man2/lstat.2
NO DIFFS in ./bsd/man/man2/fhopen.2
NO DIFFS in ./bsd/man/man2/getsockopt.2
NO DIFFS in ./bsd/man/man2/getitimer.2
NO DIFFS in ./bsd/man/man2/shmat.2
NO DIFFS in ./bsd/man/man2/mprotect.2
NO DIFFS in ./bsd/man/man2/pwrite.2
NO DIFFS in ./bsd/man/man2/lstat64.2
NO DIFFS in ./bsd/man/man2/listxattr.2
NO DIFFS in ./bsd/man/man2/setreuid.2
NO DIFFS in ./bsd/man/man2/pthread_setugid_np.2
NO DIFFS in ./bsd/man/man2/setattrlistat.2

./bsd/man/man2/fcntl.2 differences detected: 59c59 < .Dd August 24, 2017 --- > .Dd February 17, 2011 156,159c156,157 < the space that is allocated can be the size requested, < larger than the size requested, or (if the < .Dv F_ALLOCATEALL < flag is not provided) smaller than the space requested. --- > the space that is allocated can be the same size or > larger than the space requested. 784,808d781 < .It Bq Er ENOSPC < The argument < .Fa cmd < is < .Dv F_PREALLOCATE < and either there is no space available on the volume containing < .Fa fildes < or < .Fa fst_flags < contains < .Dv F_ALLOCATEALL < and there is not enough space available on the volume containing < .Fa fildes < to satisfy the entire request. < .Pp < The argument < .Fa cmd < is < .Dv F_PUNCHHOLE < and there is not enough space available on the volume containing < .Fa fildes < to satisfy the request. As an example, a filesystem that supports < cloned files may return this error if punching a hole requires the < creation of a clone and there is not enough space available to do so. < .\" ========== 818,822d790 < .It Bq Er EPERM < The argument cmd is < .Dv F_PUNCHHOLE < and the calling process does not have file write permission. < .\" ========== NO DIFFS in ./bsd/man/man2/clonefile.2
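The rewritten F_PREALLOCATE text above distinguishes best-effort allocation from the all-or-nothing F_ALLOCATEALL behavior and its ENOSPC case. A sketch of the usual call sequence (the contiguous-then-fallback retry is a common convention, not mandated by the page; the fst_* fields follow the fstore_t structure fcntl.2 documents):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    /* Ask for `length` additional bytes past the current EOF of fd.
     * With F_ALLOCATEALL the request either succeeds in full or fails
     * (e.g. ENOSPC); fst_bytesalloc reports what was actually granted. */
    static int preallocate(int fd, off_t length)
    {
        fstore_t fst;
        memset(&fst, 0, sizeof(fst));
        fst.fst_flags   = F_ALLOCATECONTIG | F_ALLOCATEALL;
        fst.fst_posmode = F_PEOFPOSMODE;       /* offset relative to EOF */
        fst.fst_offset  = 0;
        fst.fst_length  = length;

        if (fcntl(fd, F_PREALLOCATE, &fst) == -1) {
            /* No contiguous run available: retry without the contiguity ask. */
            fst.fst_flags = F_ALLOCATEALL;
            if (fcntl(fd, F_PREALLOCATE, &fst) == -1)
                return -1;
        }
        printf("allocated %lld bytes\n", (long long)fst.fst_bytesalloc);
        return 0;
    }

    int main(void)
    {
        int fd = open("/tmp/prealloc_demo", O_CREAT | O_RDWR, 0644);
        return (fd >= 0 && preallocate(fd, 10 * 1024 * 1024) == 0) ? 0 : 1;
    }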
NO DIFFS in ./bsd/man/man2/fs_snapshot_list.2
NO DIFFS in ./bsd/man/man2/statfs.2
NO DIFFS in ./bsd/man/man2/connect.2
NO DIFFS in ./bsd/man/man2/shm_open.2
NO DIFFS in ./bsd/man/man2/FD_ZERO.2
NO DIFFS in ./bsd/man/man2/statfs64.2
NO DIFFS in ./bsd/man/man2/aio_error.2
NO DIFFS in ./bsd/man/man2/stat.2
NO DIFFS in ./bsd/man/man2/FD_CLR.2
NO DIFFS in ./bsd/man/man2/minherit.2
NO DIFFS in ./bsd/man/man2/setregid.2
NO DIFFS in ./bsd/man/man2/getlogin.2
NO DIFFS in ./bsd/man/man2/kqueue.2
NO DIFFS in ./bsd/man/man2/close.2
NO DIFFS in ./bsd/man/man2/readv.2
NO DIFFS in ./bsd/man/man2/stat64.2
NO DIFFS in ./bsd/man/man2/open.2
NO DIFFS in ./bsd/man/man2/shmdt.2
NO DIFFS in ./bsd/man/man2/utimensat.2
NO DIFFS in ./bsd/man/man2/sigpending.2
NO DIFFS in ./bsd/man/man2/FD_COPY.2
NO DIFFS in ./bsd/man/man2/lchown.2
NO DIFFS in ./bsd/man/man2/msync.2
NO DIFFS in ./bsd/man/man2/setpgrp.2
NO DIFFS in ./bsd/man/man2/EV_SET.2
NO DIFFS in ./bsd/man/man2/dup.2
NO DIFFS in ./bsd/man/man2/write.2
NO DIFFS in ./bsd/man/man2/symlinkat.2
NO DIFFS in ./bsd/man/man2/rmdir.2
NO DIFFS in ./bsd/man/man2/unmount.2
NO DIFFS in ./bsd/man/man5/Makefile
NO DIFFS in ./bsd/man/man5/dir.5
NO DIFFS in ./bsd/man/man5/acct.5
NO DIFFS in ./bsd/man/man5/core.5
NO DIFFS in ./bsd/man/man5/types.5
NO DIFFS in ./bsd/man/man5/dirent.5
NO DIFFS in ./bsd/man/man4/stderr.4
NO DIFFS in ./bsd/man/man4/ipl.4
NO DIFFS in ./bsd/man/man4/lo.4
NO DIFFS in ./bsd/man/man4/netintro.4
NO DIFFS in ./bsd/man/man4/arp.4
NO DIFFS in ./bsd/man/man4/faith.4
NO DIFFS in ./bsd/man/man4/ip6.4
NO DIFFS in ./bsd/man/man4/Makefile
NO DIFFS in ./bsd/man/man4/ip.4
NO DIFFS in ./bsd/man/man4/icmp6.4
NO DIFFS in ./bsd/man/man4/networking.4
NO DIFFS in ./bsd/man/man4/termios.4
NO DIFFS in ./bsd/man/man4/random.4
NO DIFFS in ./bsd/man/man4/udp.4
NO DIFFS in ./bsd/man/man4/dummynet.4
NO DIFFS in ./bsd/man/man4/unix.4
NO DIFFS in ./bsd/man/man4/divert.4
NO DIFFS in ./bsd/man/man4/pty.4
NO DIFFS in ./bsd/man/man4/ipsec.4
NO DIFFS in ./bsd/man/man4/stdin.4
NO DIFFS in ./bsd/man/man4/fd.4
NO DIFFS in ./bsd/man/man4/auditpipe.4
NO DIFFS in ./bsd/man/man4/tcp.4
NO DIFFS in ./bsd/man/man4/audit.4
NO DIFFS in ./bsd/man/man4/stdout.4
NO DIFFS in ./bsd/man/man4/urandom.4
NO DIFFS in ./bsd/man/man4/aio.4
NO DIFFS in ./bsd/man/man4/inet6.4
NO DIFFS in ./bsd/man/man4/icmp.4
NO DIFFS in ./bsd/man/man4/tty.4
NO DIFFS in ./bsd/man/man4/inet.4
NO DIFFS in ./bsd/man/man4/gif.4
NO DIFFS in ./bsd/man/man4/bpf.4
NO DIFFS in ./bsd/man/man4/route.4
NO DIFFS in ./bsd/man/man4/null.4
NO DIFFS in ./bsd/man/man4/stf.4
NO DIFFS in ./bsd/man/man4/ifmib.4
NO DIFFS in ./bsd/man/man3/posix_spawn_file_actions_addclose.3
NO DIFFS in ./bsd/man/man3/posix_spawnattr_init.3
NO DIFFS in ./bsd/man/man3/posix_spawnattr_setsigmask.3
NO DIFFS in ./bsd/man/man3/Makefile
NO DIFFS in ./bsd/man/man3/posix_spawnattr_setpgroup.3
NO DIFFS in ./bsd/man/man3/posix_spawn_file_actions_init.3
NO DIFFS in ./bsd/man/man3/queue.3
NO DIFFS in ./bsd/man/man3/posix_spawnattr_setspecialport_np.3
NO DIFFS in ./bsd/man/man3/posix_spawnattr_setflags.3

./bsd/man/man3/getiopolicy_np.3 differences detected: 25a26,39 > The I/O type is specified in the argument > .Fa iotype . > The only currently supported I/O type is > .Dv IOPOL_TYPE_DISK , > which can mean either the I/O policy for I/Os to local disks or to > remote volumes. > I/Os to local disks are I/Os sent to the media without going through a network, > including I/Os to internal and external hard drives, optical media in internal > and external drives, flash drives, floppy disks, ram disks, and mounted disk > images which reside on these media. > I/Os to remote volumes are I/Os that require network activity to complete the > operation. > This is currently only supported for remote volumes mounted by SMB or AFP. > .Pp 44,61c58,59 < .Pp < The I/O type is specified in the argument < .Fa iotype . < The currently supported I/O types are as follows: < .Bl -tag -width F1 < .It IOPOL_TYPE_DISK < This can mean either the I/O policy for I/Os to local disks or to < remote volumes. < I/Os to local disks are I/Os sent to the media without going through a network, < including I/Os to internal and external hard drives, optical media in internal < and external drives, flash drives, floppy disks, ram disks, and mounted disk < images which reside on these media. < I/Os to remote volumes are I/Os that require network activity to complete the < operation. < This is currently only supported for remote volumes mounted by SMB or AFP. < .Pp < IOPOL_TYPE_DISK supports following values for < .Fa policy: --- > .Fa Policy > can have the following values: 107,128d104 < .It IOPOL_TYPE_VFS_ATIME_UPDATES < This < .Fa iotype < lets users change the access time updates policy for the files accessed < by the current thread or process. < .Pp < IOPOL_TYPE_VFS_ATIME_UPDATES supports following values for < .Fa policy: < .Bl -tag -width IOPOL_ATIME_UPDATES_DEFAULT < .It IOPOL_ATIME_UPDATES_OFF < The ATIME_UPDATES_OFF policy turns off access time updation for files accessed. < This policy is useful for applications which access a large number of files < to reduce the metadata I/O writes. < .It IOPOL_ATIME_UPDATES_DEFAULT < This is the default I/O policy for new threads. < .El < .El < .Pp < Like with IOPOL_TYPE_DISK, the I/O policy of a newly created process is < inherited from its parent process. Access time updates are turned off if the < I/O policy is set to IOPOL_ATIME_UPDATES_OFF for the current thread or current < process. NO DIFFS in ./bsd/man/man3/posix_spawnattr_setbinpref_np.3
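The new IOPOL_TYPE_VFS_ATIME_UPDATES section above is aimed at processes that touch many files and want to skip access-time metadata writes. A small sketch using the setiopolicy_np/getiopolicy_np calls the page documents (scope and policy constants as described there; error handling kept minimal):

    #include <stdio.h>
    #include <sys/resource.h>

    /* Disable access-time updates for every file this process opens,
     * then read the policy back to confirm it took effect. */
    int main(void)
    {
        if (setiopolicy_np(IOPOL_TYPE_VFS_ATIME_UPDATES,
                           IOPOL_SCOPE_PROCESS,
                           IOPOL_ATIME_UPDATES_OFF) != 0) {
            perror("setiopolicy_np");
            return 1;
        }

        int cur = getiopolicy_np(IOPOL_TYPE_VFS_ATIME_UPDATES, IOPOL_SCOPE_PROCESS);
        printf("atime update policy is now %d\n", cur);
        return 0;
    }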
NO DIFFS in ./bsd/man/man3/posix_spawnattr_setsigdefault.3
NO DIFFS in ./bsd/man/Makefile
NO DIFFS in ./bsd/man/man7/Makefile
NO DIFFS in ./bsd/man/man7/sticky.7
NO DIFFS in ./bsd/man/man9/copystr.9
NO DIFFS in ./bsd/man/man9/style.9
NO DIFFS in ./bsd/man/man9/fuiword.9
NO DIFFS in ./bsd/man/man9/suibyte.9
NO DIFFS in ./bsd/man/man9/suiword.9
NO DIFFS in ./bsd/man/man9/fuibyte.9
NO DIFFS in ./bsd/man/man9/Makefile
NO DIFFS in ./bsd/man/man9/suword.9
NO DIFFS in ./bsd/man/man9/copyout.9
NO DIFFS in ./bsd/man/man9/subyte.9
NO DIFFS in ./bsd/man/man9/copyinstr.9
NO DIFFS in ./bsd/man/man9/fulong.9
NO DIFFS in ./bsd/man/man9/monotonic.9
NO DIFFS in ./bsd/man/man9/fetch.9
NO DIFFS in ./bsd/man/man9/intro.9
NO DIFFS in ./bsd/man/man9/suulong.9
NO DIFFS in ./bsd/man/man9/fuulong.9
NO DIFFS in ./bsd/man/man9/copyin.9
NO DIFFS in ./bsd/man/man9/copy.9
NO DIFFS in ./bsd/man/man9/sulong.9
NO DIFFS in ./bsd/man/man9/store.9
NO DIFFS in ./bsd/man/man9/fuword.9
NO DIFFS in ./bsd/man/man9/fubyte.9
NO DIFFS in ./bsd/crypto/aesxts.h
NO DIFFS in ./bsd/crypto/rc4/rc4.c
NO DIFFS in ./bsd/crypto/rc4/Makefile
NO DIFFS in ./bsd/crypto/rc4/rc4.h
NO DIFFS in ./bsd/crypto/Makefile
NO DIFFS in ./bsd/crypto/sha2.h
NO DIFFS in ./bsd/crypto/aes.h
NO DIFFS in ./bsd/crypto/des.h
NO DIFFS in ./bsd/crypto/doc/KernelCrypto.plist
NO DIFFS in ./bsd/crypto/doc/KernelCrypto.txt
NO DIFFS in ./bsd/crypto/sha1.h

./bsd/vfs/vfs_vnops.c differences detected: 1798c1798 < static int64_t --- > static intptr_t 1806c1806 < return (int64_t)cnt; --- > return (intptr_t)cnt; 1810c1810 < return 0; --- > return (intptr_t)0; 1814c1814 < return 1; --- > return (intptr_t)1; 1819,1822c1819,1822 < if (amount > INT64_MAX) { < return INT64_MAX; < } else if (amount < INT64_MIN) { < return INT64_MIN; --- > if (amount > (off_t)INTPTR_MAX) { > return INTPTR_MAX; > } else if (amount < (off_t)INTPTR_MIN) { > return INTPTR_MIN; 1824c1824 < return (int64_t)amount; --- > return (intptr_t)amount; 1938a1939,1940 > if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) > kn->kn_udata = kev->udata; NO DIFFS in ./bsd/vfs/vfs_utfconvdata.h

./bsd/vfs/vfs_cluster.c differences detected: 210,212d209 < static void cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes, boolean_t *first_pass, < off_t write_off, int write_cnt, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); < 216,217c213 < static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, < int (*callback)(buf_t, void *), void *callback_arg, int bflag); --- > static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, int (*callback)(buf_t, void *), void *callback_arg, int bflag); 219c215 < static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_ioitiated); --- > static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg); 221,222c217 < static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *), < void *callback_arg, int *err, boolean_t vm_initiated); --- > static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *), void *callback_arg, int *err); 224,228c219,221 < static int sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); < static int sparse_cluster_push(struct cl_writebehind *, void **cmapp, vnode_t vp, off_t EOF, int push_flag, < int io_flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); < static int sparse_cluster_add(struct cl_writebehind *, void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, < int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); --- > static void sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg); > static int sparse_cluster_push(void **cmapp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*)(buf_t, void *), void *callback_arg); > static void sparse_cluster_add(void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, int (*)(buf_t, void *), void *callback_arg); 497c490 < cluster_try_push(wbp, vp, newEOF, PUSH_ALL | flags, 0, callback, callback_arg, NULL, FALSE); --- > cluster_try_push(wbp, vp, newEOF, PUSH_ALL | flags, 0, callback, callback_arg, NULL); 714c707 < else if (((io_flags & B_READ) == 0) && ((error != ENXIO) || vnode_isswap(vp))) --- > else if (page_out && ((error != ENXIO) || vnode_isswap(vp))) 716c709 < * transient error on pageout/write path... leave pages unchanged --- > * transient error... 
leave pages unchanged 840,842c833,835 < cbp_head->b_upl, < upl_offset, < transaction_size); --- > cbp_head->b_upl, > upl_offset, > transaction_size); 891d883 < 895,897c887 < if (error) { < upl_set_iodone_error(upl, error); < --- > if (error) 899c889 < } else { --- > else { 2990,3263d2979 < void < cluster_update_state(vnode_t vp, vm_object_offset_t s_offset, vm_object_offset_t e_offset, boolean_t vm_initiated) < { < struct cl_extent cl; < boolean_t first_pass = TRUE; < < assert(s_offset < e_offset); < assert((s_offset & PAGE_MASK_64) == 0); < assert((e_offset & PAGE_MASK_64) == 0); < < cl.b_addr = (daddr64_t)(s_offset / PAGE_SIZE_64); < cl.e_addr = (daddr64_t)(e_offset / PAGE_SIZE_64); < < cluster_update_state_internal(vp, &cl, 0, TRUE, &first_pass, s_offset, (int)(e_offset - s_offset), < vp->v_un.vu_ubcinfo->ui_size, NULL, NULL, vm_initiated); < } < < < static void < cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes, < boolean_t *first_pass, off_t write_off, int write_cnt, off_t newEOF, < int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) < { < struct cl_writebehind *wbp; < int cl_index; < int ret_cluster_try_push; < u_int max_cluster_pgcount; < < < max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE; < < /* < * take the lock to protect our accesses < * of the writebehind and sparse cluster state < */ < wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED); < < if (wbp->cl_scmap) { < < if ( !(flags & IO_NOCACHE)) { < /* < * we've fallen into the sparse < * cluster method of delaying dirty pages < */ < sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated); < < lck_mtx_unlock(&wbp->cl_lockw); < return; < } < /* < * must have done cached writes that fell into < * the sparse cluster mechanism... we've switched < * to uncached writes on the file, so go ahead < * and push whatever's in the sparse map < * and switch back to normal clustering < */ < wbp->cl_number = 0; < < sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg, vm_initiated); < /* < * no clusters of either type present at this point < * so just go directly to start_new_cluster since < * we know we need to delay this I/O since we've < * already released the pages back into the cache < * to avoid the deadlock with sparse_cluster_push < */ < goto start_new_cluster; < } < if (*first_pass == TRUE) { < if (write_off == wbp->cl_last_write) < wbp->cl_seq_written += write_cnt; < else < wbp->cl_seq_written = write_cnt; < < wbp->cl_last_write = write_off + write_cnt; < < *first_pass = FALSE; < } < if (wbp->cl_number == 0) < /* < * no clusters currently present < */ < goto start_new_cluster; < < for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) { < /* < * check each cluster that we currently hold < * try to merge some or all of this write into < * one or more of the existing clusters... 
if < * any portion of the write remains, start a < * new cluster < */ < if (cl->b_addr >= wbp->cl_clusters[cl_index].b_addr) { < /* < * the current write starts at or after the current cluster < */ < if (cl->e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) { < /* < * we have a write that fits entirely < * within the existing cluster limits < */ < if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) < /* < * update our idea of where the cluster ends < */ < wbp->cl_clusters[cl_index].e_addr = cl->e_addr; < break; < } < if (cl->b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) { < /* < * we have a write that starts in the middle of the current cluster < * but extends beyond the cluster's limit... we know this because < * of the previous checks < * we'll extend the current cluster to the max < * and update the b_addr for the current write to reflect that < * the head of it was absorbed into this cluster... < * note that we'll always have a leftover tail in this case since < * full absorbtion would have occurred in the clause above < */ < wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount; < < cl->b_addr = wbp->cl_clusters[cl_index].e_addr; < } < /* < * we come here for the case where the current write starts < * beyond the limit of the existing cluster or we have a leftover < * tail after a partial absorbtion < * < * in either case, we'll check the remaining clusters before < * starting a new one < */ < } else { < /* < * the current write starts in front of the cluster we're currently considering < */ < if ((wbp->cl_clusters[cl_index].e_addr - cl->b_addr) <= max_cluster_pgcount) { < /* < * we can just merge the new request into < * this cluster and leave it in the cache < * since the resulting cluster is still < * less than the maximum allowable size < */ < wbp->cl_clusters[cl_index].b_addr = cl->b_addr; < < if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) { < /* < * the current write completely < * envelops the existing cluster and since < * each write is limited to at most max_cluster_pgcount pages < * we can just use the start and last blocknos of the write < * to generate the cluster limits < */ < wbp->cl_clusters[cl_index].e_addr = cl->e_addr; < } < break; < } < /* < * if we were to combine this write with the current cluster < * we would exceed the cluster size limit.... so, < * let's see if there's any overlap of the new I/O with < * the cluster we're currently considering... in fact, we'll < * stretch the cluster out to it's full limit and see if we < * get an intersection with the current write < * < */ < if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) { < /* < * the current write extends into the proposed cluster < * clip the length of the current write after first combining it's < * tail with the newly shaped cluster < */ < wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount; < < cl->e_addr = wbp->cl_clusters[cl_index].b_addr; < } < /* < * if we get here, there was no way to merge < * any portion of this write with this cluster < * or we could only merge part of it which < * will leave a tail... 
< * we'll check the remaining clusters before starting a new one < */ < } < } < if (cl_index < wbp->cl_number) < /* < * we found an existing cluster(s) that we < * could entirely merge this I/O into < */ < goto delay_io; < < if (defer_writes == FALSE && < wbp->cl_number == MAX_CLUSTERS && < wbp->cl_seq_written >= (MAX_CLUSTERS * (max_cluster_pgcount * PAGE_SIZE))) { < uint32_t n; < < if (vp->v_mount->mnt_minsaturationbytecount) { < n = vp->v_mount->mnt_minsaturationbytecount / MAX_CLUSTER_SIZE(vp); < < if (n > MAX_CLUSTERS) < n = MAX_CLUSTERS; < } else < n = 0; < < if (n == 0) { < if (disk_conditioner_mount_is_ssd(vp->v_mount)) < n = WRITE_BEHIND_SSD; < else < n = WRITE_BEHIND; < } < while (n--) < cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg, NULL, vm_initiated); < } < if (wbp->cl_number < MAX_CLUSTERS) { < /* < * we didn't find an existing cluster to < * merge into, but there's room to start < * a new one < */ < goto start_new_cluster; < } < /* < * no exisitng cluster to merge with and no < * room to start a new one... we'll try < * pushing one of the existing ones... if none of < * them are able to be pushed, we'll switch < * to the sparse cluster mechanism < * cluster_try_push updates cl_number to the < * number of remaining clusters... and < * returns the number of currently unused clusters < */ < ret_cluster_try_push = 0; < < /* < * if writes are not deferred, call cluster push immediately < */ < if (defer_writes == FALSE) { < < ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg, NULL, vm_initiated); < } < /* < * execute following regardless of writes being deferred or not < */ < if (ret_cluster_try_push == 0) { < /* < * no more room in the normal cluster mechanism < * so let's switch to the more expansive but expensive < * sparse mechanism.... 
< */ < sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg, vm_initiated); < sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated); < < lck_mtx_unlock(&wbp->cl_lockw); < return; < } < start_new_cluster: < wbp->cl_clusters[wbp->cl_number].b_addr = cl->b_addr; < wbp->cl_clusters[wbp->cl_number].e_addr = cl->e_addr; < < wbp->cl_clusters[wbp->cl_number].io_flags = 0; < < if (flags & IO_NOCACHE) < wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE; < < if (flags & IO_PASSIVE) < wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE; < < wbp->cl_number++; < delay_io: < lck_mtx_unlock(&wbp->cl_lockw); < return; < } < < 3291a3008 > struct cl_writebehind *wbp; 3292a3010 > u_int max_cluster_pgcount; 3320a3039 > max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE; 3577c3296 < ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); --- > ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); 3602,3609c3321,3322 < int do_zeroing = 1; < < io_size += start_offset; < < /* Force more restrictive zeroing behavior only on APFS */ < if ((vnode_tag(vp) == VT_APFS) && (newEOF < oldEOF)) { < do_zeroing = 0; < } --- > int cl_index; > int ret_cluster_try_push; 3611c3324 < if (do_zeroing && (upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) { --- > io_size += start_offset; 3613c3326,3327 < /* --- > if ((upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) { > /* 3619c3333 < cluster_zero(upl, io_size, upl_size - io_size, NULL); --- > cluster_zero(upl, io_size, upl_size - io_size, NULL); 3648a3363,3388 > /* > * if the IO_SYNC flag is set than we need to > * bypass any clusters and immediately issue > * the I/O > */ > goto issue_io; > } > /* > * take the lock to protect our accesses > * of the writebehind and sparse cluster state > */ > wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED); > > if (wbp->cl_scmap) { > > if ( !(flags & IO_NOCACHE)) { > /* > * we've fallen into the sparse > * cluster method of delaying dirty pages > */ > sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg); > > lck_mtx_unlock(&wbp->cl_lockw); > > continue; > } 3650,3659c3390,3394 < * if the IO_SYNC flag is set than we need to bypass < * any clustering and immediately issue the I/O < * < * we don't hold the lock at this point < * < * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set < * so that we correctly deal with a change in state of the hardware modify bit... < * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force < * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also < * responsible for generating the correct sized I/O(s) --- > * must have done cached writes that fell into > * the sparse cluster mechanism... 
we've switched > * to uncached writes on the file, so go ahead > * and push whatever's in the sparse map > * and switch back to normal clustering 3661,3663c3396,3530 < retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg, FALSE); < } else { < boolean_t defer_writes = FALSE; --- > wbp->cl_number = 0; > > sparse_cluster_push(&(wbp->cl_scmap), vp, newEOF, PUSH_ALL, 0, callback, callback_arg); > /* > * no clusters of either type present at this point > * so just go directly to start_new_cluster since > * we know we need to delay this I/O since we've > * already released the pages back into the cache > * to avoid the deadlock with sparse_cluster_push > */ > goto start_new_cluster; > } > if (first_pass) { > if (write_off == wbp->cl_last_write) > wbp->cl_seq_written += write_cnt; > else > wbp->cl_seq_written = write_cnt; > > wbp->cl_last_write = write_off + write_cnt; > > first_pass = FALSE; > } > if (wbp->cl_number == 0) > /* > * no clusters currently present > */ > goto start_new_cluster; > > for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) { > /* > * check each cluster that we currently hold > * try to merge some or all of this write into > * one or more of the existing clusters... if > * any portion of the write remains, start a > * new cluster > */ > if (cl.b_addr >= wbp->cl_clusters[cl_index].b_addr) { > /* > * the current write starts at or after the current cluster > */ > if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) { > /* > * we have a write that fits entirely > * within the existing cluster limits > */ > if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) > /* > * update our idea of where the cluster ends > */ > wbp->cl_clusters[cl_index].e_addr = cl.e_addr; > break; > } > if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) { > /* > * we have a write that starts in the middle of the current cluster > * but extends beyond the cluster's limit... we know this because > * of the previous checks > * we'll extend the current cluster to the max > * and update the b_addr for the current write to reflect that > * the head of it was absorbed into this cluster... 
> * note that we'll always have a leftover tail in this case since > * full absorbtion would have occurred in the clause above > */ > wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount; > > cl.b_addr = wbp->cl_clusters[cl_index].e_addr; > } > /* > * we come here for the case where the current write starts > * beyond the limit of the existing cluster or we have a leftover > * tail after a partial absorbtion > * > * in either case, we'll check the remaining clusters before > * starting a new one > */ > } else { > /* > * the current write starts in front of the cluster we're currently considering > */ > if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= max_cluster_pgcount) { > /* > * we can just merge the new request into > * this cluster and leave it in the cache > * since the resulting cluster is still > * less than the maximum allowable size > */ > wbp->cl_clusters[cl_index].b_addr = cl.b_addr; > > if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr) { > /* > * the current write completely > * envelops the existing cluster and since > * each write is limited to at most max_cluster_pgcount pages > * we can just use the start and last blocknos of the write > * to generate the cluster limits > */ > wbp->cl_clusters[cl_index].e_addr = cl.e_addr; > } > break; > } > > /* > * if we were to combine this write with the current cluster > * we would exceed the cluster size limit.... so, > * let's see if there's any overlap of the new I/O with > * the cluster we're currently considering... in fact, we'll > * stretch the cluster out to it's full limit and see if we > * get an intersection with the current write > * > */ > if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) { > /* > * the current write extends into the proposed cluster > * clip the length of the current write after first combining it's > * tail with the newly shaped cluster > */ > wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount; > > cl.e_addr = wbp->cl_clusters[cl_index].b_addr; > } > /* > * if we get here, there was no way to merge > * any portion of this write with this cluster > * or we could only merge part of it which > * will leave a tail... 
> * we'll check the remaining clusters before starting a new one > */ > } > } > if (cl_index < wbp->cl_number) > /* > * we found an existing cluster(s) that we > * could entirely merge this I/O into > */ > goto delay_io; 3665,3666c3532,3535 < if (vfs_flags(vp->v_mount) & MNT_DEFWRITE) < defer_writes = TRUE; --- > if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && > wbp->cl_number == MAX_CLUSTERS && > wbp->cl_seq_written >= (MAX_CLUSTERS * (max_cluster_pgcount * PAGE_SIZE))) { > uint32_t n; 3668,3669c3537,3579 < cluster_update_state_internal(vp, &cl, flags, defer_writes, &first_pass, < write_off, write_cnt, newEOF, callback, callback_arg, FALSE); --- > if (vp->v_mount->mnt_minsaturationbytecount) { > n = vp->v_mount->mnt_minsaturationbytecount / MAX_CLUSTER_SIZE(vp); > > if (n > MAX_CLUSTERS) > n = MAX_CLUSTERS; > } else > n = 0; > > if (n == 0) { > if (disk_conditioner_mount_is_ssd(vp->v_mount)) > n = WRITE_BEHIND_SSD; > else > n = WRITE_BEHIND; > } > while (n--) > cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg, NULL); > } > if (wbp->cl_number < MAX_CLUSTERS) { > /* > * we didn't find an existing cluster to > * merge into, but there's room to start > * a new one > */ > goto start_new_cluster; > } > /* > * no exisitng cluster to merge with and no > * room to start a new one... we'll try > * pushing one of the existing ones... if none of > * them are able to be pushed, we'll switch > * to the sparse cluster mechanism > * cluster_try_push updates cl_number to the > * number of remaining clusters... and > * returns the number of currently unused clusters > */ > ret_cluster_try_push = 0; > > /* > * if writes are not deferred, call cluster push immediately > */ > if (!((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE)) { > > ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg, NULL); 3670a3581,3625 > > /* > * execute following regardless of writes being deferred or not > */ > if (ret_cluster_try_push == 0) { > /* > * no more room in the normal cluster mechanism > * so let's switch to the more expansive but expensive > * sparse mechanism.... > */ > sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg); > sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, newEOF, callback, callback_arg); > > lck_mtx_unlock(&wbp->cl_lockw); > > continue; > } > start_new_cluster: > wbp->cl_clusters[wbp->cl_number].b_addr = cl.b_addr; > wbp->cl_clusters[wbp->cl_number].e_addr = cl.e_addr; > > wbp->cl_clusters[wbp->cl_number].io_flags = 0; > > if (flags & IO_NOCACHE) > wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE; > > if (bflag & CL_PASSIVE) > wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE; > > wbp->cl_number++; > delay_io: > lck_mtx_unlock(&wbp->cl_lockw); > > continue; > issue_io: > /* > * we don't hold the lock at this point > * > * we've already dropped the current upl, so pick it back up with COPYOUT_FROM set > * so that we correctly deal with a change in state of the hardware modify bit... > * we do this via cluster_push_now... by passing along the IO_SYNC flag, we force > * cluster_push_now to wait until all the I/Os have completed... 
cluster_push_now is also > * responsible for generating the correct sized I/O(s) > */ > retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg); 4405a4361 > boolean_t strict_uncached_IO = FALSE; 4469a4426,4427 > strict_uncached_IO = ubc_strict_uncached_IO(vp); > 4547c4505 < if ((flags & IO_ENCRYPTED) == 0) { --- > if ((strict_uncached_IO == FALSE) && ((flags & IO_ENCRYPTED) == 0)) { 4637c4595 < if ((flags & IO_ENCRYPTED) == 0) { --- > if ((strict_uncached_IO == FALSE) && ((flags & IO_ENCRYPTED) == 0)) { 4900,4909c4858 < if (flags & IO_ENCRYPTED) { < /* < * We cannot fall back to the copy path for encrypted I/O. If this < * happens, there is something wrong with the user buffer passed < * down. < */ < retval = EFAULT; < } else { < retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg); < } --- > retval = cluster_read_copy(vp, uio, io_req_size, filesize, flags, callback, callback_arg); 5415d5363 < int local_err = 0; 5485c5433 < retval = sparse_cluster_push(wbp, &scmap, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE); --- > retval = sparse_cluster_push(&scmap, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg); 5490,5497d5437 < < if (retval) { < if (wbp->cl_scmap != NULL) { < panic("cluster_push_err: Expected NULL cl_scmap\n"); < } < < wbp->cl_scmap = scmap; < } 5502c5442 < retval = sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE); --- > retval = sparse_cluster_push(&(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg); 5504,5506d5443 < < local_err = retval; < 5511,5513c5448 < retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, &local_err, FALSE); < if (err) < *err = local_err; --- > retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, err); 5534c5469 < wbp->cl_scmap, wbp->cl_number, retval, local_err, 0); --- > wbp->cl_scmap, wbp->cl_number, retval, 0, 0); 5574c5509 < cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg, int *err, boolean_t vm_initiated) --- > cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg, int *err) 5655,5657d5589 < if (vm_initiated == TRUE) < lck_mtx_unlock(&wbp->cl_lockw); < 5680c5612 < retval = cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg, vm_initiated); --- > retval = cluster_push_now(vp, &cl, EOF, flags, callback, callback_arg); 5682,5687c5614 < if (retval == 0) { < cl_pushed++; < < l_clusters[cl_index].b_addr = 0; < l_clusters[cl_index].e_addr = 0; < } else if (error == 0) { --- > if (error == 0 && retval) 5689c5616,5620 < } --- > > l_clusters[cl_index].b_addr = 0; > l_clusters[cl_index].e_addr = 0; > > cl_pushed++; 5694,5696d5624 < if (vm_initiated == TRUE) < lck_mtx_lock(&wbp->cl_lockw); < 5716c5644 < sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated); --- > sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg); 5736c5664 < sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated); --- > sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg); 5766,5767c5694 < cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, < int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) --- > cluster_push_now(vnode_t vp, struct cl_extent 
*cl, off_t EOF, int flags, int (*callback)(buf_t, void *), void *callback_arg) 5824,5830d5750 < < if (vm_initiated) { < vnode_pageout(vp, NULL, (upl_offset_t)0, upl_f_offset, (upl_size_t)upl_size, < UPL_MSYNC | UPL_VNODE_PAGER | UPL_KEEPCACHED, &error); < < return (error); < } 5941c5861 < KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, error, 0, 0); --- > KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, 0, 0, 0); 5950,5951c5870,5871 < static int < sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) --- > static void > sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg) 5954d5873 < int error; 5956c5875 < KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, 0, 0); --- > KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, kdebug_vnode(vp), wbp->cl_scmap, 0, 0, 0); 5968,5972c5887 < error = sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg, vm_initiated); < < if (error) { < break; < } --- > sparse_cluster_add(&(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg); 5977,5979c5892 < wbp->cl_number -= cl_index; < < KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, error, 0); --- > wbp->cl_number = 0; 5981c5894 < return error; --- > KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_END, kdebug_vnode(vp), wbp->cl_scmap, 0, 0, 0); 5991,5992c5904 < sparse_cluster_push(struct cl_writebehind *wbp, void **scmap, vnode_t vp, off_t EOF, int push_flag, < int io_flags, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) --- > sparse_cluster_push(void **scmap, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg) 5997d5908 < void *l_scmap; 6005,6006d5915 < l_scmap = *scmap; < 6009d5917 < 6013,6015d5920 < if (vm_initiated == TRUE) < lck_mtx_unlock(&wbp->cl_lockw); < 6019c5924 < retval = cluster_push_now(vp, &cl, EOF, io_flags, callback, callback_arg, vm_initiated); --- > retval = cluster_push_now(vp, &cl, EOF, io_flags & (IO_PASSIVE|IO_CLOSE), callback, callback_arg); 6023,6038c5928 < if (vm_initiated == TRUE) { < lck_mtx_lock(&wbp->cl_lockw); < < if (*scmap != l_scmap) < break; < } < < if (error) { < if (vfs_drt_mark_pages(scmap, offset, length, NULL) != KERN_SUCCESS) { < panic("Failed to restore dirty state on failure\n"); < } < < break; < } < < if ( !(push_flag & PUSH_ALL)) { --- > if ( !(push_flag & PUSH_ALL) ) 6040d5929 < } 6042c5931 < KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0); --- > KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), 0, 0, 0); 6051,6053c5940,5941 < static int < sparse_cluster_add(struct cl_writebehind *wbp, void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF, < int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) --- > static void > sparse_cluster_add(void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg) 6058d5945 < int error; 6071,6075c5958 < error = sparse_cluster_push(wbp, scmap, vp, EOF, 0, 0, callback, callback_arg, vm_initiated); < < if (error) { < break; < } --- > sparse_cluster_push(scmap, vp, EOF, 0, 0, callback, callback_arg); 6080,6082c5963 < KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, 
kdebug_vnode(vp), (*scmap), error, 0, 0); < < return error; --- > KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), 0, 0, 0); 6371c6252 < #define DRT_BITVECTOR_PAGES ((1024 * 256) / PAGE_SIZE) --- > #define DRT_BITVECTOR_PAGES ((1024 * 1024) / PAGE_SIZE) 6418d6298 < #if CONFIG_EMBEDDED 6427c6307 < * For DRT_BITVECTOR_SIZE = 64, the entry size is 16 bytes. --- > * For DRT_BITVECTOR_SIZE = 256, the entry size is 40 bytes. 6429,6430c6309,6310 < * The small hashtable allocation is 4096 bytes, so the modulus is 251. < * The large hashtable allocation is 32768 bytes, so the modulus is 2039. --- > * The small hashtable allocation is 1024 bytes, so the modulus is 23. > * The large hashtable allocation is 16384 bytes, so the modulus is 401. 6432,6434c6312,6313 < < #define DRT_HASH_SMALL_MODULUS 251 < #define DRT_HASH_LARGE_MODULUS 2039 --- > #define DRT_HASH_SMALL_MODULUS 23 > #define DRT_HASH_LARGE_MODULUS 401 6444,6476c6323,6324 < #define DRT_SMALL_ALLOCATION 4096 /* 80 bytes spare */ < #define DRT_LARGE_ALLOCATION 32768 /* 144 bytes spare */ < < #else < /* < * Hash table moduli. < * < * Since the hashtable entry's size is dependent on the size of < * the bitvector, and since the hashtable size is constrained to < * both being prime and fitting within the desired allocation < * size, these values need to be manually determined. < * < * For DRT_BITVECTOR_SIZE = 64, the entry size is 16 bytes. < * < * The small hashtable allocation is 16384 bytes, so the modulus is 1019. < * The large hashtable allocation is 131072 bytes, so the modulus is 8179. < */ < < #define DRT_HASH_SMALL_MODULUS 1019 < #define DRT_HASH_LARGE_MODULUS 8179 < < /* < * Physical memory required before the large hash modulus is permitted. < * < * On small memory systems, the large hash modulus can lead to phsyical < * memory starvation, so we avoid using it there. < */ < #define DRT_HASH_LARGE_MEMORY_REQUIRED (4 * 1024LL * 1024LL * 1024LL) /* 4GiB */ < < #define DRT_SMALL_ALLOCATION 16384 /* 80 bytes spare */ < #define DRT_LARGE_ALLOCATION 131072 /* 208 bytes spare */ < < #endif --- > #define DRT_SMALL_ALLOCATION 1024 /* 104 bytes spare */ > #define DRT_LARGE_ALLOCATION 16384 /* 344 bytes spare */ 6481,6496d6328 < * Hashtable entry. < */ < struct vfs_drt_hashentry { < u_int64_t dhe_control; < /* < * dhe_bitvector was declared as dhe_bitvector[DRT_BITVECTOR_PAGES / 32]; < * DRT_BITVECTOR_PAGES is defined as ((1024 * 256) / PAGE_SIZE) < * Since PAGE_SIZE is only known at boot time, < * -define MAX_DRT_BITVECTOR_PAGES for smallest supported page size (4k) < * -declare dhe_bitvector array for largest possible length < */ < #define MAX_DRT_BITVECTOR_PAGES (1024 * 256)/( 4 * 1024) < u_int32_t dhe_bitvector[MAX_DRT_BITVECTOR_PAGES/32]; < }; < < /* 6512c6344 < bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t)) --- > bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t)) 6517c6349,6367 < (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t)) --- > (DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t)) > > > > /* > * Hashtable entry. 
> */ > struct vfs_drt_hashentry { > u_int64_t dhe_control; > /* > * dhe_bitvector was declared as dhe_bitvector[DRT_BITVECTOR_PAGES / 32]; > * DRT_BITVECTOR_PAGES is defined as ((1024 * 1024) / PAGE_SIZE) > * Since PAGE_SIZE is only known at boot time, > * -define MAX_DRT_BITVECTOR_PAGES for smallest supported page size (4k) > * -declare dhe_bitvector array for largest possible length > */ > #define MAX_DRT_BITVECTOR_PAGES (1024 * 1024)/( 4 * 1024) > u_int32_t dhe_bitvector[MAX_DRT_BITVECTOR_PAGES/32]; > }; 6897,6898d6746 < if (ecount >= DRT_BITVECTOR_PAGES) < panic("ecount >= DRT_BITVECTOR_PAGES, cmap = %p, index = %d, bit = %d", cmap, index, pgoff+i); 6905,6907d6752 < if (ecount <= 0) < panic("ecount <= 0, cmap = %p, index = %d, bit = %d", cmap, index, pgoff+i); < assert(ecount > 0); 7018,7019c6863 < panic("vfs_drt: entry summary count > 0 but no bits set in map, cmap = %p, index = %d, count = %lld", < cmap, index, DRT_HASH_GET_COUNT(cmap, index)); --- > panic("vfs_drt: entry summary count > 0 but no bits set in map");
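The hunks above cover the delayed-write clustering and dirty-region-tracking (DRT) code: the left-hand (<) side factors the per-write cluster bookkeeping into cluster_update_state_internal() and threads a vm_initiated flag through cluster_try_push()/sparse_cluster_push()/cluster_push_now(), and it also shrinks each DRT hash entry's bitvector from 1 MiB to 256 KiB of coverage with CONFIG_EMBEDDED-specific hash moduli. The core merge decision, open-coded on the right-hand (>) side, is whether a new write extent can be absorbed into an existing write-behind cluster that may span at most max_cluster_pgcount (MAX_CLUSTER_SIZE(vp) / PAGE_SIZE) pages. The program below is an illustrative sketch of that decision, not the kernel code; the extent type and function names are simplified stand-ins and only the two most common absorb cases are modeled.

/*
 * Sketch: merging a delayed-write extent into a write-behind cluster that
 * may hold at most max_pg pages, following the cases in the hunks above.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t b_addr, e_addr; } extent_t;   /* [b, e) in pages */

/* Try to absorb 'w' into cluster 'c'; return true if fully absorbed. */
static bool cluster_absorb(extent_t *c, extent_t *w, uint64_t max_pg)
{
    if (w->b_addr >= c->b_addr) {
        /* write starts at or after the cluster */
        if (w->e_addr <= c->b_addr + max_pg) {
            /* fits entirely within the cluster's limit: grow the tail */
            if (w->e_addr > c->e_addr)
                c->e_addr = w->e_addr;
            return true;
        }
        if (w->b_addr < c->b_addr + max_pg) {
            /* head fits, tail does not: grow the cluster to its limit and
             * leave the remaining tail in 'w' for another cluster */
            c->e_addr = c->b_addr + max_pg;
            w->b_addr = c->e_addr;
        }
        return false;
    }
    /* write starts in front of the cluster */
    if (c->e_addr - w->b_addr <= max_pg) {
        /* combined extent still under the limit: grow the head */
        c->b_addr = w->b_addr;
        if (w->e_addr > c->e_addr)
            c->e_addr = w->e_addr;
        return true;
    }
    return false;
}

int main(void)
{
    extent_t c = { 10, 20 }, w = { 18, 40 };
    bool done = cluster_absorb(&c, &w, 16);

    printf("absorbed=%d cluster=[%llu,%llu) leftover=[%llu,%llu)\n",
           (int)done,
           (unsigned long long)c.b_addr, (unsigned long long)c.e_addr,
           (unsigned long long)w.b_addr, (unsigned long long)w.e_addr);
    return 0;
}

Compiled standalone, the example reports the absorbed portion and the leftover tail that would seed the next cluster.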
./bsd/vfs/vfs_cprotect.c differences detected: 2c2 < * Copyright (c) 2015-2018 Apple Inc. All rights reserved. --- > * Copyright (c) 2015 Apple Inc. All rights reserved. 36,38d35 < //for write protection < #include < #include 60,63c57 < CPX_COMPOSITEKEY = 0x20, < < //write page protection < CPX_WRITE_PROTECTABLE = 0x40 --- > CPX_COMPOSITEKEY = 0x20 97,129c91,92 < cpx_t cpx = NULL; < < #if CONFIG_KEYPAGE_WP < /* < * Macs only use 1 key per volume, so force it into its own page. < * This way, we can write-protect as needed. < */ < size_t cpsize = cpx_size (key_len); < if (cpsize < PAGE_SIZE) { < /* < * Don't use MALLOC to allocate the page-sized structure. Instead, < * use kmem_alloc to bypass KASAN since we are supplying our own < * unilateral write protection on this page. Note that kmem_alloc < * can block. < */ < if (kmem_alloc (kernel_map, (vm_offset_t *)&cpx, PAGE_SIZE, VM_KERN_MEMORY_FILE)) { < /* < * returning NULL at this point (due to failed allocation) would just < * result in a panic. fall back to attempting a normal MALLOC, and don't < * let the cpx get marked PROTECTABLE. < */ < MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK); < } < else { < //mark the page as protectable, since kmem_alloc succeeded. < cpx->cpx_flags |= CPX_WRITE_PROTECTABLE; < } < } < else { < panic ("cpx_size too large ! (%lu)", cpsize); < } < #else < /* If key page write protection disabled, just switch to kernel MALLOC */ --- > cpx_t cpx; > 131c94 < #endif --- > 137,151d99 < /* this is really a void function */ < void cpx_writeprotect (cpx_t cpx) < { < #if CONFIG_KEYPAGE_WP < void *cpxstart = (void*)cpx; < void *cpxend = (void*)((uint8_t*)cpx + PAGE_SIZE); < if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) { < vm_map_protect (kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_READ), FALSE); < } < #else < (void) cpx; < #endif < return; < } < 159d106 < 164,179d110 < < #if CONFIG_KEYPAGE_WP < /* unprotect the page before bzeroing */ < void *cpxstart = (void*)cpx; < void *cpxend = (void*)((uint8_t*)cpx + PAGE_SIZE); < if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) { < vm_map_protect (kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_DEFAULT), FALSE); < < //now zero the memory after un-protecting it < bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len); < < //If we are here, then we used kmem_alloc to get the page. Must use kmem_free to drop it. < kmem_free(kernel_map, (vm_offset_t)cpx, PAGE_SIZE); < return; < } < #else 182,184d112 < return; < #endif <
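The vfs_cprotect.c changes introduce a CONFIG_KEYPAGE_WP path that places the cpx key structure on its own page (via kmem_alloc, bypassing the normal MALLOC), marks it CPX_WRITE_PROTECTABLE, and uses vm_map_protect() to drop the page to read-only, restoring VM_PROT_DEFAULT only to zero and free the key. The snippet below is a user-space analogue of that pattern, with mmap/mprotect standing in for the kernel's kmem_alloc/vm_map_protect; it is a sketch of the idea, not the kernel implementation.

/* User-space analogue: keep key material on its own page, write-protect it
 * once populated, and restore write access only to zero and release it. */
#include <sys/mman.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);

    unsigned char *key = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANON, -1, 0);
    if (key == MAP_FAILED) { perror("mmap"); return 1; }

    memset(key, 0xA5, 32);                   /* populate the "key" */

    if (mprotect(key, pagesz, PROT_READ))    /* write-protect the page */
        perror("mprotect(ro)");

    /* ... key is readable here but cannot be silently overwritten ... */

    if (mprotect(key, pagesz, PROT_READ | PROT_WRITE))  /* unprotect */
        perror("mprotect(rw)");
    memset(key, 0, 32);                      /* zero before releasing */
    munmap(key, pagesz);
    return 0;
}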
./bsd/vfs/vfs_xattr.c differences detected: 400,441d399 < static void < vnode_setasnamedstream_internal(vnode_t vp, vnode_t svp) < { < uint32_t streamflags = VISNAMEDSTREAM; < < if ((vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) { < streamflags |= VISSHADOW; < } < < /* Tag the vnode. */ < vnode_lock_spin(svp); < svp->v_flag |= streamflags; < vnode_unlock(svp); < < /* Tag the parent so we know to flush credentials for streams on setattr */ < vnode_lock_spin(vp); < vp->v_lflag |= VL_HASSTREAMS; < vnode_unlock(vp); < < /* Make the file it's parent. < * Note: This parent link helps us distinguish vnodes for < * shadow stream files from vnodes for resource fork on file < * systems that support namedstream natively (both have < * VISNAMEDSTREAM set) by allowing access to mount structure < * for checking MNTK_NAMED_STREAMS bit at many places in the < * code. < */ < vnode_update_identity(svp, vp, NULL, 0, 0, VNODE_UPDATE_NAMEDSTREAM_PARENT); < < return; < } < < errno_t < vnode_setasnamedstream(vnode_t vp, vnode_t svp) < { < if ((vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) < return (EINVAL); < < vnode_setasnamedstream_internal(vp, svp); < return (0); < } < 462,463c420,446 < vnode_setasnamedstream_internal(vp, *svpp); < } --- > uint32_t streamflags = VISNAMEDSTREAM; > vnode_t svp = *svpp; > > if ((vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) { > streamflags |= VISSHADOW; > } > > /* Tag the vnode. */ > vnode_lock_spin(svp); > svp->v_flag |= streamflags; > vnode_unlock(svp); > > /* Tag the parent so we know to flush credentials for streams on setattr */ > vnode_lock_spin(vp); > vp->v_lflag |= VL_HASSTREAMS; > vnode_unlock(vp); > > /* Make the file it's parent. > * Note: This parent link helps us distinguish vnodes for > * shadow stream files from vnodes for resource fork on file > * systems that support namedstream natively (both have > * VISNAMEDSTREAM set) by allowing access to mount structure > * for checking MNTK_NAMED_STREAMS bit at many places in the > * code. > */ > vnode_update_identity(svp, vp, NULL, 0, 0, VNODE_UPDATE_PARENT); > } 482,483c465,476 < vnode_setasnamedstream_internal(vp, *svpp); < } --- > uint32_t streamflags = VISNAMEDSTREAM; > vnode_t svp = *svpp; > > /* Tag the vnode. */ > if ((vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) { > streamflags |= VISSHADOW; > } > > /* Tag the vnode. */ > vnode_lock_spin(svp); > svp->v_flag |= streamflags; > vnode_unlock(svp); 484a478,492 > /* Tag the parent so we know to flush credentials for streams on setattr */ > vnode_lock_spin(vp); > vp->v_lflag |= VL_HASSTREAMS; > vnode_unlock(vp); > > /* Make the file it's parent. > * Note: This parent link helps us distinguish vnodes for > * shadow stream files from vnodes for resource fork on file > * systems that support namedstream natively (both have > * VISNAMEDSTREAM set) by allowing access to mount structure > * for checking MNTK_NAMED_STREAMS bit at many places in the > * code. > */ > vnode_update_identity(svp, vp, NULL, 0, 0, VNODE_UPDATE_PARENT); > } NO DIFFS in ./bsd/vfs/Makefile
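In vfs_xattr.c the duplicated named-stream tagging blocks collapse into a single vnode_setasnamedstream_internal() helper plus an exported vnode_setasnamedstream() wrapper that first checks MNTK_NAMED_STREAMS (the internal path also switches the parent update to VNODE_UPDATE_NAMEDSTREAM_PARENT). Below is a minimal sketch of that refactor shape with simplified stand-in types rather than the real vnode structures.

#include <errno.h>
#include <stdio.h>

struct vnode { unsigned v_flag; struct vnode *stream_parent; };

#define VISNAMEDSTREAM 0x1
#define VISSHADOW      0x2

/* Internal helper: the single copy of the tagging logic. */
static void setasnamedstream_internal(struct vnode *vp, struct vnode *svp,
                                      int fs_has_native_streams)
{
    unsigned streamflags = VISNAMEDSTREAM;

    if (!fs_has_native_streams)
        streamflags |= VISSHADOW;      /* shadow file, not a native stream */
    svp->v_flag |= streamflags;
    svp->stream_parent = vp;           /* stream remembers its owning file */
}

/* Exported wrapper: enforces the precondition before delegating. */
static int setasnamedstream(struct vnode *vp, struct vnode *svp,
                            int fs_has_native_streams)
{
    if (!fs_has_native_streams)
        return EINVAL;
    setasnamedstream_internal(vp, svp, fs_has_native_streams);
    return 0;
}

int main(void)
{
    struct vnode file = { 0, 0 }, stream = { 0, 0 };
    int err = setasnamedstream(&file, &stream, 1);

    printf("err=%d flags=0x%x\n", err, stream.v_flag);
    return 0;
}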

./bsd/vfs/vfs_fslog.c differences detected:
78c78
< if (0 != escape_str(c_name, strlen(c_name) + 1, sizeof(c_name))) {
---
> if (0 != escape_str(c_name, strlen(c_name), sizeof(c_name))) {
87c87
< if (0 != escape_str(t_name, strlen(t_name) + 1, sizeof(t_name))) {
---
> if (0 != escape_str(t_name, strlen(t_name), sizeof(t_name))) {
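The vfs_fslog.c change passes strlen(name) + 1 to escape_str(), so the length handed to the escaper counts the terminating NUL. Assuming escape_str() treats its second argument as the number of bytes currently used in the buffer (the apparent intent of these call sites), the sketch below shows why the terminator byte matters for an in-place escaper; escape_backslashes() is a hypothetical stand-in, not the kernel routine.

#include <stdio.h>
#include <string.h>

/* Escape '\' as "\\" in place.  'used' includes the NUL, 'cap' is the buffer
 * size.  Returns 0 on success, -1 if the result would not fit. */
static int escape_backslashes(char *buf, size_t used, size_t cap)
{
    for (size_t i = 0; i + 1 < used; i++) {       /* walk up to, not incl., the NUL */
        if (buf[i] == '\\') {
            if (used + 1 > cap)
                return -1;
            memmove(&buf[i + 1], &buf[i], used - i);  /* shift tail incl. NUL */
            used++;
            i++;                                   /* skip the inserted byte */
        }
    }
    return 0;
}

int main(void)
{
    char name[32] = "a\\b";

    /* Passing strlen(name) instead of strlen(name) + 1 would make the
     * escaper stop short of the terminator. */
    if (escape_backslashes(name, strlen(name) + 1, sizeof(name)) == 0)
        printf("%s\n", name);
    return 0;
}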
./bsd/vfs/vfs_fsevents.c differences detected:
1306c1306
< if (((kfse->type == FSE_RENAME) || (kfse->type == FSE_CLONE)) && kfse->dest == NULL) {
---
> if (kfse->type == FSE_RENAME && kfse->dest == NULL) {
1310c1310
< // destination of a rename or clone which we'll process separately
---
> // destination of a rename which we'll process separately
1970c1970
< 
---
> 
2003a2004,2005
> if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
> kn->kn_udata = kev->udata;
2079a2082,2083
> fseh->watcher->flags |= WATCHER_CLOSING;
> 
./bsd/vfs/vfs_attrlist.c differences detected: 2c2 < * Copyright (c) 1995-2018 Apple Inc. All rights reserved. --- > * Copyright (c) 1995-2017 Apple Inc. All rights reserved. 1139,1140c1139,1140 < ab.allocated = fixedsize + varsize; < if (((size_t)ab.allocated) > ATTR_MAX_BUFFER) { --- > ab.allocated = ulmin(bufferSize, fixedsize + varsize); > if (ab.allocated > ATTR_MAX_BUFFER) { 1145,1165d1144 < < if (return_valid && < (ab.allocated < (ssize_t)(sizeof(uint32_t) + sizeof(attribute_set_t))) && < !(options & FSOPT_REPORT_FULLSIZE)) { < uint32_t num_bytes_valid = sizeof(uint32_t); < /* < * Not enough to return anything and we don't have to report < * how much space is needed. Get out now. < * N.B. - We have only been called after having verified that < * attributeBuffer is at least sizeof(uint32_t); < */ < if (UIO_SEG_IS_USER_SPACE(segflg)) { < error = copyout(&num_bytes_valid, < CAST_USER_ADDR_T(attributeBuffer), num_bytes_valid); < } else { < bcopy(&num_bytes_valid, (void *)attributeBuffer, < (size_t)num_bytes_valid); < } < goto out; < } < 1185,1188d1163 < if (alp->commonattr & ATTR_CMN_ERROR) { < ATTR_PACK4(ab, 0); < ab.actual.commonattr |= ATTR_CMN_ERROR; < } 1484,1485c1459,1460 < *(uint32_t *)ab.base = (options & FSOPT_REPORT_FULLSIZE) ? ab.needed : imin(bufferSize, ab.needed); < --- > *(uint32_t *)ab.base = (options & FSOPT_REPORT_FULLSIZE) ? ab.needed : imin(ab.allocated, ab.needed); > 1487,1488c1462 < if (return_valid && < (ab.allocated >= (ssize_t)(sizeof(uint32_t) + sizeof(ab.actual)))) { --- > if (return_valid) { 1500c1474 < ulmin(bufferSize, ab.needed)); --- > ab.allocated); 1502c1476 < bcopy(ab.base, (void *)attributeBuffer, (size_t)ulmin(bufferSize, ab.needed)); --- > bcopy(ab.base, (void *)attributeBuffer, (size_t)ab.allocated); 2804,2806d2777 < if (bufferSize < sizeof(uint32_t)) < return (ERANGE); < 3707d3677 < uthread_t ut; 3727d3696 < ut = get_bsdthread_info(current_thread()); 3874,3878d3842 < /* < * Set UT_KERN_RAGE_VNODES to cause all vnodes created by the < * filesystem to be rapidly aged. < */ < ut->uu_flag |= UT_KERN_RAGE_VNODES; 3881d3844 < ut->uu_flag &= ~UT_KERN_RAGE_VNODES; 3902d3864 < ut->uu_flag |= UT_KERN_RAGE_VNODES; 3905d3866 < ut->uu_flag &= ~UT_KERN_RAGE_VNODES;
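The vfs_attrlist.c hunks adjust getattrlist buffer accounting on the left-hand (<) side: the reply is built at its full fixed+variable size (still bounded by ATTR_MAX_BUFFER), a caller whose buffer cannot hold even the leading u_int32_t gets ERANGE, a buffer too small for the length word plus attribute_set_t returns just the length word when FSOPT_REPORT_FULLSIZE is not set, and only min(bufferSize, needed) bytes are copied out while the leading word reports either the truncated or the full required size. The sketch below models that return contract in user space; it is a simplified stand-in, not the kernel packing code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define OPT_REPORT_FULLSIZE 0x1

/* 'reply'/'needed' describe the fully packed reply (assumed to fit in
 * 'reply'); 'out'/'outsize' is the caller's buffer.  Returns bytes copied,
 * or -1 if outsize cannot hold even the length word (ERANGE in the kernel). */
static ssize_t copy_attr_reply(uint8_t *reply, uint32_t needed,
                               uint8_t *out, uint32_t outsize, int options)
{
    if (outsize < sizeof(uint32_t))
        return -1;

    uint32_t tocopy = needed < outsize ? needed : outsize;
    uint32_t report = (options & OPT_REPORT_FULLSIZE) ? needed : tocopy;

    memcpy(reply, &report, sizeof(report));     /* first word = reported size */
    memcpy(out, reply, tocopy);                 /* copy only what fits */
    return (ssize_t)tocopy;
}

int main(void)
{
    uint8_t reply[64] = { 0 }, out[16] = { 0 };
    uint32_t needed = 40;                       /* pretend packed reply size */
    uint32_t reported = 0;

    ssize_t n = copy_attr_reply(reply, needed, out, sizeof(out),
                                OPT_REPORT_FULLSIZE);
    memcpy(&reported, out, sizeof(reported));
    printf("copied %zd bytes, reported size %u\n", n, reported);
    return 0;
}

A caller seeing a reported size larger than its buffer can retry with a bigger buffer, which is the point of the FSOPT_REPORT_FULLSIZE branch.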
./bsd/vfs/vfs_syscalls.c differences detected: 148,159d147 < #ifndef HFS_GET_BOOT_INFO < #define HFS_GET_BOOT_INFO (FCNTL_FS_SPECIFIC_BASE + 0x00004) < #endif < < #ifndef HFS_SET_BOOT_INFO < #define HFS_SET_BOOT_INFO (FCNTL_FS_SPECIFIC_BASE + 0x00005) < #endif < < #ifndef APFSIOC_REVERT_TO_SNAPSHOT < #define APFSIOC_REVERT_TO_SNAPSHOT _IOW('J', 1, u_int64_t) < #endif < 176a165,166 > static void sync_thread(void *, __unused wait_result_t); > static int sync_async(int); 218,224d207 < //snapshot functions < #if CONFIG_MNT_ROOTSNAP < static int snapshot_root(int dirfd, user_addr_t name, uint32_t flags, vfs_context_t ctx); < #else < static int snapshot_root(int dirfd, user_addr_t name, uint32_t flags, vfs_context_t ctx) __attribute__((unused)); < #endif < 2330a2314 > int sync_timeout = 60; // Sync time limit (sec) 2364,2394d2347 < typedef enum { < SYNC_ALL = 0, < SYNC_ONLY_RELIABLE_MEDIA = 1, < SYNC_ONLY_UNRELIABLE_MEDIA = 2 < } sync_type_t; < < static int < sync_internal_callback(mount_t mp, void *arg) < { < if (arg) { < int is_reliable = !(mp->mnt_kern_flag & MNTK_VIRTUALDEV) && < (mp->mnt_flag & MNT_LOCAL); < sync_type_t sync_type = *((sync_type_t *)arg); < < if ((sync_type == SYNC_ONLY_RELIABLE_MEDIA) && !is_reliable) < return (VFS_RETURNED); < else if ((sync_type = SYNC_ONLY_UNRELIABLE_MEDIA) && is_reliable) < return (VFS_RETURNED); < } < < (void)sync_callback(mp, NULL); < < return (VFS_RETURNED); < } < < int sync_thread_state = 0; < int sync_timeout_seconds = 5; < < #define SYNC_THREAD_RUN 0x0001 < #define SYNC_THREAD_RUNNING 0x0002 < 2396c2349 < sync_thread(__unused void *arg, __unused wait_result_t wr) --- > sync_thread(void *arg, __unused wait_result_t wr) 2398,2403c2351 < sync_type_t sync_type; < < lck_mtx_lock(sync_mtx_lck); < while (sync_thread_state & SYNC_THREAD_RUN) { < sync_thread_state &= ~SYNC_THREAD_RUN; < lck_mtx_unlock(sync_mtx_lck); --- > int *timeout = (int *) arg; 2405,2420c2353 < sync_type = SYNC_ONLY_RELIABLE_MEDIA; < vfs_iterate(LK_NOWAIT, sync_internal_callback, &sync_type); < sync_type = SYNC_ONLY_UNRELIABLE_MEDIA; < vfs_iterate(LK_NOWAIT, sync_internal_callback, &sync_type); < < lck_mtx_lock(sync_mtx_lck); < } < /* < * This wakeup _has_ to be issued before the lock is released otherwise < * we may end up waking up a thread in sync_internal which is < * expecting a wakeup from a thread it just created and not from this < * thread which is about to exit. < */ < wakeup(&sync_thread_state); < sync_thread_state &= ~SYNC_THREAD_RUNNING; < lck_mtx_unlock(sync_mtx_lck); --- > vfs_iterate(LK_NOWAIT, sync_callback, NULL); 2421a2355,2356 > if (timeout) > wakeup((caddr_t) timeout); 2432,2433d2366 < struct timeval sync_timeout_last_print = {0, 0}; < 2435,2436c2368 < * An in-kernel sync for power management to call. < * This function always returns within sync_timeout seconds. --- > * Sync in a separate thread so we can time out if it blocks. 
2438,2439c2370,2371 < __private_extern__ int < sync_internal(void) --- > static int > sync_async(int timeout) 2443,2444c2375 < int thread_created = FALSE; < struct timespec ts = {sync_timeout_seconds, 0}; --- > struct timespec ts = {timeout, 0}; 2447,2459c2378,2381 < sync_thread_state |= SYNC_THREAD_RUN; < if (!(sync_thread_state & SYNC_THREAD_RUNNING)) { < int kr; < < sync_thread_state |= SYNC_THREAD_RUNNING; < kr = kernel_thread_start(sync_thread, NULL, &thd); < if (kr != KERN_SUCCESS) { < sync_thread_state &= ~SYNC_THREAD_RUNNING; < lck_mtx_unlock(sync_mtx_lck); < printf("sync_thread failed\n"); < return (0); < } < thread_created = TRUE; --- > if (kernel_thread_start(sync_thread, &timeout, &thd) != KERN_SUCCESS) { > printf("sync_thread failed\n"); > lck_mtx_unlock(sync_mtx_lck); > return (0); 2462,2463c2384 < error = msleep((caddr_t)&sync_thread_state, sync_mtx_lck, < (PVFS | PDROP | PCATCH), "sync_thread", &ts); --- > error = msleep((caddr_t) &timeout, sync_mtx_lck, (PVFS | PDROP | PCATCH), "sync_thread", &ts); 2465,2471c2386 < struct timeval now; < < microtime(&now); < if (now.tv_sec - sync_timeout_last_print.tv_sec > 120) { < printf("sync timed out: %d sec\n", sync_timeout_seconds); < sync_timeout_last_print.tv_sec = now.tv_sec; < } --- > printf("sync timed out: %d sec\n", timeout); 2473,2475c2388 < < if (thread_created) < thread_deallocate(thd); --- > thread_deallocate(thd); 2477a2391,2401 > } > > /* > * An in-kernel sync for power management to call. > */ > __private_extern__ int > sync_internal(void) > { > (void) sync_async(sync_timeout); > > return 0; 2488c2412 < int error, quota_cmd, quota_status = 0; --- > int error, quota_cmd, quota_status; 2493c2417 < struct dqblk my_dqblk = {}; --- > struct dqblk my_dqblk; 3712,3717d3635 < strlen(vp->v_name)) || < !strncmp(vp->v_name, < "SpringBoard", < strlen(vp->v_name)) || < !strncmp(vp->v_name, < "backboardd", 5366c5284 < MALLOC(result, errno_t *, desc_actual * sizeof(errno_t), M_TEMP, M_WAITOK | M_ZERO); --- > MALLOC(result, errno_t *, desc_actual * sizeof(errno_t), M_TEMP, M_WAITOK); 5585c5503 < } source = {}; --- > } source; 5591c5509 < } dest = {}; --- > } dest; 7412,7462d7329 < < #if CONFIG_FSE < need_event = need_fsevent(FSE_RENAME, fdvp); < if (need_event) { < if (fvp) { < get_fse_info(fvp, &from_finfo, ctx); < } else { < error = vfs_get_notify_attributes(&__rename_data->fv_attr); < if (error) { < goto out1; < } < < fvap = &__rename_data->fv_attr; < } < < if (tvp) { < get_fse_info(tvp, &to_finfo, ctx); < } else if (batched) { < error = vfs_get_notify_attributes(&__rename_data->tv_attr); < if (error) { < goto out1; < } < < tvap = &__rename_data->tv_attr; < } < } < #else < need_event = 0; < #endif /* CONFIG_FSE */ < < if (need_event || kauth_authorize_fileop_has_listeners()) { < if (from_name == NULL) { < GET_PATH(from_name); < if (from_name == NULL) { < error = ENOMEM; < goto out1; < } < } < < from_len = safe_getpath(fdvp, fromnd->ni_cnd.cn_nameptr, from_name, MAXPATHLEN, &from_truncated); < < if (to_name == NULL) { < GET_PATH(to_name); < if (to_name == NULL) { < error = ENOMEM; < goto out1; < } < } < < to_len = safe_getpath(tdvp, tond->ni_cnd.cn_nameptr, to_name, MAXPATHLEN, &to_truncated); < } 7482c7349 < error = vn_authorize_renamex_with_paths(fdvp, fvp, &fromnd->ni_cnd, from_name, tdvp, tvp, &tond->ni_cnd, to_name, ctx, flags, NULL); --- > error = vn_authorize_renamex(fdvp, fvp, &fromnd->ni_cnd, tdvp, tvp, &tond->ni_cnd, ctx, flags, NULL); 7672a7540,7589 > #if CONFIG_FSE > need_event = need_fsevent(FSE_RENAME, fdvp); > if 
(need_event) { > if (fvp) { > get_fse_info(fvp, &from_finfo, ctx); > } else { > error = vfs_get_notify_attributes(&__rename_data->fv_attr); > if (error) { > goto out1; > } > > fvap = &__rename_data->fv_attr; > } > > if (tvp) { > get_fse_info(tvp, &to_finfo, ctx); > } else if (batched) { > error = vfs_get_notify_attributes(&__rename_data->tv_attr); > if (error) { > goto out1; > } > > tvap = &__rename_data->tv_attr; > } > } > #else > need_event = 0; > #endif /* CONFIG_FSE */ > > if (need_event || kauth_authorize_fileop_has_listeners()) { > if (from_name == NULL) { > GET_PATH(from_name); > if (from_name == NULL) { > error = ENOMEM; > goto out1; > } > } > > from_len = safe_getpath(fdvp, fromnd->ni_cnd.cn_nameptr, from_name, MAXPATHLEN, &from_truncated); > > if (to_name == NULL) { > GET_PATH(to_name); > if (to_name == NULL) { > error = ENOMEM; > goto out1; > } > } > > to_len = safe_getpath(tdvp, tond->ni_cnd.cn_nameptr, to_name, MAXPATHLEN, &to_truncated); > } 8731,8732c8648,8649 < uint32_t count = 0, savecount = 0; < uint32_t newstate = 0; --- > uint32_t count, savecount; > uint32_t newstate; 8734c8651 < uint32_t loff = 0; --- > uint32_t loff; 10530,10557d10446 < /* other, known commands shouldn't be passed down here */ < switch (cmd) { < case F_PUNCHHOLE: < case F_TRIM_ACTIVE_FILE: < case F_RDADVISE: < case F_TRANSCODEKEY: < case F_GETPROTECTIONLEVEL: < case F_GETDEFAULTPROTLEVEL: < case F_MAKECOMPRESSED: < case F_SET_GREEDY_MODE: < case F_SETSTATICCONTENT: < case F_SETIOTYPE: < case F_SETBACKINGSTORE: < case F_GETPATH_MTMINFO: < case APFSIOC_REVERT_TO_SNAPSHOT: < case FSIOC_FIOSEEKHOLE: < case FSIOC_FIOSEEKDATA: < case HFS_GET_BOOT_INFO: < case HFS_SET_BOOT_INFO: < case FIOPINSWAP: < case F_CHKCLEAN: < case F_FULLFSYNC: < case F_BARRIERFSYNC: < case F_FREEZE_FS: < case F_THAW_FS: < error = EINVAL; < goto outdrop; < } 10571d10459 < outdrop: 10686,10687c10574 < error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen); < if (error != 0) { --- > if ((error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen) != 0)) { 10767,10768c10654 < error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen); < if (error != 0) { --- > if ((error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen) != 0)) { 10814,10815c10700 < error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen); < if (error != 0) { --- > if ((error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen) != 0)) { 10874,10875c10759 < error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen); < if (error != 0) { --- > if ((error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen) != 0)) { 11173c11057 < int dbg_namelen; --- > int dbg_namelen; 11175c11059 < dbg_namelen = (int)sizeof(dbg_parms); --- > dbg_namelen = (int)sizeof(dbg_parms); 11186,11187c11070 < kdebug_vfs_lookup(dbg_parms, dbg_namelen, (void *)vp, < KDBG_VFS_LOOKUP_FLAG_LOOKUP); --- > kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE); 11218c11101 < MALLOC(realpath, char *, uap->bufsize, M_TEMP, M_WAITOK | M_ZERO); --- > MALLOC(realpath, char *, uap->bufsize, M_TEMP, M_WAITOK); 11831a11715,11718 > #ifndef APFSIOC_REVERT_TO_SNAPSHOT > #define APFSIOC_REVERT_TO_SNAPSHOT _IOW('J', 1, u_int64_t) > #endif > 12109c11996 < #if CONFIG_MNT_ROOTSNAP --- > #if !TARGET_OS_OSX 12113c12000 < #endif /* CONFIG_MNT_ROOTSNAP */ --- > #endif /* !TARGET_OS_OSX */ NO DIFFS in ./bsd/vfs/vfs_disk_conditioner.h
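The largest vfs_syscalls.c change reworks sync(): instead of spawning a fresh thread per request and sleeping up to 60 seconds, the left-hand (<) side keeps a single coalescing worker gated by SYNC_THREAD_RUN/SYNC_THREAD_RUNNING, walks reliable (local, non-virtual) mounts before unreliable ones, and has callers wait on the state word with a 5-second timeout plus a rate-limited log. (Note that the second test in sync_internal_callback appears above as `sync_type = SYNC_ONLY_UNRELIABLE_MEDIA` rather than `==`, i.e. it reads as an assignment; that is how the hunk is shown.) The pthread program below is an illustrative user-space sketch of the coalescing-worker pattern under those assumptions, not the kernel code.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define SYNC_THREAD_RUN      0x1
#define SYNC_THREAD_RUNNING  0x2

static pthread_mutex_t sync_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  sync_cv  = PTHREAD_COND_INITIALIZER;
static int sync_state;

static void do_sync_pass(void) { /* stand-in for iterating mounts and syncing them */ }

static void *sync_worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&sync_mtx);
    while (sync_state & SYNC_THREAD_RUN) {
        sync_state &= ~SYNC_THREAD_RUN;
        pthread_mutex_unlock(&sync_mtx);
        do_sync_pass();                   /* reliable media first, then the rest */
        pthread_mutex_lock(&sync_mtx);
    }
    sync_state &= ~SYNC_THREAD_RUNNING;
    pthread_cond_broadcast(&sync_cv);     /* wake waiters before dropping the lock */
    pthread_mutex_unlock(&sync_mtx);
    return NULL;
}

/* Caller side: request a sync and wait at most 'timeout_sec' for it. */
static void request_sync(int timeout_sec)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += timeout_sec;

    pthread_mutex_lock(&sync_mtx);
    sync_state |= SYNC_THREAD_RUN;
    if (!(sync_state & SYNC_THREAD_RUNNING)) {
        pthread_t t;
        sync_state |= SYNC_THREAD_RUNNING;
        pthread_create(&t, NULL, sync_worker, NULL);
        pthread_detach(t);
    }
    if (pthread_cond_timedwait(&sync_cv, &sync_mtx, &ts) != 0)
        printf("sync timed out: %d sec\n", timeout_sec);
    pthread_mutex_unlock(&sync_mtx);
}

int main(void) { request_sync(5); return 0; }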

./bsd/vfs/vfs_bio.c differences detected:
3258c3258
< } // end BLK_READ
---
> }
3263,3265c3263,3264
< } // end switch
< } //end buf_t !incore
< 
---
> }
> }
4048,4052c4047,4049
< KDBG_RELEASE_NOPROCFILT(FSDBG_CODE(DBG_DKRW, code),
< buf_kernel_addrperm_addr(bp),
< (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid,
< bp->b_error);
< }
---
> KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
> buf_kernel_addrperm_addr(bp), (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid, bp->b_error, 0);
> }
4585c4582
< static boolean_t
---
> boolean_t
NO DIFFS in ./bsd/vfs/vnode_if.c
NO DIFFS in ./bsd/vfs/vfs_support.h

./bsd/vfs/kpi_vfs.c differences detected: 106d105 < #include 111d109 < #include 125,128d122 < #if NULLFS < #include < #endif < 1604c1598 < --- > 1606c1600 < if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) { --- > if (ut->uu_flag & UT_RAGE_VNODES) { 1610,1613d1603 < < if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) { < return 1; < } 2917,2930d2906 < int < vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp) < { < if (out_vpp) { < *out_vpp = NULLVP; < } < #if NULLFS < return nullfs_getbackingvnode(in_vp, out_vpp); < #else < #pragma unused(in_vp) < return ENOENT; < #endif < } < 4030,4039c4006,4016 < MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK); < NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, < UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx); < fromnd->ni_dvp = fdvp; < error = namei(fromnd); < < /* < * If there was an error looking up source attribute file, < * we'll behave as if it didn't exist. < */ --- > if (xfromname != NULL) { > MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK); > NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, > UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx); > fromnd->ni_dvp = fdvp; > error = namei(fromnd); > > /* > * If there was an error looking up source attribute file, > * we'll behave as if it didn't exist. > */ 4041,4048c4018,4034 < if (error == 0) { < if (fromnd->ni_vp) { < /* src_attr_vp indicates need to call vnode_put / nameidone later */ < src_attr_vp = fromnd->ni_vp; < < if (fromnd->ni_vp->v_type != VREG) { < src_attr_vp = NULLVP; < vnode_put(fromnd->ni_vp); --- > if (error == 0) { > if (fromnd->ni_vp) { > /* src_attr_vp indicates need to call vnode_put / nameidone later */ > src_attr_vp = fromnd->ni_vp; > > if (fromnd->ni_vp->v_type != VREG) { > src_attr_vp = NULLVP; > vnode_put(fromnd->ni_vp); > } > } > /* > * Either we got an invalid vnode type (not a regular file) or the namei lookup > * suppressed ENOENT as a valid error since we're renaming. Either way, we don't > * have a vnode here, so we drop our namei buffer for the source attribute file > */ > if (src_attr_vp == NULLVP) { > nameidone(fromnd); 4051,4058d4036 < /* < * Either we got an invalid vnode type (not a regular file) or the namei lookup < * suppressed ENOENT as a valid error since we're renaming. Either way, we don't < * have a vnode here, so we drop our namei buffer for the source attribute file < */ < if (src_attr_vp == NULLVP) { < nameidone(fromnd); < } 5491c5469 < if (_err == 0 && *vpp) { --- > if (_err == 0 && *vpp) 5493,5495d5470 < if (kdebug_enable) < kdebug_lookup(*vpp, cnp); < } NO DIFFS in ./bsd/vfs/vfs_init.c
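The kpi_vfs.c hunks add vnode_getbackingvnode(), which defers to nullfs_getbackingvnode() when NULLFS is built in, and extend the skip-atime check so that either a per-thread flag (UT_ATIME_UPDATE, alongside UT_RAGE_VNODES) or the owning process's P_VFS_IOPOLICY_ATIME_UPDATES policy bit triggers it. The sketch below models that two-level flag-or-policy check with simplified stand-in structures; the exact semantics of the real flags are assumed for illustration, not confirmed by the diff.

#include <stdbool.h>
#include <stdio.h>

#define UT_ATIME_UPDATE              0x2   /* per-thread flag (stand-in value) */
#define P_VFS_IOPOLICY_ATIME_UPDATES 0x1   /* per-process policy bit (stand-in value) */

struct uthread { unsigned uu_flag; };
struct proc    { unsigned p_vfs_iopolicy; };

/* Assumed semantics: either bit requests that atime updates be skipped. */
static bool vnode_skip_atime(const struct uthread *ut, const struct proc *p)
{
    if (ut->uu_flag & UT_ATIME_UPDATE)               /* thread-level override */
        return true;
    if (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES)  /* process-wide policy */
        return true;
    return false;
}

int main(void)
{
    struct uthread ut = { 0 };
    struct proc    p  = { P_VFS_IOPOLICY_ATIME_UPDATES };

    printf("skip atime: %d\n", (int)vnode_skip_atime(&ut, &p));
    return 0;
}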

./bsd/vfs/vfs_subr.c differences detected: 2c2 < * Copyright (c) 2000-2018 Apple Inc. All rights reserved. --- > * Copyright (c) 2000-2017 Apple Inc. All rights reserved. 1030d1029 < #define DBG_MOUNTROOT (FSDBG_CODE(DBG_MOUNT, 0)) 1053d1051 < KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_START); 1059,1060d1056 < < KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 0); 1065,1066d1060 < < KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 1); 1179,1180c1173 < if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) { < KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 2); --- > if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) 1182d1174 < } 1204d1195 < KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 3); 1211c1202 < --- > 1215d1205 < KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error ? error : ENODEV, 4); 3329c3319 < mp->mnt_ioscale = MNT_IOSCALE(mp->mnt_ioqueue_depth); --- > mp->mnt_ioscale = (mp->mnt_ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH; 3505c3495 < MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO); --- > MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK); 3784a3775,3776 > if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) > kn->kn_udata = kev->udata; 3872c3864 < struct vfsconf vfsc = {}; --- > struct vfsconf vfsc; 3920c3912 < SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout_seconds, 0, ""); --- > SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout, 0, ""); 5134c5126 < (ut->uu_flag & (UT_RAGE_VNODES | UT_KERN_RAGE_VNODES))) { --- > (ut->uu_flag & UT_RAGE_VNODES)) { 5140,5144d5131 < * < * if UT_KERN_RAGE_VNODES is set, then the < * kernel internally wants vnodes to be rapidly < * aged, even if the process hasn't requested < * this 5849d5835 < vp = NULLVP; 5853,5859d5838 < /* < * For creation VNOPs, this is the equivalent of < * lookup_handle_found_vnode. < */ < if (kdebug_enable && *vpp) < kdebug_lookup(*vpp, cnp); < 6149,6157d6127 < < return vn_authorize_renamex_with_paths(fdvp, fvp, fcnp, NULL, tdvp, tvp, tcnp, NULL, ctx, flags, reserved); < } < < int < vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path, < struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path, < vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved) < { 6254,6270d6223 < /* < * As part of the Kauth step, we call out to allow 3rd-party < * fileop notification of "about to rename". This is needed < * in the event that 3rd-parties need to know that the DELETE < * authorization is actually part of a rename. It's important < * that we guarantee that the DELETE call-out will always be < * made if the WILL_RENAME call-out is made. Another fileop < * call-out will be performed once the operation is completed. < * We can ignore the result of kauth_authorize_fileop(). < * < * N.B. We are passing the vnode and *both* paths to each < * call; kauth_authorize_fileop() extracts the "from" path < * when posting a KAUTH_FILEOP_WILL_RENAME notification. < * As such, we only post these notifications if all of the < * information we need is provided. 
< */ < 6284,6288d6236 < if (to_path != NULL) < kauth_authorize_fileop(vfs_context_ucred(ctx), < KAUTH_FILEOP_WILL_RENAME, < (uintptr_t)fvp, < (uintptr_t)to_path); 6292,6296d6239 < if (from_path != NULL) < kauth_authorize_fileop(vfs_context_ucred(ctx), < KAUTH_FILEOP_WILL_RENAME, < (uintptr_t)tvp, < (uintptr_t)from_path); 6328,6332d6270 < if (to_path != NULL) < kauth_authorize_fileop(vfs_context_ucred(ctx), < KAUTH_FILEOP_WILL_RENAME, < (uintptr_t)fvp, < (uintptr_t)to_path); 8846a8785 > 9567,9572d9505 < #if CONFIG_MACF < int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd); < if (rv != 0) < return rv; < #endif < 9935,9936c9868 < kdebug_vfs_lookup(ctx->path, len, vp, < KDBG_VFS_LOOKUP_FLAG_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT); --- > kdebug_lookup_gen_events(ctx->path, len, vp, TRUE); NO DIFFS in ./bsd/vfs/vfs_quota.c
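Among the vfs_subr.c changes above, the rename authorization path gains KAUTH_FILEOP_WILL_RENAME call-outs carrying both paths, mount-root gets DBG_MOUNTROOT kdebug tracepoints, and the mount-time I/O scale initialization is consolidated into an MNT_IOSCALE() macro in place of the inline rounding-up division shown on the right-hand (>) side. The sketch below assumes MNT_IOSCALE() wraps that same ceiling division and uses an illustrative MNT_DEFAULT_IOQUEUE_DEPTH of 32; both are assumptions for demonstration, not taken from the diff.

#include <stdio.h>

#define MNT_DEFAULT_IOQUEUE_DEPTH 32   /* illustrative value */

/* Ceiling division: how many "default depth" units the device's queue covers. */
#define MNT_IOSCALE(ioqueue_depth) \
    (((ioqueue_depth) + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH)

int main(void)
{
    for (unsigned depth = 1; depth <= 256; depth *= 2)
        printf("ioqueue_depth=%3u -> ioscale=%u\n", depth, MNT_IOSCALE(depth));
    return 0;
}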
NO DIFFS in ./bsd/vfs/vfs_utfconv.c
NO DIFFS in ./bsd/vfs/vnode_if.src

./bsd/vfs/vfs_cache.c differences detected: 85d84 < #include 879a879 > #if NAMEDSTREAMS 881a882 > #endif 889a891 > #if NAMEDSTREAMS 896a899 > #endif 959a963 > #if NAMEDSTREAMS 966a971 > #endif 971a977 > #if NAMEDSTREAMS 977a984 > #endif 1433c1440 < if ((dp->v_flag & VISHARDLINK)) { --- > if (dp && (dp->v_flag & VISHARDLINK)) { 2163c2170 < resize_namecache(int newsize) --- > resize_namecache(u_int newsize) 2170c2177 < int dNodes, dNegNodes, nelements; --- > int dNodes, dNegNodes; 2173,2175d2179 < if (newsize < 0) < return EINVAL; < 2177a2182 > 2180,2184c2185 < return 0; < } < < if (os_mul_overflow(dNodes, 2, &nelements)) { < return EINVAL; --- > return 0; 2186,2187c2187 < < new_table = hashinit(nelements, M_CACHE, &nchashmask); --- > new_table = hashinit(2 * dNodes, M_CACHE, &nchashmask); 2191c2191 < return ENOMEM; --- > return ENOMEM; NO DIFFS in ./bsd/vfs/doc_tombstone.c
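The vfs_cache.c change gives resize_namecache() a signed size, rejects negative values, and uses os_mul_overflow() to guard the doubling that feeds hashinit(). The sketch below reproduces that guard in user space, with the GCC/Clang __builtin_mul_overflow() standing in for os_mul_overflow() and the negative-cache bookkeeping omitted.

#include <errno.h>
#include <limits.h>
#include <stdio.h>

static int resize_namecache_checked(int newsize, int *nelements_out)
{
    int nelements;

    if (newsize < 0)
        return EINVAL;                 /* signed request must be non-negative */
    if (__builtin_mul_overflow(newsize, 2, &nelements))
        return EINVAL;                 /* 2 * newsize would overflow an int */

    *nelements_out = nelements;        /* the kernel would hand this to hashinit() */
    return 0;
}

int main(void)
{
    int n = 0;
    int err = resize_namecache_checked(1024, &n);

    printf("resize(1024)    -> err=%d, nelements=%d\n", err, n);
    err = resize_namecache_checked(INT_MAX, &n);
    printf("resize(INT_MAX) -> err=%d\n", err);
    return 0;
}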
NO DIFFS in ./bsd/vfs/vnode_if.sh
NO DIFFS in ./bsd/vfs/vfs_conf.c
NO DIFFS in ./bsd/vfs/vfs_support.c

./bsd/vfs/vfs_lookup.c differences detected: 107a108,109 > static void kdebug_lookup(struct vnode *dp, struct componentname *cnp); > 1747c1749 < kdebug_vfs_lookup(long *dbg_parms, int dbg_namelen, void *dp, uint32_t flags) --- > kdebug_lookup_gen_events(long *dbg_parms, int dbg_namelen, void *dp, boolean_t lookup) 1751,1752d1752 < bool lookup = flags & KDBG_VFS_LOOKUP_FLAG_LOOKUP; < bool noprocfilt = flags & KDBG_VFS_LOOKUP_FLAG_NOPROCFILT; 1758c1758 < if (lookup) { --- > if (lookup == TRUE) 1760c1760 < } else { --- > else 1762d1761 < } 1767,1773c1766 < if (noprocfilt) { < KDBG_RELEASE_NOPROCFILT(code, kdebug_vnode(dp), dbg_parms[0], < dbg_parms[1], dbg_parms[2]); < } else { < KDBG_RELEASE(code, kdebug_vnode(dp), dbg_parms[0], dbg_parms[1], < dbg_parms[2]); < } --- > KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, code, kdebug_vnode(dp), dbg_parms[0], dbg_parms[1], dbg_parms[2], 0); 1781,1787c1774 < if (noprocfilt) { < KDBG_RELEASE_NOPROCFILT(code, dbg_parms[i], dbg_parms[i + 1], < dbg_parms[i + 2], dbg_parms[i + 3]); < } else { < KDBG_RELEASE(code, dbg_parms[i], dbg_parms[i + 1], dbg_parms[i + 2], < dbg_parms[i + 3]); < } --- > KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, code, dbg_parms[i], dbg_parms[i+1], dbg_parms[i+2], dbg_parms[i+3], 0); 1791,1799c1778 < void < kdebug_lookup_gen_events(long *dbg_parms, int dbg_namelen, void *dp, < boolean_t lookup) < { < kdebug_vfs_lookup(dbg_parms, dbg_namelen, dp, < lookup ? KDBG_VFS_LOOKUP_FLAG_LOOKUP : 0); < } < < void --- > static void 1823,1825c1802,1803 < kdebug_vfs_lookup(dbg_parms, dbg_namelen, (void *)dp, < KDBG_VFS_LOOKUP_FLAG_LOOKUP); < } --- > kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)dp, TRUE); > } 1830,1831c1808 < kdebug_vfs_lookup(long *dbg_parms __unused, int dbg_namelen __unused, < void *dp __unused, __unused uint32_t flags) --- > kdebug_lookup_gen_events(long *dbg_parms __unused, int dbg_namelen __unused, void *dp __unused)
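The vfs_lookup.c hunks replace kdebug_lookup_gen_events()'s boolean `lookup` argument with a kdebug_vfs_lookup() entry point that takes a flags word (KDBG_VFS_LOOKUP_FLAG_LOOKUP, KDBG_VFS_LOOKUP_FLAG_NOPROCFILT), keeping the old name as a thin compatibility wrapper. The sketch below shows that boolean-to-flags migration with simplified names; the trace output is invented purely for illustration.

#include <stdbool.h>
#include <stdio.h>

#define VFS_LOOKUP_FLAG_LOOKUP      0x1
#define VFS_LOOKUP_FLAG_NOPROCFILT  0x2

/* New-style entry point: a flags word leaves room for more behaviors. */
static void vfs_lookup_trace(const char *path, unsigned flags)
{
    printf("%s: path=%s%s\n",
           (flags & VFS_LOOKUP_FLAG_LOOKUP) ? "lookup" : "other",
           path,
           (flags & VFS_LOOKUP_FLAG_NOPROCFILT) ? " (unfiltered)" : "");
}

/* Old-style wrapper kept so existing callers do not change. */
static void vfs_lookup_trace_compat(const char *path, bool lookup)
{
    vfs_lookup_trace(path, lookup ? VFS_LOOKUP_FLAG_LOOKUP : 0);
}

int main(void)
{
    vfs_lookup_trace("/tmp/a", VFS_LOOKUP_FLAG_LOOKUP | VFS_LOOKUP_FLAG_NOPROCFILT);
    vfs_lookup_trace_compat("/tmp/b", true);
    return 0;
}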
./bsd/vfs/vfs_disk_conditioner.c differences detected: 55,63d54 < struct saved_mount_fields { < uint32_t mnt_maxreadcnt; /* Max. byte count for read */ < uint32_t mnt_maxwritecnt; /* Max. byte count for write */ < uint32_t mnt_segreadcnt; /* Max. segment count for read */ < uint32_t mnt_segwritecnt; /* Max. segment count for write */ < uint32_t mnt_ioqueue_depth; /* the maxiumum number of commands a device can accept */ < uint32_t mnt_ioscale; /* scale the various throttles/limits imposed on the amount of I/O in flight */ < }; < 65,67c56,60 < disk_conditioner_info dcinfo; // all the original data from fsctl < struct saved_mount_fields mnt_fields; // fields to restore in mount_t when conditioner is disabled < --- > boolean_t enabled; // if other fields have any effect > uint64_t access_time_usec; // maximum latency before an I/O transfer begins > uint64_t read_throughput_mbps; // throughput of an I/O read > uint64_t write_throughput_mbps; // throughput of an I/O write > boolean_t is_ssd; // behave like an SSD (for both conditioning and affecting behavior in other parts of VFS) 95,96c88 < struct _disk_conditioner_info_t *internal_info = NULL; < disk_conditioner_info *info = NULL; --- > struct _disk_conditioner_info_t *info = NULL; 99,104d90 < vnode_t vp; < < vp = buf_vnode(bp); < if (!vp) { < return; < } 106c92 < mp = vp->v_mount; --- > mp = buf_vnode(bp)->v_mount; 111,112c97,98 < internal_info = mp->mnt_disk_conditioner_info; < if (!internal_info || !internal_info->dcinfo.enabled) { --- > info = mp->mnt_disk_conditioner_info; > if (!info || !info->enabled) { 115d100 < info = &(internal_info->dcinfo); 119c104 < last_blkno = internal_info->last_blkno; --- > last_blkno = info->last_blkno; 121c106 < internal_info->last_blkno = bp->b_blkno + bp->b_bcount; --- > info->last_blkno = bp->b_blkno + bp->b_bcount; 140c125 < timevalsub(&elapsed, &internal_info->last_io_timestamp); --- > timevalsub(&elapsed, &info->last_io_timestamp); 142c127 < if (elapsed.tv_sec > DISK_IDLE_SEC && internal_info->last_io_timestamp.tv_sec != 0) { --- > if (elapsed.tv_sec > DISK_IDLE_SEC && info->last_io_timestamp.tv_sec != 0) { 148c133 < microuptime(&internal_info->last_io_timestamp); --- > microuptime(&info->last_io_timestamp); 171c156 < microuptime(&internal_info->last_io_timestamp); --- > microuptime(&info->last_io_timestamp); 185,186c170,171 < if (info) { < memcpy(uinfo, &(info->dcinfo), sizeof(disk_conditioner_info)); --- > if (!info) { > return 0; 189,190c174,178 < return 0; < } --- > uinfo->enabled = info->enabled; > uinfo->access_time_usec = info->access_time_usec; > uinfo->read_throughput_mbps = info->read_throughput_mbps; > uinfo->write_throughput_mbps = info->write_throughput_mbps; > uinfo->is_ssd = info->is_ssd; 192,199c180 < static inline void < disk_conditioner_restore_mount_fields(mount_t mp, struct saved_mount_fields *mnt_fields) { < mp->mnt_maxreadcnt = mnt_fields->mnt_maxreadcnt; < mp->mnt_maxwritecnt = mnt_fields->mnt_maxwritecnt; < mp->mnt_segreadcnt = mnt_fields->mnt_segreadcnt; < mp->mnt_segwritecnt = mnt_fields->mnt_segwritecnt; < mp->mnt_ioqueue_depth = mnt_fields->mnt_ioqueue_depth; < mp->mnt_ioscale = mnt_fields->mnt_ioscale; --- > return 0; 205,207c186 < struct _disk_conditioner_info_t *internal_info; < disk_conditioner_info *info; < struct saved_mount_fields *mnt_fields; --- > struct _disk_conditioner_info_t *info; 217,267c196,199 < mount_lock(mp); < < internal_info = mp->mnt_disk_conditioner_info; < if (!internal_info) { < internal_info = mp->mnt_disk_conditioner_info = kalloc(sizeof(struct 
_disk_conditioner_info_t)); < bzero(internal_info, sizeof(struct _disk_conditioner_info_t)); < mnt_fields = &(internal_info->mnt_fields); < < /* save mount_t fields for restoration later */ < mnt_fields->mnt_maxreadcnt = mp->mnt_maxreadcnt; < mnt_fields->mnt_maxwritecnt = mp->mnt_maxwritecnt; < mnt_fields->mnt_segreadcnt = mp->mnt_segreadcnt; < mnt_fields->mnt_segwritecnt = mp->mnt_segwritecnt; < mnt_fields->mnt_ioqueue_depth = mp->mnt_ioqueue_depth; < mnt_fields->mnt_ioscale = mp->mnt_ioscale; < } < < info = &(internal_info->dcinfo); < mnt_fields = &(internal_info->mnt_fields); < < if (!uinfo->enabled && info->enabled) { < /* disk conditioner is being disabled when already enabled */ < disk_conditioner_restore_mount_fields(mp, mnt_fields); < } < < memcpy(info, uinfo, sizeof(disk_conditioner_info)); < < /* scale back based on hardware advertised limits */ < if (uinfo->ioqueue_depth == 0 || uinfo->ioqueue_depth > mnt_fields->mnt_ioqueue_depth) { < info->ioqueue_depth = mnt_fields->mnt_ioqueue_depth; < } < if (uinfo->maxreadcnt == 0 || uinfo->maxreadcnt > mnt_fields->mnt_maxreadcnt) { < info->maxreadcnt = mnt_fields->mnt_maxreadcnt; < } < if (uinfo->maxwritecnt == 0 || uinfo->maxwritecnt > mnt_fields->mnt_maxwritecnt) { < info->maxwritecnt = mnt_fields->mnt_maxwritecnt; < } < if (uinfo->segreadcnt == 0 || uinfo->segreadcnt > mnt_fields->mnt_segreadcnt) { < info->segreadcnt = mnt_fields->mnt_segreadcnt; < } < if (uinfo->segwritecnt == 0 || uinfo->segwritecnt > mnt_fields->mnt_segwritecnt) { < info->segwritecnt = mnt_fields->mnt_segwritecnt; < } < < if (uinfo->enabled) { < mp->mnt_maxreadcnt = info->maxreadcnt; < mp->mnt_maxwritecnt = info->maxwritecnt; < mp->mnt_segreadcnt = info->segreadcnt; < mp->mnt_segwritecnt = info->segwritecnt; < mp->mnt_ioqueue_depth = info->ioqueue_depth; < mp->mnt_ioscale = MNT_IOSCALE(info->ioqueue_depth); --- > info = mp->mnt_disk_conditioner_info; > if (!info) { > info = mp->mnt_disk_conditioner_info = kalloc(sizeof(struct _disk_conditioner_info_t)); > bzero(info, sizeof(struct _disk_conditioner_info_t)); 270,272c202,207 < mount_unlock(mp); < < microuptime(&internal_info->last_io_timestamp); --- > info->enabled = uinfo->enabled; > info->access_time_usec = uinfo->access_time_usec; > info->read_throughput_mbps = uinfo->read_throughput_mbps; > info->write_throughput_mbps = uinfo->write_throughput_mbps; > info->is_ssd = uinfo->is_ssd; > microuptime(&info->last_io_timestamp); 283,285c218 < struct _disk_conditioner_info_t *internal_info = mp->mnt_disk_conditioner_info; < < if (!internal_info) { --- > if (!mp->mnt_disk_conditioner_info) { 288,291c221 < < if (internal_info->dcinfo.enabled) { < disk_conditioner_restore_mount_fields(mp, &(internal_info->mnt_fields)); < } --- > kfree(mp->mnt_disk_conditioner_info, sizeof(struct _disk_conditioner_info_t)); 293d222 < kfree(internal_info, sizeof(struct _disk_conditioner_info_t)); 299c228 < struct _disk_conditioner_info_t *internal_info = mp->mnt_disk_conditioner_info; --- > struct _disk_conditioner_info_t *info = mp->mnt_disk_conditioner_info; 301,302c230,231 < if (!internal_info || !internal_info->dcinfo.enabled) { < return !!(mp->mnt_kern_flag & MNTK_SSD); --- > if (!info || !info->enabled) { > return (mp->mnt_kern_flag & MNTK_SSD); 305c234 < return internal_info->dcinfo.is_ssd; --- > return info->is_ssd; NO DIFFS in ./bsd/kern/chunklist.h
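The vfs_disk_conditioner.c rework on the left-hand (<) side snapshots the mount's advertised I/O limits into saved_mount_fields when the conditioner is first attached, clamps user-supplied caps (zero or anything above the hardware value falls back to the saved limit), applies them while enabled, and restores the saved fields when the conditioner is disabled. The sketch below models that save/clamp/apply/restore cycle with simplified stand-in structures in place of mount_t and disk_conditioner_info; it is not the kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mnt_limits { uint32_t maxreadcnt, maxwritecnt, ioqueue_depth; };

struct conditioner {
    bool enabled;
    struct mnt_limits saved;          /* hardware values captured at enable time */
};

static uint32_t clamp_limit(uint32_t requested, uint32_t hw)
{
    return (requested == 0 || requested > hw) ? hw : requested;
}

static void conditioner_set(struct conditioner *c, struct mnt_limits *mnt,
                            const struct mnt_limits *requested, bool enable)
{
    if (!c->enabled && enable)
        c->saved = *mnt;              /* first enable: snapshot the hardware limits */

    if (enable) {
        mnt->maxreadcnt    = clamp_limit(requested->maxreadcnt,    c->saved.maxreadcnt);
        mnt->maxwritecnt   = clamp_limit(requested->maxwritecnt,   c->saved.maxwritecnt);
        mnt->ioqueue_depth = clamp_limit(requested->ioqueue_depth, c->saved.ioqueue_depth);
    } else if (c->enabled) {
        *mnt = c->saved;              /* disable: restore what the device advertised */
    }
    c->enabled = enable;
}

int main(void)
{
    struct mnt_limits mnt = { 131072, 131072, 32 };
    struct conditioner c = { false, { 0, 0, 0 } };
    struct mnt_limits req = { 4096, 0, 256 };   /* 0 and 256 get clamped to hardware */

    conditioner_set(&c, &mnt, &req, true);
    printf("enabled:  read=%u write=%u qdepth=%u\n",
           mnt.maxreadcnt, mnt.maxwritecnt, mnt.ioqueue_depth);
    conditioner_set(&c, &mnt, &req, false);
    printf("disabled: read=%u write=%u qdepth=%u\n",
           mnt.maxreadcnt, mnt.maxwritecnt, mnt.ioqueue_depth);
    return 0;
}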
NO DIFFS in ./bsd/kern/proc_uuid_policy.c

./bsd/kern/kern_memorystatus.c differences detected: 2c2 < * Copyright (c) 2006-2018 Apple Inc. All rights reserved. --- > * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. 74d73 < #include 78,90c77,87 < "" , /* kMemorystatusInvalid */ < "jettisoned" , /* kMemorystatusKilled */ < "highwater" , /* kMemorystatusKilledHiwat */ < "vnode-limit" , /* kMemorystatusKilledVnodes */ < "vm-pageshortage" , /* kMemorystatusKilledVMPageShortage */ < "proc-thrashing" , /* kMemorystatusKilledProcThrashing */ < "fc-thrashing" , /* kMemorystatusKilledFCThrashing */ < "per-process-limit" , /* kMemorystatusKilledPerProcessLimit */ < "disk-space-shortage" , /* kMemorystatusKilledDiskSpaceShortage */ < "idle-exit" , /* kMemorystatusKilledIdleExit */ < "zone-map-exhaustion" , /* kMemorystatusKilledZoneMapExhaustion */ < "vm-compressor-thrashing" , /* kMemorystatusKilledVMCompressorThrashing */ < "vm-compressor-space-shortage" , /* kMemorystatusKilledVMCompressorSpaceShortage */ --- > "" , > "jettisoned" , /* kMemorystatusKilled */ > "highwater" , /* kMemorystatusKilledHiwat */ > "vnode-limit" , /* kMemorystatusKilledVnodes */ > "vm-pageshortage" , /* kMemorystatusKilledVMPageShortage */ > "vm-thrashing" , /* kMemorystatusKilledVMThrashing */ > "fc-thrashing" , /* kMemorystatusKilledFCThrashing */ > "per-process-limit" , /* kMemorystatusKilledPerProcessLimit */ > "diagnostic" , /* kMemorystatusKilledDiagnostic */ > "idle-exit" , /* kMemorystatusKilledIdleExit */ > "zone-map-exhaustion" , /* kMemorystatusKilledZoneMapExhaustion */ 120a118 > case kMemorystatusKilledVMThrashing: 122,123d119 < case kMemorystatusKilledVMCompressorThrashing: < case kMemorystatusKilledVMCompressorSpaceShortage: 286a283,284 > int memorystatus_wakeup = 0; > 291d288 < 309,318c306,307 < < /* < * Checking the p_memstat_state almost always requires the proc_list_lock < * because the jetsam thread could be on the other core changing the state. < * < * App -- almost always managed by a system process. Always have dirty tracking OFF. Can include extensions too. < * System Processes -- not managed by anybody. Always have dirty tracking ON. Can include extensions (here) too. < */ < #define isApp(p) ((p->p_memstat_state & P_MEMSTAT_MANAGED) || ! (p->p_memstat_dirty & P_DIRTY_TRACK)) < #define isSysProc(p) ( ! (p->p_memstat_state & P_MEMSTAT_MANAGED) || (p->p_memstat_dirty & P_DIRTY_TRACK)) --- > #define isApp(p) (! 
(p->p_memstat_dirty & P_DIRTY_TRACK)) > #define isSysProc(p) ((p->p_memstat_dirty & P_DIRTY_TRACK)) 331,335c320 < extern uint64_t vm_purgeable_purge_task_owned(task_t task); < boolean_t memorystatus_allowed_vm_map_fork(task_t); < #if DEVELOPMENT || DEBUG < void memorystatus_abort_vm_map_fork(task_t); < #endif --- > boolean_t memorystatus_allowed_vm_map_fork(__unused task_t); 612d596 < static memorystatus_jetsam_snapshot_t *memorystatus_jetsam_snapshot_copy; 615d598 < static unsigned int memorystatus_jetsam_snapshot_copy_count = 0; 617d599 < static unsigned int memorystatus_jetsam_snapshot_size = 0; 632c614 < static void memorystatus_get_task_page_counts(task_t task, uint32_t *footprint, uint32_t *max_footprint_lifetime, uint32_t *purgeable_pages); --- > static void memorystatus_get_task_page_counts(task_t task, uint32_t *footprint, uint32_t *max_footprint, uint32_t *max_footprint_lifetime, uint32_t *purgeable_pages); 646,647c628,629 < static boolean_t memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, unsigned int band, int aggr_count, uint32_t *errors); < static boolean_t memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged); --- > static boolean_t memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, int aggr_count, uint32_t *errors); > static boolean_t memorystatus_kill_hiwat_proc(uint32_t *errors); 708,712d689 < unsigned int memorystatus_frozen_processes_max = 0; < unsigned int memorystatus_frozen_shared_mb = 0; < unsigned int memorystatus_frozen_shared_mb_max = 0; < unsigned int memorystatus_freeze_shared_mb_per_process_max = 0; /* Max. MB allowed per process to be freezer-eligible. */ < unsigned int memorystatus_freeze_private_shared_pages_ratio = 2; /* Ratio of private:shared pages for a process to be freezer-eligible. */ 714,715d690 < unsigned int memorystatus_thaw_count = 0; < unsigned int memorystatus_refreeze_eligible_count = 0; /* # of processes currently thawed i.e. have state on disk & in-memory */ 739,753d713 < /* < * This value is the threshold that a process must meet to be considered for scavenging. < */ < #if CONFIG_EMBEDDED < #define VM_PRESSURE_MINIMUM_RSIZE 6 /* MB */ < #else /* CONFIG_EMBEDDED */ < #define VM_PRESSURE_MINIMUM_RSIZE 10 /* MB */ < #endif /* CONFIG_EMBEDDED */ < < uint32_t vm_pressure_task_footprint_min = VM_PRESSURE_MINIMUM_RSIZE; < < #if DEVELOPMENT || DEBUG < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_vm_pressure_task_footprint_min, CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pressure_task_footprint_min, 0, ""); < #endif /* DEVELOPMENT || DEBUG */ < 767,778d726 < /* < * Table that expresses the probability of a process < * being used in the next hour. 
< */ < typedef struct memorystatus_internal_probabilities { < char proc_name[MAXCOMLEN + 1]; < int use_probability; < } memorystatus_internal_probabilities_t; < < static memorystatus_internal_probabilities_t *memorystatus_global_probabilities_table = NULL; < static size_t memorystatus_global_probabilities_size = 0; < 781a730 > 784d732 < int memorystatus_freeze_jetsam_band = 0; /* the jetsam band which will contain P_MEMSTAT_FROZEN processes */ 792,794d739 < static boolean_t memorystatus_is_process_eligible_for_freeze(proc_t p); < static void memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused); < static boolean_t memorystatus_freeze_thread_should_run(void); 796c741 < void memorystatus_disable_freeze(void); --- > static void memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused); 807,811d751 < static uint64_t memorystatus_freeze_budget_pages_remaining = 0; //remaining # of pages that can be frozen to disk < static boolean_t memorystatus_freeze_degradation = FALSE; //protected by the freezer mutex. Signals we are in a degraded freeze mode. < < static unsigned int memorystatus_max_frozen_demotions_daily = 0; < static unsigned int memorystatus_thaw_count_demotion_threshold = 0; 813a754 > static uint64_t memorystatus_freeze_count = 0; 817,819d757 < #define DEGRADED_WINDOW_MINS (30) < #define NORMAL_WINDOW_MINS (24 * 60) < 821,822c759,760 < { DEGRADED_WINDOW_MINS, 1, 0, 0, { 0, 0 }}, < { NORMAL_WINDOW_MINS, 1, 0, 0, { 0, 0 }}, --- > { 60, 8, 0, 0, { 0, 0 }, FALSE }, /* 1 hour intermediate interval, 8x burst */ > { 24 * 60, 1, 0, 0, { 0, 0 }, FALSE }, /* 24 hour long interval, no burst */ 824,825d761 < throttle_interval_t *degraded_throttle_window = &throttle_intervals[0]; < throttle_interval_t *normal_throttle_window = &throttle_intervals[1]; 827,828c763 < extern uint64_t vm_swap_get_free_space(void); < extern boolean_t vm_swap_max_budget(uint64_t *); --- > static uint64_t memorystatus_freeze_throttle_count = 0; 830c765 < static void memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed); --- > static unsigned int memorystatus_suspended_footprint_total = 0; /* pages */ 832c767 < static uint64_t memorystatus_freezer_thread_next_run_ts = 0; --- > extern uint64_t vm_swap_get_free_space(void); 834,837c769 < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_count, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_frozen_count, 0, ""); < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_thaw_count, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_thaw_count, 0, ""); < SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_pageouts, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_freeze_pageouts, ""); < SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_budget_pages_remaining, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_freeze_budget_pages_remaining, ""); --- > static boolean_t memorystatus_freeze_update_throttle(void); 1202d1133 < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_jetsam_band, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_jetsam_band, 0, ""); 1204d1134 < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_degraded_mode, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_freeze_degradation, 0, ""); 1211,1225c1141,1143 < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_refreeze_eligible_count, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_refreeze_eligible_count, 0, ""); < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_processes_max, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_frozen_processes_max, 0, ""); < < /* < * Max. 
shared-anonymous memory in MB that can be held by frozen processes in the high jetsam band. < * "0" means no limit. < * Default is 10% of system-wide task limit. < */ < < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_shared_mb_max, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_frozen_shared_mb_max, 0, ""); < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_shared_mb, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_frozen_shared_mb, 0, ""); < < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_shared_mb_per_process_max, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_shared_mb_per_process_max, 0, ""); < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_private_shared_pages_ratio, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_private_shared_pages_ratio, 0, ""); < --- > SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_count, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_freeze_count, ""); > SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_pageouts, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_freeze_pageouts, ""); > SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_throttle_count, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_freeze_throttle_count, ""); 1228,1236d1145 < /* < * max. # of frozen process demotions we will allow in our daily cycle. < */ < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_max_freeze_demotions_daily, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_max_frozen_demotions_daily, 0, ""); < /* < * min # of thaws needed by a process to protect it from getting demoted into the IDLE band. < */ < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_thaw_count_demotion_threshold, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_thaw_count_demotion_threshold, 0, ""); < 1250d1158 < int freezer_error_code = 0; 1253d1160 < printf("sysctl_freeze: Freeze is DISABLED\n"); 1271,1272c1178,1180 < uint32_t purgeable, wired, clean, dirty, shared; < uint32_t max_pages = 0, state = 0; --- > uint32_t purgeable, wired, clean, dirty; > boolean_t shared; > uint32_t max_pages = 0; 1274a1183,1185 > > unsigned int avail_swap_space = 0; /* in pages. */ > 1277,1284c1188 < * will hold compressed data. < * < * We don't care about the global freezer budget or the process's (min/max) budget here. < * The freeze sysctl is meant to force-freeze a process. < * < * We also don't update any global or process stats on this path, so that the jetsam/ freeze < * logic remains unaffected. The tasks we're performing here are: freeze the process, set the < * P_MEMSTAT_FROZEN bit, and elevate the process to a higher band (if the freezer is active). --- > * while will hold compressed data. 1286c1190,1192 < max_pages = memorystatus_freeze_pages_max; --- > avail_swap_space = vm_swap_get_free_space() / PAGE_SIZE_64; > > max_pages = MIN(avail_swap_space, memorystatus_freeze_pages_max); 1295,1366c1201 < proc_list_lock(); < state = p->p_memstat_state; < proc_list_unlock(); < < /* < * The jetsam path also verifies that the process is a suspended App. We don't care about that here. < * We simply ensure that jetsam is not already working on the process and that the process has not < * explicitly disabled freezing. < */ < if (state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED)) { < printf("sysctl_freeze: p_memstat_state check failed, process is%s%s%s\n", < (state & P_MEMSTAT_TERMINATED) ? " terminated" : "", < (state & P_MEMSTAT_LOCKED) ? " locked" : "", < (state & P_MEMSTAT_FREEZE_DISABLED) ? 
" unfreezable" : ""); < < proc_rele(p); < lck_mtx_unlock(&freezer_mutex); < return EPERM; < } < < error = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, &freezer_error_code, FALSE /* eval only */); < < if (error) { < char reason[128]; < if (freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) { < strlcpy(reason, "too much shared memory", 128); < } < < if (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { < strlcpy(reason, "low private-shared pages ratio", 128); < } < < if (freezer_error_code == FREEZER_ERROR_NO_COMPRESSOR_SPACE) { < strlcpy(reason, "no compressor space", 128); < } < < if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) { < strlcpy(reason, "no swap space", 128); < } < < printf("sysctl_freeze: task_freeze failed: %s\n", reason); < < if (error == KERN_NO_SPACE) { < /* Make it easy to distinguish between failures due to low compressor/ swap space and other failures. */ < error = ENOSPC; < } else { < error = EIO; < } < } else { < proc_list_lock(); < if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) { < p->p_memstat_state |= P_MEMSTAT_FROZEN; < memorystatus_frozen_count++; < } < p->p_memstat_frozen_count++; < < < proc_list_unlock(); < < if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { < /* < * We elevate only if we are going to swap out the data. < */ < error = memorystatus_update_inactive_jetsam_priority_band(pid, MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE, < memorystatus_freeze_jetsam_band, TRUE); < < if (error) { < printf("sysctl_freeze: Elevating frozen process to higher jetsam band failed with %d\n", error); < } < } < } < --- > error = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, FALSE); 1368a1204,1206 > if (error) > error = EIO; > 1371,1372d1208 < } else { < printf("sysctl_freeze: Invalid process\n"); 1375d1210 < 1405a1241 > proc_rele(p); 1407c1243 < if (error) { --- > if (error) 1409,1422d1244 < } else { < /* < * task_thaw() succeeded. < * < * We increment memorystatus_frozen_count on the sysctl freeze path. < * And so we need the P_MEMSTAT_FROZEN to decrement the frozen count < * when this process exits. 
< * < * proc_list_lock(); < * p->p_memstat_state &= ~P_MEMSTAT_FROZEN; < * proc_list_unlock(); < */ < } < proc_rele(p); 1433,1620d1254 < typedef struct _global_freezable_status{ < boolean_t freeze_pages_threshold_crossed; < boolean_t freeze_eligible_procs_available; < boolean_t freeze_scheduled_in_future; < }global_freezable_status_t; < < typedef struct _proc_freezable_status{ < boolean_t freeze_has_memstat_state; < boolean_t freeze_has_pages_min; < int freeze_has_probability; < boolean_t freeze_attempted; < uint32_t p_memstat_state; < uint32_t p_pages; < int p_freeze_error_code; < int p_pid; < char p_name[MAXCOMLEN + 1]; < }proc_freezable_status_t; < < #define MAX_FREEZABLE_PROCESSES 100 < < static int < memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t *retval) < { < uint32_t proc_count = 0, i = 0; < global_freezable_status_t *list_head; < proc_freezable_status_t *list_entry; < size_t list_size = 0; < proc_t p; < memstat_bucket_t *bucket; < uint32_t state = 0, pages = 0, entry_count = 0; < boolean_t try_freeze = TRUE; < int error = 0, probability_of_use = 0; < < < if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE == FALSE) { < return ENOTSUP; < } < < list_size = sizeof(global_freezable_status_t) + (sizeof(proc_freezable_status_t) * MAX_FREEZABLE_PROCESSES); < < if (buffer_size < list_size) { < return EINVAL; < } < < list_head = (global_freezable_status_t*)kalloc(list_size); < if (list_head == NULL) { < return ENOMEM; < } < < memset(list_head, 0, list_size); < < list_size = sizeof(global_freezable_status_t); < < proc_list_lock(); < < uint64_t curr_time = mach_absolute_time(); < < list_head->freeze_pages_threshold_crossed = (memorystatus_available_pages < memorystatus_freeze_threshold); < list_head->freeze_eligible_procs_available = ((memorystatus_suspended_count - memorystatus_frozen_count) > memorystatus_freeze_suspended_threshold); < list_head->freeze_scheduled_in_future = (curr_time < memorystatus_freezer_thread_next_run_ts); < < list_entry = (proc_freezable_status_t*) ((uintptr_t)list_head + sizeof(global_freezable_status_t)); < < bucket = &memstat_bucket[JETSAM_PRIORITY_IDLE]; < < entry_count = (memorystatus_global_probabilities_size / sizeof(memorystatus_internal_probabilities_t)); < < p = memorystatus_get_first_proc_locked(&i, FALSE); < proc_count++; < < while ((proc_count <= MAX_FREEZABLE_PROCESSES) && < (p) && < (list_size < buffer_size)) { < < if (isApp(p) == FALSE) { < p = memorystatus_get_next_proc_locked(&i, p, FALSE); < proc_count++; < continue; < } < < strlcpy(list_entry->p_name, p->p_name, MAXCOMLEN + 1); < < list_entry->p_pid = p->p_pid; < < state = p->p_memstat_state; < < if ((state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED | P_MEMSTAT_FREEZE_IGNORE)) || < !(state & P_MEMSTAT_SUSPENDED)) { < < try_freeze = list_entry->freeze_has_memstat_state = FALSE; < } else { < try_freeze = list_entry->freeze_has_memstat_state = TRUE; < } < < list_entry->p_memstat_state = state; < < memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL); < if (pages < memorystatus_freeze_pages_min) { < try_freeze = list_entry->freeze_has_pages_min = FALSE; < } else { < list_entry->freeze_has_pages_min = TRUE; < if (try_freeze != FALSE) { < try_freeze = TRUE; < } < } < < list_entry->p_pages = pages; < < if (entry_count) { < uint32_t j = 0; < for (j = 0; j < entry_count; j++ ) { < if (strncmp(memorystatus_global_probabilities_table[j].proc_name, < p->p_name, < MAXCOMLEN + 1) == 0) { < < probability_of_use = 
memorystatus_global_probabilities_table[j].use_probability; < break; < } < } < < list_entry->freeze_has_probability = probability_of_use; < < if (probability_of_use && try_freeze != FALSE) { < try_freeze = TRUE; < } else { < try_freeze = FALSE; < } < } else { < if (try_freeze != FALSE) { < try_freeze = TRUE; < } < list_entry->freeze_has_probability = -1; < } < < if (try_freeze) { < < uint32_t purgeable, wired, clean, dirty, shared; < uint32_t max_pages = 0; < int freezer_error_code = 0; < < error = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, &freezer_error_code, TRUE /* eval only */); < < if (error) { < list_entry->p_freeze_error_code = freezer_error_code; < } < < list_entry->freeze_attempted = TRUE; < } < < list_entry++; < < list_size += sizeof(proc_freezable_status_t); < < p = memorystatus_get_next_proc_locked(&i, p, FALSE); < proc_count++; < } < < proc_list_unlock(); < < buffer_size = list_size; < < error = copyout(list_head, buffer, buffer_size); < if (error == 0) { < *retval = buffer_size; < } else { < *retval = 0; < } < < list_size = sizeof(global_freezable_status_t) + (sizeof(proc_freezable_status_t) * MAX_FREEZABLE_PROCESSES); < kfree(list_head, list_size); < < MEMORYSTATUS_DEBUG(1, "memorystatus_freezer_get_status: returning %d (%lu - size)\n", error, (unsigned long)*list_size); < < return error; < } < < static int < memorystatus_freezer_control(int32_t flags, user_addr_t buffer, size_t buffer_size, int32_t *retval) < { < int err = ENOTSUP; < < if (flags == FREEZER_CONTROL_GET_STATUS) { < err = memorystatus_freezer_get_status(buffer, buffer_size, retval); < } < < return err; < } < 1755c1389 < memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL); --- > memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL, NULL); 1763c1397 < memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL); --- > memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL, NULL); 1824,1867d1457 < /* < * Structure to hold state for a jetsam thread. < * Typically there should be a single jetsam thread < * unless parallel jetsam is enabled. < */ < struct jetsam_thread_state { < boolean_t inited; /* if the thread is initialized */ < int memorystatus_wakeup; /* wake channel */ < int index; /* jetsam thread index */ < thread_t thread; /* jetsam thread pointer */ < } *jetsam_threads; < < /* Maximum number of jetsam threads allowed */ < #define JETSAM_THREADS_LIMIT 3 < < /* Number of active jetsam threads */ < _Atomic int active_jetsam_threads = 1; < < /* Number of maximum jetsam threads configured */ < int max_jetsam_threads = JETSAM_THREADS_LIMIT; < < /* < * Global switch for enabling fast jetsam. Fast jetsam is < * hooked up via the system_override() system call. 
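The freezer status code above matches each candidate against memorystatus_global_probabilities_table by process name and uses use_probability to decide whether it stays freeze-eligible. A condensed sketch of that lookup; the table contents and MAXCOMLEN value are illustrative.

/* Sketch of the probability-table lookup used by the freezer status code above. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAXCOMLEN 16   /* placeholder */

typedef struct memorystatus_internal_probabilities {
    char proc_name[MAXCOMLEN + 1];
    int  use_probability;
} memorystatus_internal_probabilities_t;

static int
lookup_use_probability(const memorystatus_internal_probabilities_t *table,
                       size_t entry_count, const char *proc_name)
{
    for (size_t i = 0; i < entry_count; i++) {
        if (strncmp(table[i].proc_name, proc_name, MAXCOMLEN + 1) == 0)
            return table[i].use_probability;
    }
    return 0;   /* unknown process: treated as unlikely to be used again */
}

int main(void)
{
    memorystatus_internal_probabilities_t table[] = {
        { "MobileMail", 80 },     /* hypothetical entries */
        { "MobileSafari", 95 },
    };
    printf("MobileMail -> %d\n",  lookup_use_probability(table, 2, "MobileMail"));
    printf("OtherApp   -> %d\n",  lookup_use_probability(table, 2, "OtherApp"));
    return 0;
}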
It has the < * following effects: < * - Raise the jetsam threshold ("clear-the-deck") < * - Enabled parallel jetsam on eligible devices < */ < int fast_jetsam_enabled = 0; < < /* Routine to find the jetsam state structure for the current jetsam thread */ < static inline struct jetsam_thread_state * < jetsam_current_thread(void) < { < for (int thr_id = 0; thr_id < max_jetsam_threads; thr_id++) { < if (jetsam_threads[thr_id].thread == current_thread()) < return &(jetsam_threads[thr_id]); < } < panic("jetsam_current_thread() is being called from a non-jetsam thread\n"); < /* Contol should not reach here */ < return NULL; < } < < 1870a1461 > thread_t thread = THREAD_NULL; 1875,1878d1465 < memorystatus_freeze_jetsam_band = JETSAM_PRIORITY_UI_SUPPORT; < memorystatus_frozen_processes_max = FREEZE_PROCESSES_MAX; < memorystatus_frozen_shared_mb_max = ((MAX_FROZEN_SHARED_MB_PERCENT * max_task_footprint_mb) / 100); /* 10% of the system wide task limit */ < memorystatus_freeze_shared_mb_per_process_max = (memorystatus_frozen_shared_mb_max / 4); 1881,1882d1467 < memorystatus_max_frozen_demotions_daily = MAX_FROZEN_PROCESS_DEMOTIONS; < memorystatus_thaw_count_demotion_threshold = MIN_THAW_DEMOTION_THRESHOLD; 2005,2008d1589 < < memorystatus_jetsam_snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + < (sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_max); < 2010c1591,1592 < (memorystatus_jetsam_snapshot_t*)kalloc(memorystatus_jetsam_snapshot_size); --- > (memorystatus_jetsam_snapshot_t*)kalloc(sizeof(memorystatus_jetsam_snapshot_t) + > sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_max); 2015,2020d1596 < memorystatus_jetsam_snapshot_copy = < (memorystatus_jetsam_snapshot_t*)kalloc(memorystatus_jetsam_snapshot_size); < if (!memorystatus_jetsam_snapshot_copy) { < panic("Could not allocate memorystatus_jetsam_snapshot_copy"); < } < 2029,2063c1605,1609 < /* Check the boot-arg to see if fast jetsam is allowed */ < if (!PE_parse_boot_argn("fast_jetsam_enabled", &fast_jetsam_enabled, sizeof (fast_jetsam_enabled))) { < fast_jetsam_enabled = 0; < } < < /* Check the boot-arg to configure the maximum number of jetsam threads */ < if (!PE_parse_boot_argn("max_jetsam_threads", &max_jetsam_threads, sizeof (max_jetsam_threads))) { < max_jetsam_threads = JETSAM_THREADS_LIMIT; < } < < /* Restrict the maximum number of jetsam threads to JETSAM_THREADS_LIMIT */ < if (max_jetsam_threads > JETSAM_THREADS_LIMIT) { < max_jetsam_threads = JETSAM_THREADS_LIMIT; < } < < /* For low CPU systems disable fast jetsam mechanism */ < if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { < max_jetsam_threads = 1; < fast_jetsam_enabled = 0; < } < < /* Initialize the jetsam_threads state array */ < jetsam_threads = kalloc(sizeof(struct jetsam_thread_state) * max_jetsam_threads); < < /* Initialize all the jetsam threads */ < for (i = 0; i < max_jetsam_threads; i++) { < < result = kernel_thread_start_priority(memorystatus_thread, NULL, 95 /* MAXPRI_KERNEL */, &jetsam_threads[i].thread); < if (result == KERN_SUCCESS) { < jetsam_threads[i].inited = FALSE; < jetsam_threads[i].index = i; < thread_deallocate(jetsam_threads[i].thread); < } else { < panic("Could not create memorystatus_thread %d", i); < } --- > result = kernel_thread_start_priority(memorystatus_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread); > if (result == KERN_SUCCESS) { > thread_deallocate(thread); > } else { > panic("Could not create memorystatus_thread"); 2111,2114d1656 < /* < * The jetsam_reason 
(os_reason_t) has enough information about the kill cause. < * We don't really need jetsam_flags anymore, so it's okay that not all possible kill causes have been mapped. < */ 2117,2124c1659,1665 < case kMemorystatusKilledHiwat: jetsam_flags |= P_JETSAM_HIWAT; break; < case kMemorystatusKilledVnodes: jetsam_flags |= P_JETSAM_VNODE; break; < case kMemorystatusKilledVMPageShortage: jetsam_flags |= P_JETSAM_VMPAGESHORTAGE; break; < case kMemorystatusKilledVMCompressorThrashing: < case kMemorystatusKilledVMCompressorSpaceShortage: jetsam_flags |= P_JETSAM_VMTHRASHING; break; < case kMemorystatusKilledFCThrashing: jetsam_flags |= P_JETSAM_FCTHRASHING; break; < case kMemorystatusKilledPerProcessLimit: jetsam_flags |= P_JETSAM_PID; break; < case kMemorystatusKilledIdleExit: jetsam_flags |= P_JETSAM_IDLEEXIT; break; --- > case kMemorystatusKilledHiwat: jetsam_flags |= P_JETSAM_HIWAT; break; > case kMemorystatusKilledVnodes: jetsam_flags |= P_JETSAM_VNODE; break; > case kMemorystatusKilledVMPageShortage: jetsam_flags |= P_JETSAM_VMPAGESHORTAGE; break; > case kMemorystatusKilledVMThrashing: jetsam_flags |= P_JETSAM_VMTHRASHING; break; > case kMemorystatusKilledFCThrashing: jetsam_flags |= P_JETSAM_FCTHRASHING; break; > case kMemorystatusKilledPerProcessLimit: jetsam_flags |= P_JETSAM_PID; break; > case kMemorystatusKilledIdleExit: jetsam_flags |= P_JETSAM_IDLEEXIT; break; 2160c1701 < memorystatus_update_inactive_jetsam_priority_band(pid_t pid, uint32_t op_flags, int jetsam_prio, boolean_t effective_now) --- > memorystatus_update_inactive_jetsam_priority_band(pid_t pid, uint32_t op_flags, boolean_t effective_now) 2192c1733 < if (p->p_memstat_effectivepriority < jetsam_prio) { --- > if (p->p_memstat_effectivepriority < JETSAM_PRIORITY_ELEVATED_INACTIVE) { 2204c1745 < memorystatus_update_priority_locked(p, jetsam_prio, FALSE, FALSE); --- > memorystatus_update_priority_locked(p, JETSAM_PRIORITY_ELEVATED_INACTIVE, FALSE, FALSE); 2217c1758 < if (p->p_memstat_effectivepriority == jetsam_prio) { --- > if (p->p_memstat_effectivepriority == JETSAM_PRIORITY_ELEVATED_INACTIVE) { 2580,2598c2121,2123 < if (p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) { < /* < * 2 types of processes can use the non-standard elevated inactive band: < * - Frozen processes that always land in memorystatus_freeze_jetsam_band < * OR < * - processes that specifically opt-in to the elevated inactive support e.g. docked processes. < */ < #if CONFIG_FREEZE < if (p->p_memstat_state & P_MEMSTAT_FROZEN) { < if (priority <= memorystatus_freeze_jetsam_band) { < priority = memorystatus_freeze_jetsam_band; < } < } else < #endif /* CONFIG_FREEZE */ < { < if (priority <= JETSAM_PRIORITY_ELEVATED_INACTIVE) { < priority = JETSAM_PRIORITY_ELEVATED_INACTIVE; < } < } --- > if (priority <= JETSAM_PRIORITY_ELEVATED_INACTIVE && (p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND)) { > priority = JETSAM_PRIORITY_ELEVATED_INACTIVE; > 2605c2130 < * - it has an 'elevated inactive jetsam band' attribute, then put it in the appropriate band. --- > * - it has an 'elevated inactive jetsam band' attribute, then put it in the JETSAM_PRIORITY_ELEVATED_INACTIVE band. 
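The hunks above introduce a pool of jetsam threads (struct jetsam_thread_state, capped at JETSAM_THREADS_LIMIT and tunable through the max_jetsam_threads and fast_jetsam_enabled boot-args) together with jetsam_current_thread(), which maps the running thread back to its state slot. Below is a user-space pthread model of that lookup; the mutex stands in for the kernel's startup ordering and nothing here is the kernel's actual implementation.

/* Toy model of the jetsam thread pool lookup described above. */
#include <pthread.h>
#include <stdio.h>

#define JETSAM_THREADS_LIMIT 3

struct jetsam_thread_state {
    int inited;
    int index;
    pthread_t thread;
};

static struct jetsam_thread_state jetsam_threads[JETSAM_THREADS_LIMIT];
static int max_jetsam_threads = JETSAM_THREADS_LIMIT;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static struct jetsam_thread_state *jetsam_current_thread(void)
{
    for (int i = 0; i < max_jetsam_threads; i++) {
        if (pthread_equal(jetsam_threads[i].thread, pthread_self()))
            return &jetsam_threads[i];
    }
    return NULL;   /* the kernel panics here; a model just reports failure */
}

static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&pool_lock);   /* wait until every slot is registered */
    pthread_mutex_unlock(&pool_lock);
    struct jetsam_thread_state *st = jetsam_current_thread();
    if (st) {
        st->inited = 1;
        printf("jetsam thread %d running\n", st->index);
    }
    return NULL;
}

int main(void)
{
    pthread_mutex_lock(&pool_lock);
    for (int i = 0; i < max_jetsam_threads; i++) {
        jetsam_threads[i].index = i;
        pthread_create(&jetsam_threads[i].thread, NULL, worker, NULL);
    }
    pthread_mutex_unlock(&pool_lock);
    for (int i = 0; i < max_jetsam_threads; i++)
        pthread_join(jetsam_threads[i].thread, NULL);
    return 0;
}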
2609,2621c2134,2135 < if (p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) { < #if CONFIG_FREEZE < if (p->p_memstat_state & P_MEMSTAT_FROZEN) { < if (priority <= memorystatus_freeze_jetsam_band) { < priority = memorystatus_freeze_jetsam_band; < } < } else < #endif /* CONFIG_FREEZE */ < { < if (priority <= JETSAM_PRIORITY_ELEVATED_INACTIVE) { < priority = JETSAM_PRIORITY_ELEVATED_INACTIVE; < } < } --- > if (priority <= JETSAM_PRIORITY_ELEVATED_INACTIVE && (p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND)) { > priority = JETSAM_PRIORITY_ELEVATED_INACTIVE; 2744,2752d2257 < < /* < * About to become active and so memory footprint could change. < * So mark it eligible for freeze-considerations next time around. < */ < if (p->p_memstat_state & P_MEMSTAT_FREEZE_IGNORE) { < p->p_memstat_state &= ~P_MEMSTAT_FREEZE_IGNORE; < } < 2761,2762d2265 < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_CHANGE_PRIORITY), p->p_pid, priority, p->p_memstat_effectivepriority, 0, 0); < 3032,3037d2534 < < if (p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) { < p->p_memstat_state &= ~P_MEMSTAT_REFREEZE_ELIGIBLE; < memorystatus_refreeze_eligible_count--; < } < 3039,3040d2535 < memorystatus_frozen_shared_mb -= p->p_memstat_freeze_sharedanon_pages; < p->p_memstat_freeze_sharedanon_pages = 0; 3043a2539 > memorystatus_suspended_footprint_total -= p->p_memstat_suspendedfootprint; 3090,3095d2585 < /* Only one type of DEFER behavior is allowed.*/ < if ((pcontrol & PROC_DIRTY_DEFER) && < (pcontrol & PROC_DIRTY_DEFER_ALWAYS)) { < return EINVAL; < } < 3097,3098c2587 < if (((pcontrol & PROC_DIRTY_DEFER) || < (pcontrol & PROC_DIRTY_DEFER_ALWAYS)) && --- > if ((pcontrol & PROC_DIRTY_DEFER) && 3224c2713 < if (pcontrol & (PROC_DIRTY_DEFER | PROC_DIRTY_DEFER_ALWAYS)) { --- > if (pcontrol & PROC_DIRTY_DEFER) { 3226,3227c2715 < if ((pcontrol & (PROC_DIRTY_DEFER)) && < !(old_dirty & P_DIRTY_DEFER)) { --- > if ( !(old_dirty & P_DIRTY_DEFER)) { 3231,3235d2718 < if ((pcontrol & (PROC_DIRTY_DEFER_ALWAYS)) && < !(old_dirty & P_DIRTY_DEFER_ALWAYS)) { < p->p_memstat_dirty |= P_DIRTY_DEFER_ALWAYS; < } < 3417,3418d2899 < * P_DIRTY_DEFER: one-time protection window given at launch < * P_DIRTY_DEFER_ALWAYS: protection window given for every dirty->clean transition. Like non-legacy mode. 3442,3443c2923 < if (((p->p_memstat_dirty & P_DIRTY_DEFER_ALWAYS) == FALSE) && < (mach_absolute_time() >= p->p_memstat_idledeadline)) { --- > if (mach_absolute_time() >= p->p_memstat_idledeadline) { 3445,3446c2925 < * The process' hasn't enrolled in the "always defer after dirty" < * mode and its deadline has expired. It currently --- > * The process' deadline has expired. 
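The two elevated-band hunks above split the floor applied to inactive processes: frozen processes now float up to memorystatus_freeze_jetsam_band, while other opted-in (e.g. docked) processes keep JETSAM_PRIORITY_ELEVATED_INACTIVE. A condensed sketch of that floor; the band numbers are stand-ins.

/* Sketch of the elevated-inactive priority floor from the hunks above. */
#include <stdbool.h>
#include <stdio.h>

#define JETSAM_PRIORITY_ELEVATED_INACTIVE 6     /* placeholder band */
static int memorystatus_freeze_jetsam_band = 8; /* placeholder for the freezer band */

static int elevated_floor(int priority, bool uses_elevated_band, bool frozen)
{
    if (!uses_elevated_band)
        return priority;
    int floor = frozen ? memorystatus_freeze_jetsam_band
                       : JETSAM_PRIORITY_ELEVATED_INACTIVE;
    return priority < floor ? floor : priority;
}

int main(void)
{
    printf("%d\n", elevated_floor(0, true, true));   /* frozen idle -> freezer band */
    printf("%d\n", elevated_floor(0, true, false));  /* docked idle -> elevated inactive */
    printf("%d\n", elevated_floor(0, false, false)); /* normal idle stays put */
    return 0;
}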
It currently 3462,3463c2941 < * Process enrolled in "always stop in deferral band after dirty" OR < * it still has some protection window left and so --- > * It still has some protection window left and so 3468,3471c2946 < if (p->p_memstat_dirty & P_DIRTY_DEFER_ALWAYS) { < memorystatus_schedule_idle_demotion_locked(p, TRUE); < reschedule = TRUE; < } else if (p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) { --- > if (p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) { 3601c3076 < if (!pcontrol || (pcontrol & (PROC_DIRTY_LAUNCH_IN_PROGRESS | PROC_DIRTY_DEFER | PROC_DIRTY_DEFER_ALWAYS)) == 0) { --- > if (!pcontrol || (pcontrol & (PROC_DIRTY_LAUNCH_IN_PROGRESS | PROC_DIRTY_DEFER)) == 0) { 3611c3086 < if (pcontrol & (PROC_DIRTY_DEFER | PROC_DIRTY_DEFER_ALWAYS)) { --- > if (pcontrol & PROC_DIRTY_DEFER) { 3613,3615c3088 < if (p->p_memstat_dirty & P_DIRTY_DEFER) { < p->p_memstat_dirty &= ~(P_DIRTY_DEFER); < } --- > if (p->p_memstat_dirty & P_DIRTY_DEFER) { 3617,3619c3090 < if (p->p_memstat_dirty & P_DIRTY_DEFER_ALWAYS) { < p->p_memstat_dirty &= ~(P_DIRTY_DEFER_ALWAYS); < } --- > p->p_memstat_dirty &= ~P_DIRTY_DEFER; 3621,3623c3092,3095 < memorystatus_invalidate_idle_demotion_locked(p, TRUE); < memorystatus_update_idle_priority_locked(p); < memorystatus_reschedule_idle_demotion_locked(); --- > memorystatus_invalidate_idle_demotion_locked(p, TRUE); > memorystatus_update_idle_priority_locked(p); > memorystatus_reschedule_idle_demotion_locked(); > } 3683c3155 < memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL); --- > memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL, NULL); 3686a3159,3160 > p->p_memstat_suspendedfootprint = pages; > memorystatus_suspended_footprint_total += pages; 3706,3723c3180,3181 < /* < * Now that we don't _thaw_ a process completely, < * resuming it (and having some on-demand swapins) < * shouldn't preclude it from being counted as frozen. < * < * memorystatus_frozen_count--; < * < * We preserve the P_MEMSTAT_FROZEN state since the process < * could have state on disk AND so will deserve some protection < * in the jetsam bands. < */ < if ((p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) == 0) { < p->p_memstat_state |= P_MEMSTAT_REFREEZE_ELIGIBLE; < memorystatus_refreeze_eligible_count++; < } < p->p_memstat_thaw_count++; < < memorystatus_thaw_count++; --- > memorystatus_frozen_count--; > p->p_memstat_state |= P_MEMSTAT_PRIOR_THAW; 3725a3184 > memorystatus_suspended_footprint_total -= p->p_memstat_suspendedfootprint; 3731,3735c3190 < /* < * P_MEMSTAT_FROZEN will remain unchanged. 
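The dirty-tracking changes above add P_DIRTY_DEFER_ALWAYS: plain DEFER grants a one-time protection window at launch, while DEFER_ALWAYS re-arms the deferral window on every dirty-to-clean transition. The sketch below collapses the kernel's rescheduling bookkeeping into a single decision function; it is a simplification, not the actual transition code.

/* Minimal model of the dirty->clean handling for DEFER vs DEFER_ALWAYS. */
#include <stdbool.h>
#include <stdio.h>

enum defer_action { DEMOTE_TO_IDLE, REARM_WINDOW, KEEP_CURRENT_WINDOW };

static enum defer_action
on_dirty_to_clean(bool defer_always, unsigned long long now,
                  unsigned long long idle_deadline)
{
    if (!defer_always && now >= idle_deadline)
        return DEMOTE_TO_IDLE;        /* one-time window has expired */
    if (defer_always)
        return REARM_WINDOW;          /* always gets a fresh deferral window */
    return KEEP_CURRENT_WINDOW;       /* ride out the remaining window */
}

int main(void)
{
    printf("%d\n", on_dirty_to_clean(false, 200, 100));  /* expired -> demote */
    printf("%d\n", on_dirty_to_clean(true, 200, 100));   /* DEFER_ALWAYS -> re-arm */
    printf("%d\n", on_dirty_to_clean(false, 50, 100));   /* still protected */
    return 0;
}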
This used to be: < * p->p_memstat_state &= ~(P_MEMSTAT_SUSPENDED | P_MEMSTAT_FROZEN); < */ < p->p_memstat_state &= ~P_MEMSTAT_SUSPENDED; --- > p->p_memstat_state &= ~(P_MEMSTAT_SUSPENDED | P_MEMSTAT_FROZEN); 3771c3226 < if (p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) { --- > if (p->p_memstat_state & P_MEMSTAT_PRIOR_THAW) { 3840,3867c3295,3296 < memorystatus_thread_wake(void) < { < int thr_id = 0; < int active_thr = atomic_load(&active_jetsam_threads); < < /* Wakeup all the jetsam threads */ < for (thr_id = 0; thr_id < active_thr; thr_id++) { < thread_wakeup((event_t)&jetsam_threads[thr_id].memorystatus_wakeup); < } < } < < #if CONFIG_JETSAM < < static void < memorystatus_thread_pool_max() < { < /* Increase the jetsam thread pool to max_jetsam_threads */ < int max_threads = max_jetsam_threads; < printf("Expanding memorystatus pool to %d!\n", max_threads); < atomic_store(&active_jetsam_threads, max_threads); < } < < static void < memorystatus_thread_pool_default() < { < /* Restore the jetsam thread pool to a single thread */ < printf("Reverting memorystatus pool back to 1\n"); < atomic_store(&active_jetsam_threads, 1); --- > memorystatus_thread_wake(void) { > thread_wakeup((event_t)&memorystatus_wakeup); 3870,3871d3298 < #endif /* CONFIG_JETSAM */ < 3877,3878d3303 < struct jetsam_thread_state *jetsam_thread = jetsam_current_thread(); < 3880c3305 < assert_wait_timeout(&jetsam_thread->memorystatus_wakeup, THREAD_UNINT, interval_ms, NSEC_PER_MSEC); --- > assert_wait_timeout(&memorystatus_wakeup, THREAD_UNINT, interval_ms, 1000 * NSEC_PER_USEC); 3882c3307 < assert_wait(&jetsam_thread->memorystatus_wakeup, THREAD_UNINT); --- > assert_wait(&memorystatus_wakeup, THREAD_UNINT); 3954,4139d3378 < #if CONFIG_FREEZE < extern void vm_swap_consider_defragmenting(int); < < /* < * This routine will _jetsam_ all frozen processes < * and reclaim the swap space immediately. < * < * So freeze has to be DISABLED when we call this routine. < */ < < void < memorystatus_disable_freeze(void) < { < memstat_bucket_t *bucket; < int bucket_count = 0, retries = 0; < boolean_t retval = FALSE, killed = FALSE; < uint32_t errors = 0, errors_over_prev_iteration = 0; < os_reason_t jetsam_reason = 0; < unsigned int band = 0; < proc_t p = PROC_NULL, next_p = PROC_NULL; < < assert(memorystatus_freeze_enabled == FALSE); < < jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_DISK_SPACE_SHORTAGE); < if (jetsam_reason == OS_REASON_NULL) { < printf("memorystatus_disable_freeze: failed to allocate jetsam reason\n"); < } < < /* < * Let's relocate all frozen processes into band 8. Demoted frozen processes < * are sitting in band 0 currently and it's possible to have a frozen process < * in the FG band being actively used. We don't reset its frozen state when < * it is resumed because it has state on disk. < * < * We choose to do this relocation rather than implement a new 'kill frozen' < * process function for these reasons: < * - duplication of code: too many kill functions exist and we need to rework them better. < * - disk-space-shortage kills are rare < * - not having the 'real' jetsam band at time of the this frozen kill won't preclude us < * from answering any imp. questions re. jetsam policy/effectiveness. < * < * This is essentially what memorystatus_update_inactive_jetsam_priority_band() does while < * avoiding the application of memory limits. 
< */ < < again: < proc_list_lock(); < < band = JETSAM_PRIORITY_IDLE; < p = PROC_NULL; < next_p = PROC_NULL; < < next_p = memorystatus_get_first_proc_locked(&band, TRUE); < while (next_p) { < < p = next_p; < next_p = memorystatus_get_next_proc_locked(&band, p, TRUE); < < if (p->p_memstat_effectivepriority > JETSAM_PRIORITY_FOREGROUND) { < break; < } < < if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == FALSE) { < continue; < } < < if (p->p_memstat_state & P_MEMSTAT_ERROR) { < p->p_memstat_state &= ~P_MEMSTAT_ERROR; < } < < if (p->p_memstat_effectivepriority == memorystatus_freeze_jetsam_band) { < continue; < } < < /* < * We explicitly add this flag here so the process looks like a normal < * frozen process i.e. P_MEMSTAT_FROZEN and P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND. < * We don't bother with assigning the 'active' memory < * limits at this point because we are going to be killing it soon below. < */ < p->p_memstat_state |= P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND; < memorystatus_invalidate_idle_demotion_locked(p, TRUE); < < memorystatus_update_priority_locked(p, memorystatus_freeze_jetsam_band, FALSE, TRUE); < } < < bucket = &memstat_bucket[memorystatus_freeze_jetsam_band]; < bucket_count = bucket->count; < proc_list_unlock(); < < /* < * Bucket count is already stale at this point. But, we don't expect < * freezing to continue since we have already disabled the freeze functionality. < * However, an existing freeze might be in progress. So we might miss that process < * in the first go-around. We hope to catch it in the next. < */ < < errors_over_prev_iteration = 0; < while (bucket_count) { < < bucket_count--; < < /* < * memorystatus_kill_elevated_process() drops a reference, < * so take another one so we can continue to use this exit reason < * even after it returns. < */ < < os_reason_ref(jetsam_reason); < retval = memorystatus_kill_elevated_process( < kMemorystatusKilledDiskSpaceShortage, < jetsam_reason, < memorystatus_freeze_jetsam_band, < 0, /* the iteration of aggressive jetsam..ignored here */ < &errors); < < if (errors > 0) { < printf("memorystatus_disable_freeze: memorystatus_kill_elevated_process returned %d error(s)\n", errors); < errors_over_prev_iteration += errors; < errors = 0; < } < < if (retval == 0) { < /* < * No frozen processes left to kill. < */ < break; < } < < killed = TRUE; < } < < proc_list_lock(); < < if (memorystatus_frozen_count) { < /* < * A frozen process snuck in and so < * go back around to kill it. That < * process may have been resumed and < * put into the FG band too. So we < * have to do the relocation again. 
< */ < assert(memorystatus_freeze_enabled == FALSE); < < retries++; < if (retries < 3) { < proc_list_unlock(); < goto again; < } < #if DEVELOPMENT || DEBUG < panic("memorystatus_disable_freeze: Failed to kill all frozen processes, memorystatus_frozen_count = %d, errors = %d", < memorystatus_frozen_count, errors_over_prev_iteration); < #endif /* DEVELOPMENT || DEBUG */ < } < proc_list_unlock(); < < os_reason_free(jetsam_reason); < < if (killed) { < < vm_swap_consider_defragmenting(VM_SWAP_FLAGS_FORCE_DEFRAG | VM_SWAP_FLAGS_FORCE_RECLAIM); < < proc_list_lock(); < size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + < sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_count); < uint64_t timestamp_now = mach_absolute_time(); < memorystatus_jetsam_snapshot->notification_time = timestamp_now; < memorystatus_jetsam_snapshot->js_gencount++; < if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 || < timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { < proc_list_unlock(); < int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size)); < if (!ret) { < proc_list_lock(); < memorystatus_jetsam_snapshot_last_timestamp = timestamp_now; < proc_list_unlock(); < } < } else { < proc_list_unlock(); < } < } < < return; < } < #endif /* CONFIG_FREEZE */ < 4143,4144c3382 < boolean_t purged = FALSE; < boolean_t killed = memorystatus_kill_hiwat_proc(errors, &purged); --- > boolean_t killed = memorystatus_kill_hiwat_proc(errors); 4151,4154c3389 < if (purged == FALSE) { < /* couldn't purge and couldn't kill */ < memorystatus_hwm_candidates = FALSE; < } --- > memorystatus_hwm_candidates = FALSE; 4317d3551 < JETSAM_PRIORITY_ELEVATED_INACTIVE, 4343c3577 < * jetsam_reason so the kMemorystatusKilledProcThrashing cause --- > * jetsam_reason so the kMemorystatusKilledVMThrashing cause 4347c3581 < kMemorystatusKilledProcThrashing, --- > kMemorystatusKilledVMThrashing, 4369a3604,3605 > static boolean_t is_vm_privileged = FALSE; > 4376d3611 < struct jetsam_thread_state *jetsam_thread = jetsam_current_thread(); 4378c3613 < if (jetsam_thread->inited == FALSE) { --- > if (is_vm_privileged == FALSE) { 4383,4384d3617 < < char name[32]; 4386,4394c3619,3623 < snprintf(name, 32, "VM_memorystatus_%d", jetsam_thread->index + 1); < < if (jetsam_thread->index == 0) { < if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { < thread_vm_bind_group_add(); < } < } < thread_set_thread_name(current_thread(), name); < jetsam_thread->inited = TRUE; --- > is_vm_privileged = TRUE; > > if (vm_restricted_to_single_processor == TRUE) > thread_vm_bind_group_add(); > thread_set_thread_name(current_thread(), "VM_memorystatus"); 4425,4429c3654,3655 < case kMemorystatusKilledVMCompressorThrashing: < jetsam_reason_code = JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING; < break; < case kMemorystatusKilledVMCompressorSpaceShortage: < jetsam_reason_code = JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE; --- > case kMemorystatusKilledVMThrashing: > jetsam_reason_code = JETSAM_REASON_MEMORY_VMTHRASHING; 4632c3858 < } else if (corpse_for_fatal_memkill != 0 && proc_send_synchronous_EXC_RESOURCE(p) == FALSE) { --- > } else if (corpse_for_fatal_memkill != 0) { 4900,4903d4125 < < #define MEMORYSTATUS_VM_MAP_FORK_ALLOWED 0x100000000 < #define MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED 0x200000000 < 4911,4916d4132 < * < * The pidwatch_val starts out with a PID to watch for in the map_fork path. 
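The pidwatch sysctl described here, and implemented by set_vm_map_fork_pidwatch() and memorystatus_abort_vm_map_fork() in the following hunks, starts out holding a PID and records the map_fork outcome by OR-ing in ALLOWED/NOT_ALLOWED bits, or collapsing to -1 on abort. A user-space model of that bookkeeping; the proc lookup is simplified away and only the flag values mirror the diff.

/* Model of the vm_map_fork pidwatch bookkeeping. */
#include <stdint.h>
#include <stdio.h>

#define MEMORYSTATUS_VM_MAP_FORK_ALLOWED     0x100000000ULL
#define MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED 0x200000000ULL

static uint64_t memorystatus_vm_map_fork_pidwatch_val;

static void set_vm_map_fork_pidwatch(int pid, uint64_t flag)
{
    if (memorystatus_vm_map_fork_pidwatch_val != 0 &&
        memorystatus_vm_map_fork_pidwatch_val == (uint64_t)pid)
        memorystatus_vm_map_fork_pidwatch_val |= flag;
}

static void abort_vm_map_fork(int pid)
{
    if (memorystatus_vm_map_fork_pidwatch_val != 0 &&
        memorystatus_vm_map_fork_pidwatch_val == (uint64_t)pid)
        memorystatus_vm_map_fork_pidwatch_val = (uint64_t)-1;
}

int main(void)
{
    memorystatus_vm_map_fork_pidwatch_val = 1234;   /* watch pid 1234 */
    set_vm_map_fork_pidwatch(1234, MEMORYSTATUS_VM_MAP_FORK_ALLOWED);
    printf("pidwatch=0x%llx\n",
           (unsigned long long)memorystatus_vm_map_fork_pidwatch_val);
    abort_vm_map_fork(1234);   /* no effect: ALLOWED bit already recorded */
    return 0;
}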
< * Its value is: < * - OR'd with MEMORYSTATUS_VM_MAP_FORK_ALLOWED if we allow the map_fork. < * - OR'd with MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED if we disallow the map_fork. < * - set to -1ull if the map_fork() is aborted for other reasons. 4919a4136,4137 > #define MEMORYSTATUS_VM_MAP_FORK_ALLOWED 0x100000000 > #define MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED 0x200000000 4957,4969c4175,4183 < /* < * Record if a watched process fails to qualify for a vm_map_fork(). < */ < void < memorystatus_abort_vm_map_fork(task_t task) < { < if (memorystatus_vm_map_fork_pidwatch_val != 0) { < proc_t p = get_bsdtask_info(task); < if (p != NULL && memorystatus_vm_map_fork_pidwatch_val == (uint64_t)p->p_pid) { < memorystatus_vm_map_fork_pidwatch_val = -1ull; < } < } < } --- > #define SET_VM_MAP_FORK_PIDWATCH_ALLOWED(task) \ > MACRO_BEGIN \ > if (memorystatus_vm_map_fork_pidwatch_val != 0) { \ > proc_t p = get_bsdtask_info(task); \ > if (p && (memorystatus_vm_map_fork_pidwatch_val == (uint64_t)p->p_pid)) { \ > memorystatus_vm_map_fork_pidwatch_val |= MEMORYSTATUS_VM_MAP_FORK_ALLOWED; \ > } \ > } \ > MACRO_END 4971,4980c4185,4193 < static void < set_vm_map_fork_pidwatch(task_t task, uint64_t x) < { < if (memorystatus_vm_map_fork_pidwatch_val != 0) { < proc_t p = get_bsdtask_info(task); < if (p && (memorystatus_vm_map_fork_pidwatch_val == (uint64_t)p->p_pid)) { < memorystatus_vm_map_fork_pidwatch_val |= x; < } < } < } --- > #define SET_VM_MAP_FORK_PIDWATCH_NOT_ALLOWED(task) \ > MACRO_BEGIN \ > if (memorystatus_vm_map_fork_pidwatch_val != 0) { \ > proc_t p = get_bsdtask_info(task); \ > if (p && (memorystatus_vm_map_fork_pidwatch_val == (uint64_t)p->p_pid)) { \ > memorystatus_vm_map_fork_pidwatch_val |= MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED; \ > } \ > } \ > MACRO_END 4984,4990c4197,4198 < < static void < set_vm_map_fork_pidwatch(task_t task, uint64_t x) < { < #pragma unused(task) < #pragma unused(x) < } --- > #define SET_VM_MAP_FORK_PIDWATCH_ALLOWED(task) > #define SET_VM_MAP_FORK_PIDWATCH_NOT_ALLOWED(task) 5016c4224 < memorystatus_allowed_vm_map_fork(task_t task) --- > memorystatus_allowed_vm_map_fork(__unused task_t task) 5022,5023c4230,4232 < uint64_t footprint_in_bytes; < uint64_t max_allowed_bytes; --- > uint64_t footprint_in_bytes = 0; > uint64_t purgeable_in_bytes = 0; > uint64_t max_allowed_bytes = 0; 5026c4235 < set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_ALLOWED); --- > SET_VM_MAP_FORK_PIDWATCH_ALLOWED(task); 5029a4239 > purgeable_in_bytes = get_task_purgeable_size(task); 5033c4243 < * Maximum is 1/4 of the system-wide task limit. --- > * Maximum is half the system-wide task limit. 
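Per the comment above and the computation in the next hunk, the corpse/vm_map_fork allowance drops from half to a quarter of the system-wide task limit, and the purgeable-memory credit is removed. A sketch of the new gate, with max_task_footprint_mb as a stand-in value:

/* Sketch of the vm_map_fork footprint gate after the change. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t max_task_footprint_mb = 2048;   /* placeholder system-wide limit */

static bool allowed_vm_map_fork(uint64_t footprint_in_bytes)
{
    /* Maximum is 1/4 of the system-wide task limit. */
    uint64_t max_allowed_bytes = (max_task_footprint_mb * 1024 * 1024) >> 2;
    return footprint_in_bytes <= max_allowed_bytes;
}

int main(void)
{
    printf("%d\n", allowed_vm_map_fork(400ULL << 20));   /* 400 MB -> allowed */
    printf("%d\n", allowed_vm_map_fork(600ULL << 20));   /* 600 MB -> rejected */
    return 0;
}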
5035c4245 < max_allowed_bytes = ((uint64_t)max_task_footprint_mb * 1024 * 1024) >> 2; --- > max_allowed_bytes = ((((uint64_t)max_task_footprint_mb) * 1024ULL * 1024ULL) >> 1); 5037c4247,4254 < if (footprint_in_bytes > max_allowed_bytes) { --- > if (footprint_in_bytes > purgeable_in_bytes) { > footprint_in_bytes -= purgeable_in_bytes; > } > > if (footprint_in_bytes <= max_allowed_bytes) { > SET_VM_MAP_FORK_PIDWATCH_ALLOWED(task); > return (is_allowed); > } else { 5039c4256 < set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED); --- > SET_VM_MAP_FORK_PIDWATCH_NOT_ALLOWED(task); 5042d4258 < #endif /* CONFIG_EMBEDDED */ 5044c4260,4262 < set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_ALLOWED); --- > #else /* CONFIG_EMBEDDED */ > > SET_VM_MAP_FORK_PIDWATCH_ALLOWED(task); 5046a4265,4266 > #endif /* CONFIG_EMBEDDED */ > 5050c4270 < memorystatus_get_task_page_counts(task_t task, uint32_t *footprint, uint32_t *max_footprint_lifetime, uint32_t *purgeable_pages) --- > memorystatus_get_task_page_counts(task_t task, uint32_t *footprint, uint32_t *max_footprint, uint32_t *max_footprint_lifetime, uint32_t *purgeable_pages) 5060a4281,4285 > if (max_footprint) { > pages = (get_task_phys_footprint_recent_max(task) / PAGE_SIZE_64); > assert(((uint32_t)pages) == pages); > *max_footprint = (uint32_t)pages; > } 5129,5130d4353 < LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); < 5171,5175d4393 < #if CONFIG_FREEZE < entry->jse_thaw_count = p->p_memstat_thaw_count; < #else /* CONFIG_FREEZE */ < entry->jse_thaw_count = 0; < #endif /* CONFIG_FREEZE */ 5194a4413 > uint32_t max_pages = 0; 5198c4417 < memorystatus_get_task_page_counts(p->task, &pages, &max_pages_lifetime, &purgeable_pages); --- > memorystatus_get_task_page_counts(p->task, &pages, &max_pages, &max_pages_lifetime, &purgeable_pages); 5199a4419 > entry->max_pages = (uint64_t)max_pages; 5309,5327d4528 < #if CONFIG_FREEZE < /* < * We can't grab the freezer_mutex here even though that synchronization would be correct to inspect < * the # of frozen processes and wakeup the freezer thread. Reason being that we come here into this < * code with (possibly) the page-queue locks held and preemption disabled. So trying to grab a mutex here < * will result in the "mutex with preemption disabled" panic. < */ < < if (memorystatus_freeze_thread_should_run() == TRUE) { < /* < * The freezer thread is usually woken up by some user-space call i.e. pid_hibernate(any process). < * That trigger isn't invoked often enough and so we are enabling this explicit wakeup here. 
< */ < if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { < thread_wakeup((event_t)&memorystatus_freeze_wakeup); < } < } < #endif /* CONFIG_FREEZE */ < 5359a4561 > uint32_t max_pages = 0; 5379c4581 < memorystatus_get_task_page_counts(p->task, &pages, &max_pages_lifetime, &purgeable_pages); --- > memorystatus_get_task_page_counts(p->task, &pages, &max_pages, &max_pages_lifetime, &purgeable_pages); 5380a4583 > entry->max_pages = (uint64_t)max_pages; 5407,5408c4610,4611 < entry->cpu_time.tv_sec = (int64_t)tv_sec; < entry->cpu_time.tv_usec = (int64_t)tv_usec; --- > entry->cpu_time.tv_sec = tv_sec; > entry->cpu_time.tv_usec = tv_usec; 5418,5423d4620 < #if CONFIG_FREEZE < entry->jse_thaw_count = p->p_memstat_thaw_count; < #else /* CONFIG_FREEZE */ < entry->jse_thaw_count = 0; < #endif /* CONFIG_FREEZE */ < 5437c4634 < if ((kr = host_statistics64(host_self(), HOST_VM_INFO64, (host_info64_t)&vm_stat, &count)) != KERN_SUCCESS) { --- > if ((kr = host_statistics64(host_self(), HOST_VM_INFO64, (host_info64_t)&vm_stat, &count) != KERN_SUCCESS)) { 5485,5486d4681 < LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); < 5593,5729d4787 < * Prepare the process to be killed (set state, update snapshot) and kill it. < */ < static uint64_t memorystatus_purge_before_jetsam_success = 0; < < static boolean_t < memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, boolean_t *killed) < { < pid_t aPid = 0; < uint32_t aPid_ep = 0; < < uint64_t killtime = 0; < clock_sec_t tv_sec; < clock_usec_t tv_usec; < uint32_t tv_msec; < boolean_t retval = FALSE; < uint64_t num_pages_purged = 0; < < aPid = p->p_pid; < aPid_ep = p->p_memstat_effectivepriority; < < if (cause != kMemorystatusKilledVnodes && cause != kMemorystatusKilledZoneMapExhaustion) { < /* < * Genuine memory pressure and not other (vnode/zone) resource exhaustion. < */ < boolean_t success = FALSE; < < networking_memstatus_callout(p, cause); < num_pages_purged = vm_purgeable_purge_task_owned(p->task); < < if (num_pages_purged) { < /* < * We actually purged something and so let's < * check if we need to continue with the kill. < */ < if (cause == kMemorystatusKilledHiwat) { < uint64_t footprint_in_bytes = get_task_phys_footprint(p->task); < uint64_t memlimit_in_bytes = (((uint64_t)p->p_memstat_memlimit) * 1024ULL * 1024ULL); /* convert MB to bytes */ < success = (footprint_in_bytes <= memlimit_in_bytes); < } else { < success = (memorystatus_avail_pages_below_pressure() == FALSE); < } < < if (success) { < < memorystatus_purge_before_jetsam_success++; < < os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: purged %llu pages from pid %d [%s] and avoided %s\n", < num_pages_purged, aPid, (*p->p_name ? p->p_name : "unknown"), memorystatus_kill_cause_name[cause]); < < *killed = FALSE; < < return TRUE; < } < } < } < < #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) < MEMORYSTATUS_DEBUG(1, "jetsam: %s pid %d [%s] - %lld Mb > 1 (%d Mb)\n", < (memorystatus_jetsam_policy & kPolicyDiagnoseActive) ? "suspending": "killing", < aPid, (*p->p_name ? 
p->p_name : "unknown"), < (footprint_in_bytes / (1024ULL * 1024ULL)), /* converted bytes to MB */ < p->p_memstat_memlimit); < #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */ < < killtime = mach_absolute_time(); < absolutetime_to_microtime(killtime, &tv_sec, &tv_usec); < tv_msec = tv_usec / 1000; < < #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) < if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) { < if (cause == kMemorystatusKilledHiwat) { < MEMORYSTATUS_DEBUG(1, "jetsam: suspending pid %d [%s] for diagnosis - memorystatus_available_pages: %d\n", < aPid, (*p->p_name ? p->p_name: "(unknown)"), memorystatus_available_pages); < } else { < int activeProcess = p->p_memstat_state & P_MEMSTAT_FOREGROUND; < if (activeProcess) { < MEMORYSTATUS_DEBUG(1, "jetsam: suspending pid %d [%s] (active) for diagnosis - memorystatus_available_pages: %d\n", < aPid, (*p->p_name ? p->p_name: "(unknown)"), memorystatus_available_pages); < < if (memorystatus_jetsam_policy & kPolicyDiagnoseFirst) { < jetsam_diagnostic_suspended_one_active_proc = 1; < printf("jetsam: returning after suspending first active proc - %d\n", aPid); < } < } < } < < proc_list_lock(); < /* This diagnostic code is going away soon. Ignore the kMemorystatusInvalid cause here. */ < memorystatus_update_jetsam_snapshot_entry_locked(p, kMemorystatusInvalid, killtime); < proc_list_unlock(); < < p->p_memstat_state |= P_MEMSTAT_DIAG_SUSPENDED; < < if (p) { < task_suspend(p->task); < *killed = TRUE; < } < } else < #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */ < { < proc_list_lock(); < memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime); < proc_list_unlock(); < < char kill_reason_string[128]; < < if (cause == kMemorystatusKilledHiwat) { < strlcpy(kill_reason_string, "killing_highwater_process", 128); < } else { < if (aPid_ep == JETSAM_PRIORITY_IDLE) { < strlcpy(kill_reason_string, "killing_idle_process", 128); < } else { < strlcpy(kill_reason_string, "killing_top_process", 128); < } < } < < os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: %s pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n", < (unsigned long)tv_sec, tv_msec, kill_reason_string, < aPid, (*p->p_name ? 
p->p_name : "unknown"), < memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages); < < /* < * memorystatus_do_kill drops a reference, so take another one so we can < * continue to use this exit reason even after memorystatus_do_kill() < * returns < */ < os_reason_ref(jetsam_reason); < < retval = memorystatus_do_kill(p, cause, jetsam_reason); < < *killed = retval; < } < < return retval; < } < < /* 5738c4796,4797 < boolean_t new_snapshot = FALSE, force_new_snapshot = FALSE, killed = FALSE, freed_mem = FALSE; --- > boolean_t new_snapshot = FALSE, force_new_snapshot = FALSE, killed = FALSE; > int kill_count = 0; 5740a4800,4803 > uint64_t killtime = 0; > clock_sec_t tv_sec; > clock_usec_t tv_usec; > uint32_t tv_msec; 5771c4834 < * kMemorystatusKilledVMCompressorSpaceShortage --- > * kMemorystatusKilledVMThrashing 5798a4862 > int activeProcess; 5805a4870 > activeProcess = p->p_memstat_state & P_MEMSTAT_FOREGROUND; 5841c4906 < boolean_t reclaim_proc = !(p->p_memstat_state & P_MEMSTAT_LOCKED); --- > boolean_t reclaim_proc = !(p->p_memstat_state & (P_MEMSTAT_LOCKED | P_MEMSTAT_NORECLAIM)); 5853,5873d4917 < if (proc_ref_locked(p) == p) { < /* < * Mark as terminated so that if exit1() indicates success, but the process (for example) < * is blocked in task_exception_notify(), it'll be skipped if encountered again - see < * . This is cheaper than examining P_LEXIT, which requires the < * acquisition of the proc lock. < */ < p->p_memstat_state |= P_MEMSTAT_TERMINATED; < < } else { < /* < * We need to restart the search again because < * proc_ref_locked _can_ drop the proc_list lock < * and we could have lost our stored next_p via < * an exit() on another core. < */ < i = 0; < next_p = memorystatus_get_first_proc_locked(&i, TRUE); < continue; < } < 5886a4931,4938 > > /* > * Mark as terminated so that if exit1() indicates success, but the process (for example) > * is blocked in task_exception_notify(), it'll be skipped if encountered again - see > * . This is cheaper than examining P_LEXIT, which requires the > * acquisition of the proc lock. > */ > p->p_memstat_state |= P_MEMSTAT_TERMINATED; 5888,5893c4940,4958 < proc_list_unlock(); < < freed_mem = memorystatus_kill_proc(p, cause, jetsam_reason, &killed); /* purged and/or killed 'p' */ < /* Success? */ < if (freed_mem) { < if (killed) { --- > killtime = mach_absolute_time(); > absolutetime_to_microtime(killtime, &tv_sec, &tv_usec); > tv_msec = tv_usec / 1000; > > #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) > if ((memorystatus_jetsam_policy & kPolicyDiagnoseActive) && activeProcess) { > MEMORYSTATUS_DEBUG(1, "jetsam: suspending pid %d [%s] (active) for diagnosis - memory_status_level: %d\n", > aPid, (*p->p_name ? 
p->p_name: "(unknown)"), memorystatus_level); > memorystatus_update_jetsam_snapshot_entry_locked(p, kMemorystatusKilledDiagnostic, killtime); > p->p_memstat_state |= P_MEMSTAT_DIAG_SUSPENDED; > if (memorystatus_jetsam_policy & kPolicyDiagnoseFirst) { > jetsam_diagnostic_suspended_one_active_proc = 1; > printf("jetsam: returning after suspending first active proc - %d\n", aPid); > } > > p = proc_ref_locked(p); > proc_list_unlock(); > if (p) { > task_suspend(p->task); 5897,5898c4962,5003 < } else { < /* purged */ --- > proc_rele(p); > killed = TRUE; > } > > goto exit; > } else > #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */ > { > /* Shift queue, update stats */ > memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime); > > if (proc_ref_locked(p) == p) { > proc_list_unlock(); > os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: %s pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n", > (unsigned long)tv_sec, tv_msec, > ((aPid_ep == JETSAM_PRIORITY_IDLE) ? "killing_idle_process" : "killing_top_process"), > aPid, (*p->p_name ? p->p_name : "unknown"), > memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages); > > /* > * memorystatus_do_kill() drops a reference, so take another one so we can > * continue to use this exit reason even after memorystatus_do_kill() > * returns. > */ > os_reason_ref(jetsam_reason); > > killed = memorystatus_do_kill(p, cause, jetsam_reason); > > /* Success? */ > if (killed) { > if (priority) { > *priority = aPid_ep; > } > proc_rele(p); > kill_count++; > goto exit; > } > > /* > * Failure - first unwind the state, > * then fall through to restart the search. > */ 5899a5005 > proc_rele_locked(p); 5901c5007,5008 < proc_list_unlock(); --- > p->p_memstat_state |= P_MEMSTAT_ERROR; > *errors += 1; 5903,5915c5010,5021 < proc_rele(p); < goto exit; < } < < /* < * Failure - first unwind the state, < * then fall through to restart the search. < */ < proc_list_lock(); < proc_rele_locked(p); < p->p_memstat_state &= ~P_MEMSTAT_TERMINATED; < p->p_memstat_state |= P_MEMSTAT_ERROR; < *errors += 1; --- > > /* > * Failure - restart the search. > * > * We might have raced with "p" exiting on another core, resulting in no > * ref on "p". Or, we may have failed to kill "p". > * > * Either way, we fall thru to here, leaving the proc in the > * P_MEMSTAT_TERMINATED state. > * > * And, we hold the the proc_list_lock at this point. > */ 5917,5918c5023,5025 < i = 0; < next_p = memorystatus_get_first_proc_locked(&i, TRUE); --- > i = 0; > next_p = memorystatus_get_first_proc_locked(&i, TRUE); > } 5935c5042 < memorystatus_available_pages, killed ? aPid : 0, 0, 0, 0); --- > memorystatus_available_pages, killed ? aPid : 0, kill_count, 0, 0); 6164d5270 < proc_list_lock(); 6166d5271 < proc_list_unlock(); 6181c5286 < memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged) --- > memorystatus_kill_hiwat_proc(uint32_t *errors) 6185c5290,5291 < boolean_t new_snapshot = FALSE, killed = FALSE, freed_mem = FALSE; --- > boolean_t new_snapshot = FALSE, killed = FALSE; > int kill_count = 0; 6187a5294,5297 > uint64_t killtime = 0; > clock_sec_t tv_sec; > clock_usec_t tv_usec; > uint32_t tv_msec; 6245c5355,5362 < --- > #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) > MEMORYSTATUS_DEBUG(1, "jetsam: %s pid %d [%s] - %lld Mb > 1 (%d Mb)\n", > (memorystatus_jetsam_policy & kPolicyDiagnoseActive) ? "suspending": "killing", > aPid, (*p->p_name ? 
p->p_name : "unknown"), > (footprint_in_bytes / (1024ULL * 1024ULL)), /* converted bytes to MB */ > p->p_memstat_memlimit); > #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */ > 6248,6258c5365,5368 < new_snapshot = TRUE; < } < < if (proc_ref_locked(p) == p) { < /* < * Mark as terminated so that if exit1() indicates success, but the process (for example) < * is blocked in task_exception_notify(), it'll be skipped if encountered again - see < * . This is cheaper than examining P_LEXIT, which requires the < * acquisition of the proc lock. < */ < p->p_memstat_state |= P_MEMSTAT_TERMINATED; --- > new_snapshot = TRUE; > } > > p->p_memstat_state |= P_MEMSTAT_TERMINATED; 6259a5370,5380 > killtime = mach_absolute_time(); > absolutetime_to_microtime(killtime, &tv_sec, &tv_usec); > tv_msec = tv_usec / 1000; > > #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) > if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) { > MEMORYSTATUS_DEBUG(1, "jetsam: pid %d suspended for diagnosis - memorystatus_available_pages: %d\n", aPid, memorystatus_available_pages); > memorystatus_update_jetsam_snapshot_entry_locked(p, kMemorystatusKilledDiagnostic, killtime); > p->p_memstat_state |= P_MEMSTAT_DIAG_SUSPENDED; > > p = proc_ref_locked(p); 6261,6273c5382,5395 < } else { < /* < * We need to restart the search again because < * proc_ref_locked _can_ drop the proc_list lock < * and we could have lost our stored next_p via < * an exit() on another core. < */ < i = 0; < next_p = memorystatus_get_first_proc_locked(&i, TRUE); < continue; < } < < freed_mem = memorystatus_kill_proc(p, kMemorystatusKilledHiwat, jetsam_reason, &killed); /* purged and/or killed 'p' */ --- > if (p) { > task_suspend(p->task); > proc_rele(p); > killed = TRUE; > } > > goto exit; > } else > #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */ > { > memorystatus_update_jetsam_snapshot_entry_locked(p, kMemorystatusKilledHiwat, killtime); > > if (proc_ref_locked(p) == p) { > proc_list_unlock(); 6275,6279c5397,5405 < /* Success? */ < if (freed_mem) { < if (killed == FALSE) { < /* purged 'p'..don't reset HWM candidate count */ < *purged = TRUE; --- > os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: killing_highwater_process pid %d [%s] (highwater %d) - memorystatus_available_pages: %llu\n", > (unsigned long)tv_sec, tv_msec, aPid, (*p->p_name ? p->p_name : "unknown"), aPid_ep, (uint64_t)memorystatus_available_pages); > > /* > * memorystatus_do_kill drops a reference, so take another one so we can > * continue to use this exit reason even after memorystatus_do_kill() > * returns > */ > os_reason_ref(jetsam_reason); 6280a5407,5419 > killed = memorystatus_do_kill(p, kMemorystatusKilledHiwat, jetsam_reason); > > /* Success? */ > if (killed) { > proc_rele(p); > kill_count++; > goto exit; > } > > /* > * Failure - first unwind the state, > * then fall through to restart the search. > */ 6281a5421 > proc_rele_locked(p); 6283c5423,5424 < proc_list_unlock(); --- > p->p_memstat_state |= P_MEMSTAT_ERROR; > *errors += 1; 6285,6296d5425 < proc_rele(p); < goto exit; < } < /* < * Failure - first unwind the state, < * then fall through to restart the search. < */ < proc_list_lock(); < proc_rele_locked(p); < p->p_memstat_state &= ~P_MEMSTAT_TERMINATED; < p->p_memstat_state |= P_MEMSTAT_ERROR; < *errors += 1; 6298,6299c5427,5441 < i = 0; < next_p = memorystatus_get_first_proc_locked(&i, TRUE); --- > /* > * Failure - restart the search. > * > * We might have raced with "p" exiting on another core, resulting in no > * ref on "p". 
Or, we may have failed to kill "p". > * > * Either way, we fall thru to here, leaving the proc in the > * P_MEMSTAT_TERMINATED state. > * > * And, we hold the the proc_list_lock at this point. > */ > > i = 0; > next_p = memorystatus_get_first_proc_locked(&i, TRUE); > } 6316c5458 < memorystatus_available_pages, killed ? aPid : 0, 0, 0, 0); --- > memorystatus_available_pages, killed ? aPid : 0, kill_count, 0, 0); 6328c5470 < memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, unsigned int band, int aggr_count, uint32_t *errors) --- > memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, int aggr_count, uint32_t *errors) 6333a5476 > unsigned int i = JETSAM_PRIORITY_ELEVATED_INACTIVE; 6344,6351d5486 < #if CONFIG_FREEZE < boolean_t consider_frozen_only = FALSE; < < if (band == (unsigned int) memorystatus_freeze_jetsam_band) { < consider_frozen_only = TRUE; < } < #endif /* CONFIG_FREEZE */ < 6354c5489 < next_p = memorystatus_get_first_proc_locked(&band, FALSE); --- > next_p = memorystatus_get_first_proc_locked(&i, FALSE); 6358c5493 < next_p = memorystatus_get_next_proc_locked(&band, p, FALSE); --- > next_p = memorystatus_get_next_proc_locked(&i, p, FALSE); 6375,6378d5509 < if (consider_frozen_only && ! (p->p_memstat_state & P_MEMSTAT_FROZEN)) { < continue; < } < 6382c5513 < #endif /* CONFIG_FREEZE */ --- > #endif 6452c5583 < next_p = memorystatus_get_first_proc_locked(&band, FALSE); --- > next_p = memorystatus_get_first_proc_locked(&i, FALSE); 6481,6486c5612,5613 < if ((victim_pid != -1) || < (cause != kMemorystatusKilledVMPageShortage && < cause != kMemorystatusKilledVMCompressorThrashing && < cause != kMemorystatusKilledVMCompressorSpaceShortage && < cause != kMemorystatusKilledFCThrashing && < cause != kMemorystatusKilledZoneMapExhaustion)) { --- > if ((victim_pid != -1) || (cause != kMemorystatusKilledVMPageShortage && cause != kMemorystatusKilledVMThrashing && > cause != kMemorystatusKilledFCThrashing && cause != kMemorystatusKilledZoneMapExhaustion)) { 6496c5623 < memorystatus_kill_on_VM_compressor_space_shortage(boolean_t async) { --- > memorystatus_kill_on_VM_thrashing(boolean_t async) { 6498c5625 < return memorystatus_kill_process_async(-1, kMemorystatusKilledVMCompressorSpaceShortage); --- > return memorystatus_kill_process_async(-1, kMemorystatusKilledVMThrashing); 6500c5627 < os_reason_t jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE); --- > os_reason_t jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_VMTHRASHING); 6502c5629 < printf("memorystatus_kill_on_VM_compressor_space_shortage -- sync: failed to allocate jetsam reason\n"); --- > printf("memorystatus_kill_on_VM_thrashing -- sync: failed to allocate jetsam reason\n"); 6505c5632 < return memorystatus_kill_process_sync(-1, kMemorystatusKilledVMCompressorSpaceShortage, jetsam_reason); --- > return memorystatus_kill_process_sync(-1, kMemorystatusKilledVMThrashing, jetsam_reason); 6510,6523d5636 < boolean_t < memorystatus_kill_on_VM_compressor_thrashing(boolean_t async) { < if (async) { < return memorystatus_kill_process_async(-1, kMemorystatusKilledVMCompressorThrashing); < } else { < os_reason_t jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING); < if (jetsam_reason == OS_REASON_NULL) { < printf("memorystatus_kill_on_VM_compressor_thrashing -- sync: failed to allocate jetsam reason\n"); < } < < return memorystatus_kill_process_sync(-1, 
kMemorystatusKilledVMCompressorThrashing, jetsam_reason); < } < } < 6594,6602c5707 < < /* < * This is just the default value if the underlying < * storage device doesn't have any specific budget. < * We check with the storage layer in memorystatus_freeze_update_throttle() < * before we start our freezing the first time. < */ < memorystatus_freeze_budget_pages_remaining = (memorystatus_freeze_daily_mb_max * 1024 * 1024) / PAGE_SIZE; < --- > 6605,6609d5709 < < proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2); < proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); < thread_set_thread_name(thread, "VM_freezer"); < 6616,6669d5715 < static boolean_t < memorystatus_is_process_eligible_for_freeze(proc_t p) < { < /* < * Called with proc_list_lock held. < */ < < LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); < < boolean_t should_freeze = FALSE; < uint32_t state = 0, entry_count = 0, pages = 0, i = 0; < int probability_of_use = 0; < < if (isApp(p) == FALSE) { < goto out; < } < < state = p->p_memstat_state; < < if ((state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED | P_MEMSTAT_FREEZE_IGNORE)) || < !(state & P_MEMSTAT_SUSPENDED)) { < goto out; < } < < /* Only freeze processes meeting our minimum resident page criteria */ < memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL); < if (pages < memorystatus_freeze_pages_min) { < goto out; < } < < entry_count = (memorystatus_global_probabilities_size / sizeof(memorystatus_internal_probabilities_t)); < < if (entry_count) { < < for (i=0; i < entry_count; i++ ) { < if (strncmp(memorystatus_global_probabilities_table[i].proc_name, < p->p_name, < MAXCOMLEN + 1) == 0) { < < probability_of_use = memorystatus_global_probabilities_table[i].use_probability; < break; < } < } < < if (probability_of_use == 0) { < goto out; < } < } < < should_freeze = TRUE; < out: < return should_freeze; < } < 6673,6676d5718 < * Doesn't deal with re-freezing because this is called on a specific process and < * not by the freezer thread. If that changes, we'll have to teach it about < * refreezing a frozen process. 
< * 6685c5727,5729 < int freezer_error_code = 0; --- > > KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START, > memorystatus_available_pages, 0, 0, 0, 0); 6690d5733 < printf("memorystatus_freeze_process_sync: Invalid process\n"); 6695d5737 < printf("memorystatus_freeze_process_sync: Freezing is DISABLED\n"); 6700d5741 < printf("memorystatus_freeze_process_sync: Low compressor and/or low swap space...skipping freeze\n"); 6704,6706c5745,5747 < memorystatus_freeze_update_throttle(&memorystatus_freeze_budget_pages_remaining); < if (!memorystatus_freeze_budget_pages_remaining) { < printf("memorystatus_freeze_process_sync: exit with NO available budget\n"); --- > if (memorystatus_freeze_update_throttle()) { > printf("memorystatus_freeze_process_sync: in throttle, ignorning freeze\n"); > memorystatus_freeze_throttle_count++; 6713,6714c5754,5756 < uint32_t purgeable, wired, clean, dirty, shared; < uint32_t max_pages, i; --- > uint32_t purgeable, wired, clean, dirty, state; > uint32_t max_pages, pages, i; > boolean_t shared; 6716a5759 > state = p->p_memstat_state; 6719c5762,5769 < if (memorystatus_is_process_eligible_for_freeze(p) == FALSE) { --- > if ((state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FROZEN)) || !(state & P_MEMSTAT_SUSPENDED)) { > proc_list_unlock(); > goto exit; > } > > /* Only freeze processes meeting our minimum resident page criteria */ > memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL, NULL); > if (pages < memorystatus_freeze_pages_min) { 6726c5776,5784 < max_pages = MIN(memorystatus_freeze_pages_max, memorystatus_freeze_budget_pages_remaining); --- > unsigned int avail_swap_space = 0; /* in pages. */ > > /* > * Freezer backed by the compressor and swap file(s) > * while will hold compressed data. > */ > avail_swap_space = vm_swap_get_free_space() / PAGE_SIZE_64; > > max_pages = MIN(avail_swap_space, memorystatus_freeze_pages_max); 6727a5786,5789 > if (max_pages < memorystatus_freeze_pages_min) { > proc_list_unlock(); > goto exit; > } 6739,6745c5801 < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START, < memorystatus_available_pages, 0, 0, 0, 0); < < ret = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, &freezer_error_code, FALSE /* eval only */); < < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_END, < memorystatus_available_pages, aPid, 0, 0, 0); --- > ret = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, FALSE); 6754a5811 > p->p_memstat_state &= ~P_MEMSTAT_LOCKED; 6757,6760d5813 < < os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (specific) pid %d [%s]...done", < aPid, (*p->p_name ? p->p_name : "unknown")); < 6763,6778c5816 < p->p_memstat_freeze_sharedanon_pages += shared; < < memorystatus_frozen_shared_mb += shared; < < if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) { < p->p_memstat_state |= P_MEMSTAT_FROZEN; < memorystatus_frozen_count++; < } < < p->p_memstat_frozen_count++; < < /* < * Still keeping the P_MEMSTAT_LOCKED bit till we are actually done elevating this frozen process < * to its higher jetsam band. < */ < proc_list_unlock(); --- > memorystatus_frozen_count++; 6780c5818 < memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data)); --- > p->p_memstat_state |= (P_MEMSTAT_FROZEN | (shared ? 
0: P_MEMSTAT_NORECLAIM)); 6783,6794d5820 < < ret = memorystatus_update_inactive_jetsam_priority_band(p->p_pid, MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE, < memorystatus_freeze_jetsam_band, TRUE); < < if (ret) { < printf("Elevating the frozen process failed with %d\n", ret); < /* not fatal */ < ret = 0; < } < < proc_list_lock(); < 6799,6800d5824 < } else { < proc_list_lock(); 6803a5828 > memorystatus_freeze_count++; 6805,6828c5830 < if (memorystatus_frozen_count == (memorystatus_frozen_processes_max - 1)) { < /* < * Add some eviction logic here? At some point should we < * jetsam a process to get back its swap space so that we < * can freeze a more eligible process at this moment in time? < */ < } < } else { < char reason[128]; < if (freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) { < strlcpy(reason, "too much shared memory", 128); < } < < if (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { < strlcpy(reason, "low private-shared pages ratio", 128); < } < < if (freezer_error_code == FREEZER_ERROR_NO_COMPRESSOR_SPACE) { < strlcpy(reason, "no compressor space", 128); < } < < if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) { < strlcpy(reason, "no swap space", 128); < } --- > proc_list_unlock(); 6830,6832c5832,5834 < os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (specific) pid %d [%s]...skipped (%s)", < aPid, (*p->p_name ? p->p_name : "unknown"), reason); < p->p_memstat_state |= P_MEMSTAT_FREEZE_IGNORE; --- > memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data)); > } else { > proc_list_unlock(); 6834,6836d5835 < < p->p_memstat_state &= ~P_MEMSTAT_LOCKED; < proc_list_unlock(); 6840a5840,5841 > KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_END, > memorystatus_available_pages, aPid, 0, 0, 0); 6846c5847 < memorystatus_freeze_top_process(void) --- > memorystatus_freeze_top_process(boolean_t *memorystatus_freeze_swap_low) 6852,6865d5852 < unsigned int band = JETSAM_PRIORITY_IDLE; < boolean_t refreeze_processes = FALSE; < < proc_list_lock(); < < if (memorystatus_frozen_count >= memorystatus_frozen_processes_max) { < /* < * Freezer is already full but we are here and so let's < * try to refreeze any processes we might have thawed < * in the past and push out their compressed state out. < */ < refreeze_processes = TRUE; < band = (unsigned int) memorystatus_freeze_jetsam_band; < } 6867c5854,5855 < freeze_process: --- > KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START, > memorystatus_available_pages, 0, 0, 0, 0); 6869c5857,5859 < next_p = memorystatus_get_first_proc_locked(&band, FALSE); --- > proc_list_lock(); > > next_p = memorystatus_get_first_proc_locked(&i, TRUE); 6872c5862,5864 < uint32_t purgeable, wired, clean, dirty, shared; --- > uint32_t purgeable, wired, clean, dirty; > boolean_t shared; > uint32_t pages; 6874c5866 < int freezer_error_code = 0; --- > uint32_t state; 6877c5869 < next_p = memorystatus_get_next_proc_locked(&band, p, FALSE); --- > next_p = memorystatus_get_next_proc_locked(&i, p, TRUE); 6879a5872 > state = p->p_memstat_state; 6881,6886c5874,5876 < if (p->p_memstat_effectivepriority != (int32_t) band) { < /* < * We shouldn't be freezing processes outside the < * prescribed band. 
< */ < break; --- > /* Ensure the process is eligible for freezing */ > if ((state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FROZEN)) || !(state & P_MEMSTAT_SUSPENDED)) { > continue; // with lock held 6887a5878,5883 > > /* Only freeze processes meeting our minimum resident page criteria */ > memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL, NULL); > if (pages < memorystatus_freeze_pages_min) { > continue; // with lock held > } 6889,6918c5885 < /* Ensure the process is eligible for (re-)freezing */ < if (refreeze_processes) { < /* < * Has to have been frozen once before. < */ < if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == FALSE) { < continue; < } < < /* < * Has to have been resumed once before. < */ < if ((p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) == FALSE) { < continue; < } < < /* < * Not currently being looked at for something. < */ < if (p->p_memstat_state & P_MEMSTAT_LOCKED) { < continue; < } < < /* < * We are going to try and refreeze and so re-evaluate < * the process. We don't want to double count the shared < * memory. So deduct the old snapshot here. < */ < memorystatus_frozen_shared_mb -= p->p_memstat_freeze_sharedanon_pages; < p->p_memstat_freeze_sharedanon_pages = 0; --- > if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { 6920,6921c5887 < p->p_memstat_state &= ~P_MEMSTAT_REFREEZE_ELIGIBLE; < memorystatus_refreeze_eligible_count--; --- > /* Ensure there's enough free space to freeze this process. */ 6923,6927c5889 < } else { < if (memorystatus_is_process_eligible_for_freeze(p) == FALSE) { < continue; // with lock held < } < } --- > unsigned int avail_swap_space = 0; /* in pages. */ 6929d5890 < if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { 6932c5893 < * will hold compressed data. --- > * while will hold compressed data. 6933a5895 > avail_swap_space = vm_swap_get_free_space() / PAGE_SIZE_64; 6935c5897 < max_pages = MIN(memorystatus_freeze_pages_max, memorystatus_freeze_budget_pages_remaining); --- > max_pages = MIN(avail_swap_space, memorystatus_freeze_pages_max); 6936a5899,5903 > if (max_pages < memorystatus_freeze_pages_min) { > *memorystatus_freeze_swap_low = TRUE; > proc_list_unlock(); > goto exit; > } 6947a5915 > proc_list_unlock(); 6949c5917 < break; --- > goto exit; 6951,6957c5919,5920 < < proc_list_unlock(); < < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START, < memorystatus_available_pages, 0, 0, 0, 0); < < kr = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, &freezer_error_code, FALSE /* eval only */); --- > > kr = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, FALSE); 6959,6961d5921 < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_END, < memorystatus_available_pages, aPid, 0, 0, 0); < 6967a5928 > p->p_memstat_state &= ~P_MEMSTAT_LOCKED; 6971,6979d5931 < < if (refreeze_processes) { < os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: Refreezing (general) pid %d [%s]...done", < aPid, (*p->p_name ? p->p_name : "unknown")); < } else { < os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (general) pid %d [%s]...done", < aPid, (*p->p_name ? 
p->p_name : "unknown")); < } < 6982,6999c5934,5936 < p->p_memstat_freeze_sharedanon_pages += shared; < < memorystatus_frozen_shared_mb += shared; < < if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) { < p->p_memstat_state |= P_MEMSTAT_FROZEN; < memorystatus_frozen_count++; < } < < p->p_memstat_frozen_count++; < < /* < * Still keeping the P_MEMSTAT_LOCKED bit till we are actually done elevating this frozen process < * to its higher jetsam band. < */ < proc_list_unlock(); < < memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data)); --- > memorystatus_frozen_count++; > > p->p_memstat_state |= (P_MEMSTAT_FROZEN | (shared ? 0: P_MEMSTAT_NORECLAIM)); 7002,7012d5938 < < ret = memorystatus_update_inactive_jetsam_priority_band(p->p_pid, MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE, memorystatus_freeze_jetsam_band, TRUE); < < if (ret) { < printf("Elevating the frozen process failed with %d\n", ret); < /* not fatal */ < ret = 0; < } < < proc_list_lock(); < 7017,7018d5942 < } else { < proc_list_lock(); 7021a5946 > memorystatus_freeze_count++; 7023,7032c5948 < if (memorystatus_frozen_count == (memorystatus_frozen_processes_max - 1)) { < /* < * Add some eviction logic here? At some point should we < * jetsam a process to get back its swap space so that we < * can freeze a more eligible process at this moment in time? < */ < } < < /* Return KERN_SUCCESS */ < ret = kr; --- > proc_list_unlock(); 7034,7035c5950 < p->p_memstat_state &= ~P_MEMSTAT_LOCKED; < proc_rele_locked(p); --- > memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data)); 7037,7040c5952,5953 < /* < * We froze a process successfully. We can stop now < * and see if that helped. < */ --- > /* Return KERN_SUCESS */ > ret = kr; 7042d5954 < break; 7044,7091c5956 < < p->p_memstat_state &= ~P_MEMSTAT_LOCKED; < < if (refreeze_processes == TRUE) { < if ((freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) || < (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO)) { < /* < * Keeping this prior-frozen process in this high band when < * we failed to re-freeze it due to bad shared memory usage < * could cause excessive pressure on the lower bands. < * We need to demote it for now. It'll get re-evaluated next < * time because we don't set the P_MEMSTAT_FREEZE_IGNORE < * bit. < */ < < p->p_memstat_state &= ~P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND; < memorystatus_invalidate_idle_demotion_locked(p, TRUE); < memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, TRUE, TRUE); < } < } else { < p->p_memstat_state |= P_MEMSTAT_FREEZE_IGNORE; < } < < proc_rele_locked(p); < < char reason[128]; < if (freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) { < strlcpy(reason, "too much shared memory", 128); < } < < if (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { < strlcpy(reason, "low private-shared pages ratio", 128); < } < < if (freezer_error_code == FREEZER_ERROR_NO_COMPRESSOR_SPACE) { < strlcpy(reason, "no compressor space", 128); < } < < if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) { < strlcpy(reason, "no swap space", 128); < } < < os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (general) pid %d [%s]...skipped (%s)", < aPid, (*p->p_name ? 
p->p_name : "unknown"), reason); < < if (vm_compressor_low_on_space() || vm_swap_low_on_space()) { < break; < } --- > proc_list_unlock(); 7093,7111c5958,5960 < } < < if ((ret == -1) && < (memorystatus_refreeze_eligible_count >= MIN_THAW_REFREEZE_THRESHOLD) && < (refreeze_processes == FALSE)) { < /* < * We failed to freeze a process from the IDLE < * band AND we have some thawed processes < * AND haven't tried refreezing as yet. < * Let's try and re-freeze processes in the < * frozen band that have been resumed in the past < * and so have brought in state from disk. < */ < < band = (unsigned int) memorystatus_freeze_jetsam_band; < < refreeze_processes = TRUE; < < goto freeze_process; --- > > proc_rele(p); > goto exit; 7115a5965,5968 > exit: > KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_END, > memorystatus_available_pages, aPid, 0, 0, 0); > 7126a5980,5992 > uint32_t average_resident_pages, estimated_processes; > > /* Estimate the number of suspended processes we can fit */ > average_resident_pages = memorystatus_suspended_footprint_total / memorystatus_suspended_count; > estimated_processes = memorystatus_suspended_count + > ((memorystatus_available_pages - memorystatus_available_pages_critical) / average_resident_pages); > > /* If it's predicted that no freeze will occur, lower the threshold temporarily */ > if (estimated_processes <= FREEZE_SUSPENDED_THRESHOLD_DEFAULT) { > memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_LOW; > } else { > memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_DEFAULT; > } 7128c5994,5995 < memorystatus_freeze_suspended_threshold = MIN(memorystatus_freeze_suspended_threshold, FREEZE_SUSPENDED_THRESHOLD_DEFAULT); --- > MEMORYSTATUS_DEBUG(1, "memorystatus_can_freeze_processes: %d suspended processes, %d average resident pages / process, %d suspended processes estimated\n", > memorystatus_suspended_count, average_resident_pages, estimated_processes); 7198,7205d6064 < /* < * This function evaluates if the currently frozen processes deserve < * to stay in the higher jetsam band. If the # of thaws of a process < * is below our threshold, then we will demote that process into the IDLE < * band and put it at the head. We don't immediately kill the process here < * because it already has state on disk and so it might be worth giving < * it another shot at getting thawed/resumed and used. < */ 7207c6066 < memorystatus_demote_frozen_processes(void) --- > memorystatus_freeze_update_throttle_interval(mach_timespec_t *ts, struct throttle_interval_t *interval) 7209,7265c6068,6073 < unsigned int band = (unsigned int) memorystatus_freeze_jetsam_band; < unsigned int demoted_proc_count = 0; < proc_t p = PROC_NULL, next_p = PROC_NULL; < < proc_list_lock(); < < if (memorystatus_freeze_enabled == FALSE) { < /* < * Freeze has been disabled likely to < * reclaim swap space. So don't change < * any state on the frozen processes. 
< */ < proc_list_unlock(); < return; < } < < next_p = memorystatus_get_first_proc_locked(&band, FALSE); < while (next_p) { < < p = next_p; < next_p = memorystatus_get_next_proc_locked(&band, p, FALSE); < < if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == FALSE) { < continue; < } < < if (p->p_memstat_state & P_MEMSTAT_LOCKED) { < continue; < } < < if (p->p_memstat_thaw_count < memorystatus_thaw_count_demotion_threshold) { < p->p_memstat_state &= ~P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND; < memorystatus_invalidate_idle_demotion_locked(p, TRUE); < < memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, TRUE, TRUE); < #if DEVELOPMENT || DEBUG < os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus_demote_frozen_process pid %d [%s]", < p->p_pid, (*p->p_name ? p->p_name : "unknown")); < #endif /* DEVELOPMENT || DEBUG */ < < /* < * The freezer thread will consider this a normal app to be frozen < * because it is in the IDLE band. So we don't need the < * P_MEMSTAT_REFREEZE_ELIGIBLE state here. Also, if it gets resumed < * we'll correctly count it as eligible for re-freeze again. < * < * We don't drop the frozen count because this process still has < * state on disk. So there's a chance it gets resumed and then it < * should land in the higher jetsam band. For that it needs to < * remain marked frozen. < */ < if (p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) { < p->p_memstat_state &= ~P_MEMSTAT_REFREEZE_ELIGIBLE; < memorystatus_refreeze_eligible_count--; < } < < demoted_proc_count++; --- > unsigned int freeze_daily_pageouts_max = memorystatus_freeze_daily_mb_max * (1024 * 1024 / PAGE_SIZE); > if (CMP_MACH_TIMESPEC(ts, &interval->ts) >= 0) { > if (!interval->max_pageouts) { > interval->max_pageouts = (interval->burst_multiple * (((uint64_t)interval->mins * freeze_daily_pageouts_max) / (24 * 60))); > } else { > printf("memorystatus_freeze_update_throttle_interval: %d minute throttle timeout, resetting\n", interval->mins); 7267,7269c6075,6082 < < if (demoted_proc_count == memorystatus_max_frozen_demotions_daily) { < break; --- > interval->ts.tv_sec = interval->mins * 60; > interval->ts.tv_nsec = 0; > ADD_MACH_TIMESPEC(&interval->ts, ts); > /* Since we update the throttle stats pre-freeze, adjust for overshoot here */ > if (interval->pageouts > interval->max_pageouts) { > interval->pageouts -= interval->max_pageouts; > } else { > interval->pageouts = 0; 7271c6084,6088 < } --- > interval->throttle = FALSE; > } else if (!interval->throttle && interval->pageouts >= interval->max_pageouts) { > printf("memorystatus_freeze_update_throttle_interval: %d minute pageout limit exceeded; enabling throttle\n", interval->mins); > interval->throttle = TRUE; > } 7273,7274c6090,6092 < memorystatus_thaw_count = 0; < proc_list_unlock(); --- > MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_update_throttle_interval: throttle updated - %d frozen (%d max) within %dm; %dm remaining; throttle %s\n", > interval->pageouts, interval->max_pageouts, interval->mins, (interval->ts.tv_sec - ts->tv_sec) / 60, > interval->throttle ? "on" : "off"); 7277,7295c6095,6096 < < /* < * This function will do 4 things: < * < * 1) check to see if we are currently in a degraded freezer mode, and if so: < * - check to see if our window has expired and we should exit this mode, OR, < * - return a budget based on the degraded throttle window's max. pageouts vs current pageouts. < * < * 2) check to see if we are in a NEW normal window and update the normal throttle window's params. 
< * < * 3) check what the current normal window allows for a budget. < * < * 4) calculate the current rate of pageouts for DEGRADED_WINDOW_MINS duration. If that rate is below < * what we would normally expect, then we are running low on our daily budget and need to enter < * degraded perf. mode. < */ < < static void < memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) --- > static boolean_t > memorystatus_freeze_update_throttle(void) 7300,7301c6101,6102 < < unsigned int freeze_daily_pageouts_max = 0; --- > uint32_t i; > boolean_t throttled = FALSE; 7304,7310c6105,6106 < if (!memorystatus_freeze_throttle_enabled) { < /* < * No throttling...we can use the full budget everytime. < */ < *budget_pages_allowed = UINT64_MAX; < return; < } --- > if (!memorystatus_freeze_throttle_enabled) > return FALSE; 7315a6112,6123 > > /* Check freeze pageouts over multiple intervals and throttle if we've exceeded our budget. > * > * This ensures that periods of inactivity can't be used as 'credit' towards freeze if the device has > * remained dormant for a long period. We do, however, allow increased thresholds for shorter intervals in > * order to allow for bursts of activity. > */ > for (i = 0; i < sizeof(throttle_intervals) / sizeof(struct throttle_interval_t); i++) { > memorystatus_freeze_update_throttle_interval(&ts, &throttle_intervals[i]); > if (throttle_intervals[i].throttle == TRUE) > throttled = TRUE; > } 7317,7445c6125 < struct throttle_interval_t *interval = NULL; < < if (memorystatus_freeze_degradation == TRUE) { < < interval = degraded_throttle_window; < < if (CMP_MACH_TIMESPEC(&ts, &interval->ts) >= 0) { < memorystatus_freeze_degradation = FALSE; < interval->pageouts = 0; < interval->max_pageouts = 0; < < } else { < *budget_pages_allowed = interval->max_pageouts - interval->pageouts; < } < } < < interval = normal_throttle_window; < < if (CMP_MACH_TIMESPEC(&ts, &interval->ts) >= 0) { < /* < * New throttle window. < * Rollover any unused budget. < * Also ask the storage layer what the new budget needs to be. < */ < uint64_t freeze_daily_budget = 0; < unsigned int daily_budget_pageouts = 0; < < if (vm_swap_max_budget(&freeze_daily_budget)) { < memorystatus_freeze_daily_mb_max = (freeze_daily_budget / (1024 * 1024)); < os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: memorystatus_freeze_daily_mb_max set to %dMB\n", memorystatus_freeze_daily_mb_max); < } < < freeze_daily_pageouts_max = memorystatus_freeze_daily_mb_max * (1024 * 1024 / PAGE_SIZE); < < daily_budget_pageouts = (interval->burst_multiple * (((uint64_t)interval->mins * freeze_daily_pageouts_max) / NORMAL_WINDOW_MINS)); < interval->max_pageouts = (interval->max_pageouts - interval->pageouts) + daily_budget_pageouts; < < interval->ts.tv_sec = interval->mins * 60; < interval->ts.tv_nsec = 0; < ADD_MACH_TIMESPEC(&interval->ts, &ts); < /* Since we update the throttle stats pre-freeze, adjust for overshoot here */ < if (interval->pageouts > interval->max_pageouts) { < interval->pageouts -= interval->max_pageouts; < } else { < interval->pageouts = 0; < } < *budget_pages_allowed = interval->max_pageouts; < < memorystatus_demote_frozen_processes(); < < } else { < /* < * Current throttle window. < * Deny freezing if we have no budget left. < * Try graceful degradation if we are within 25% of: < * - the daily budget, and < * - the current budget left is below our normal budget expectations. 
< */ < < #if DEVELOPMENT || DEBUG < /* < * This can only happen in the INTERNAL configs because we allow modifying the daily budget for testing. < */ < < if (freeze_daily_pageouts_max > interval->max_pageouts) { < /* < * We just bumped the daily budget. Re-evaluate our normal window params. < */ < interval->max_pageouts = (interval->burst_multiple * (((uint64_t)interval->mins * freeze_daily_pageouts_max) / NORMAL_WINDOW_MINS)); < memorystatus_freeze_degradation = FALSE; //we'll re-evaluate this below... < } < #endif /* DEVELOPMENT || DEBUG */ < < if (memorystatus_freeze_degradation == FALSE) { < < if (interval->pageouts >= interval->max_pageouts) { < < *budget_pages_allowed = 0; < < } else { < < int budget_left = interval->max_pageouts - interval->pageouts; < int budget_threshold = (freeze_daily_pageouts_max * FREEZE_DEGRADATION_BUDGET_THRESHOLD) / 100; < < mach_timespec_t time_left = {0,0}; < < time_left.tv_sec = interval->ts.tv_sec; < time_left.tv_nsec = 0; < < SUB_MACH_TIMESPEC(&time_left, &ts); < < if (budget_left <= budget_threshold) { < < /* < * For the current normal window, calculate how much we would pageout in a DEGRADED_WINDOW_MINS duration. < * And also calculate what we would pageout for the same DEGRADED_WINDOW_MINS duration if we had the full < * daily pageout budget. < */ < < unsigned int current_budget_rate_allowed = ((budget_left / time_left.tv_sec) / 60) * DEGRADED_WINDOW_MINS; < unsigned int normal_budget_rate_allowed = (freeze_daily_pageouts_max / NORMAL_WINDOW_MINS) * DEGRADED_WINDOW_MINS; < < /* < * The current rate of pageouts is below what we would expect for < * the normal rate i.e. we have below normal budget left and so... < */ < < if (current_budget_rate_allowed < normal_budget_rate_allowed) { < < memorystatus_freeze_degradation = TRUE; < degraded_throttle_window->max_pageouts = current_budget_rate_allowed; < degraded_throttle_window->pageouts = 0; < < /* < * Switch over to the degraded throttle window so the budget < * doled out is based on that window. < */ < interval = degraded_throttle_window; < } < } < < *budget_pages_allowed = interval->max_pageouts - interval->pageouts; < } < } < } < < MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_update_throttle_interval: throttle updated - %d frozen (%d max) within %dm; %dm remaining; throttle %s\n", < interval->pageouts, interval->max_pageouts, interval->mins, (interval->ts.tv_sec - ts->tv_sec) / 60, < interval->throttle ? "on" : "off"); --- > return throttled; 7454d6133 < 7456,7467c6135,6141 < < if ((memorystatus_frozen_count < memorystatus_frozen_processes_max) || < (memorystatus_refreeze_eligible_count >= MIN_THAW_REFREEZE_THRESHOLD)) { < < if (memorystatus_can_freeze(&memorystatus_freeze_swap_low)) { < < /* Only freeze if we've not exceeded our pageout budgets.*/ < memorystatus_freeze_update_throttle(&memorystatus_freeze_budget_pages_remaining); < < if (memorystatus_freeze_budget_pages_remaining) { < memorystatus_freeze_top_process(); < } --- > if (memorystatus_can_freeze(&memorystatus_freeze_swap_low)) { > /* Only freeze if we've not exceeded our pageout budgets.*/ > if (!memorystatus_freeze_update_throttle()) { > memorystatus_freeze_top_process(&memorystatus_freeze_swap_low); > } else { > printf("memorystatus_freeze_thread: in throttle, ignoring freeze\n"); > memorystatus_freeze_throttle_count++; /* Throttled, update stats */ 7471,7480d6144 < < /* < * We use memorystatus_apps_idle_delay_time because if/when we adopt aging for applications, < * it'll tie neatly into running the freezer once we age an application. 
< * < * Till then, it serves as a good interval that can be tuned via a sysctl too. < */ < memorystatus_freezer_thread_next_run_ts = mach_absolute_time() + memorystatus_apps_idle_delay_time; < < assert_wait((event_t) &memorystatus_freeze_wakeup, THREAD_UNINT); 7482a6147 > assert_wait((event_t) &memorystatus_freeze_wakeup, THREAD_UNINT); 7486,7524d6150 < static boolean_t < memorystatus_freeze_thread_should_run(void) < { < /* < * No freezer_mutex held here...see why near call-site < * within memorystatus_pages_update(). < */ < < boolean_t should_run = FALSE; < < if (memorystatus_freeze_enabled == FALSE) { < goto out; < } < < if (memorystatus_available_pages > memorystatus_freeze_threshold) { < goto out; < } < < if ((memorystatus_frozen_count >= memorystatus_frozen_processes_max) && < (memorystatus_refreeze_eligible_count < MIN_THAW_REFREEZE_THRESHOLD)) { < goto out; < } < < if (memorystatus_frozen_shared_mb_max && (memorystatus_frozen_shared_mb >= memorystatus_frozen_shared_mb_max)) { < goto out; < } < < uint64_t curr_time = mach_absolute_time(); < < if (curr_time < memorystatus_freezer_thread_next_run_ts) { < goto out; < } < < should_run = TRUE; < < out: < return should_run; < } < 7807,7817c6433 < < if (p->p_memstat_effectivepriority < JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC) { < /* < * IDLE and IDLE_DEFERRED bands contain processes < * that have dropped memory to be under their inactive < * memory limits. And so they can't really give back < * anything. < */ < eligible = FALSE; < } < --- > 7833c6449,6450 < void memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, boolean_t *is_dirty_tracked, boolean_t *allow_idle_exit) --- > boolean_t > memorystatus_proc_is_dirty_unsafe(void *v) 7836,7843c6453 < *is_dirty = FALSE; < *is_dirty_tracked = FALSE; < *allow_idle_exit = FALSE; < } else { < proc_t p = (proc_t)v; < *is_dirty = (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) != 0; < *is_dirty_tracked = (p->p_memstat_dirty & P_DIRTY_TRACK) != 0; < *allow_idle_exit = (p->p_memstat_dirty & P_DIRTY_ALLOW_IDLE_EXIT) != 0; --- > return FALSE; 7844a6455,6456 > proc_t p = (proc_t)v; > return (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) != 0; 7865a6478,6485 > /* > * This value is the threshold that a process must meet to be considered for scavenging. 
> */ > #if CONFIG_EMBEDDED > #define VM_PRESSURE_MINIMUM_RSIZE 1 /* MB */ > #else /* CONFIG_EMBEDDED */ > #define VM_PRESSURE_MINIMUM_RSIZE 10 /* MB */ > #endif /* CONFIG_EMBEDDED */ 8107c6727 < if (resident_size >= vm_pressure_task_footprint_min) { --- > if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) { 8497a7118,7119 > extern int memorystatus_purge_on_warning; > extern int memorystatus_purge_on_critical; 8546c7168 < force_purge = vm_pageout_state.memorystatus_purge_on_warning; --- > force_purge = memorystatus_purge_on_warning; 8551c7173 < force_purge = vm_pageout_state.memorystatus_purge_on_critical; --- > force_purge = memorystatus_purge_on_critical; 8601,8603c7223,7229 < SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_warning, CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pageout_state.memorystatus_purge_on_warning, 0, ""); < SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_urgent, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pageout_state.memorystatus_purge_on_urgent, 0, ""); < SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_critical, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pageout_state.memorystatus_purge_on_critical, 0, ""); --- > extern int memorystatus_purge_on_warning; > extern int memorystatus_purge_on_urgent; > extern int memorystatus_purge_on_critical; > > SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_warning, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_purge_on_warning, 0, ""); > SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_urgent, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_purge_on_urgent, 0, ""); > SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_critical, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_purge_on_critical, 0, ""); 8605,8607d7230 < #if DEBUG || DEVELOPMENT < SYSCTL_UINT(_kern, OID_AUTO, memorystatus_vm_pressure_events_enabled, CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pressure_events_enabled, 0, ""); < #endif 8633c7256 < if (!*list_ptr) { --- > if (!list_ptr) { 8822,8847d7444 < void < memorystatus_fast_jetsam_override(boolean_t enable_override) < { < /* If fast jetsam is not enabled, simply return */ < if (!fast_jetsam_enabled) < return; < < if (enable_override) { < if ((memorystatus_jetsam_policy & kPolicyMoreFree) == kPolicyMoreFree) < return; < proc_list_lock(); < memorystatus_jetsam_policy |= kPolicyMoreFree; < memorystatus_thread_pool_max(); < memorystatus_update_levels_locked(TRUE); < proc_list_unlock(); < } else { < if ((memorystatus_jetsam_policy & kPolicyMoreFree) == 0) < return; < proc_list_lock(); < memorystatus_jetsam_policy &= ~kPolicyMoreFree; < memorystatus_thread_pool_default(); < memorystatus_update_levels_locked(TRUE); < proc_list_unlock(); < } < } < 8866a7464,7474 > if ((more_free && ((memorystatus_jetsam_policy & kPolicyMoreFree) == kPolicyMoreFree)) || > (!more_free && ((memorystatus_jetsam_policy & kPolicyMoreFree) == 0))) { > > /* > * No change in state. 
> */ > return 0; > } > > proc_list_lock(); > 8868c7476 < memorystatus_fast_jetsam_override(true); --- > memorystatus_jetsam_policy |= kPolicyMoreFree; 8870c7478 < memorystatus_fast_jetsam_override(false); --- > memorystatus_jetsam_policy &= ~kPolicyMoreFree; 8872a7481,7484 > memorystatus_update_levels_locked(TRUE); > > proc_list_unlock(); > 8914,8942d7525 < /* < * Get the previous fully populated snapshot < */ < static int < memorystatus_get_jetsam_snapshot_copy(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) { < size_t input_size = *snapshot_size; < < if (memorystatus_jetsam_snapshot_copy_count > 0) { < *snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + (sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_copy_count)); < } else { < *snapshot_size = 0; < } < < if (size_only) { < return 0; < } < < if (input_size < *snapshot_size) { < return EINVAL; < } < < *snapshot = memorystatus_jetsam_snapshot_copy; < < MEMORYSTATUS_DEBUG(7, "memorystatus_get_jetsam_snapshot_copy: returned inputsize (%ld), snapshot_size(%ld), listcount(%ld)\n", < (long)input_size, (long)*snapshot_size, (long)memorystatus_jetsam_snapshot_copy_count); < < return 0; < } < 9036c7619 < if (flags & ~(MEMORYSTATUS_SNAPSHOT_ON_DEMAND | MEMORYSTATUS_SNAPSHOT_AT_BOOT | MEMORYSTATUS_SNAPSHOT_COPY)) { --- > if (flags & ~(MEMORYSTATUS_SNAPSHOT_ON_DEMAND | MEMORYSTATUS_SNAPSHOT_AT_BOOT)) { 9043c7626,7627 < if (flags & (flags - 0x1)) { --- > if ((flags & (MEMORYSTATUS_SNAPSHOT_ON_DEMAND | MEMORYSTATUS_SNAPSHOT_AT_BOOT)) == > (MEMORYSTATUS_SNAPSHOT_ON_DEMAND | MEMORYSTATUS_SNAPSHOT_AT_BOOT)) { 9045c7629 < * Can't have multiple flags set at the same time. --- > * Can't have both set at the same time. 9061,9062d7644 < } else if (flags & MEMORYSTATUS_SNAPSHOT_COPY) { < error = memorystatus_get_jetsam_snapshot_copy(&snapshot, &buffer_size, size_only); 9083,9084d7664 < * If working with a copy of the snapshot < * there is nothing to clear or update. 9091,9092d7670 < * However, we make a copy for any parties that might be interested < * in the previous fully populated snapshot. 9095,9096d7672 < memcpy(memorystatus_jetsam_snapshot_copy, memorystatus_jetsam_snapshot, memorystatus_jetsam_snapshot_size); < memorystatus_jetsam_snapshot_copy_count = memorystatus_jetsam_snapshot_count; 9122,9123c7698,7699 < * Routine: memorystatus_cmd_grp_set_priorities < * Purpose: Update priorities for a group of processes. --- > * Routine: memorystatus_cmd_grp_set_properties > * Purpose: Update properties for a group of processes. 
9124a7701 > * Supported Properties: 9156,9157c7733,7734 < static int < memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) --- > /* This internal structure can expand when we add support for more properties */ > typedef struct memorystatus_internal_properties 9158a7736,7744 > proc_t proc; > int32_t priority; /* see memorytstatus_priority_entry_t : priority */ > } memorystatus_internal_properties_t; > > > static int > memorystatus_cmd_grp_set_properties(int32_t flags, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) { > > #pragma unused (flags) 9166c7752 < memorystatus_properties_entry_v1_t *entries = NULL; --- > memorystatus_priority_entry_t *entries = NULL; 9170,9174d7755 < typedef struct memorystatus_internal_properties { < proc_t proc; < int32_t priority; < } memorystatus_internal_properties_t; < 9187c7768 < if ((buffer == USER_ADDR_NULL) || (buffer_size == 0)) { --- > if ((buffer == USER_ADDR_NULL) || (buffer_size == 0) || ((buffer_size % sizeof(memorystatus_priority_entry_t)) != 0)) { 9192,9193c7773,7774 < entry_count = (buffer_size / sizeof(memorystatus_properties_entry_v1_t)); < if ((entries = (memorystatus_properties_entry_v1_t *)kalloc(buffer_size)) == NULL) { --- > entry_count = (buffer_size / sizeof(memorystatus_priority_entry_t)); > if ((entries = (memorystatus_priority_entry_t *)kalloc(buffer_size)) == NULL) { 9198c7779 < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_START, MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY, entry_count, 0, 0, 0); --- > KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_START, entry_count, 0, 0, 0, 0); 9205,9214d7785 < if (entries[0].version == MEMORYSTATUS_MPE_VERSION_1) { < if ((buffer_size % MEMORYSTATUS_MPE_VERSION_1_SIZE) != 0) { < error = EINVAL; < goto out; < } < } else { < error = EINVAL; < goto out; < } < 9306,9307c7877 < out: < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_END, MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY, entry_count, table_count, 0, 0); --- > KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_END, entry_count, table_count, 0, 0, 0); 9308a7879 > out: 9317,9433d7887 < static int < memorystatus_cmd_grp_set_probabilities(user_addr_t buffer, size_t buffer_size) < { < int error = 0; < memorystatus_properties_entry_v1_t *entries = NULL; < uint32_t entry_count = 0, i = 0; < memorystatus_internal_probabilities_t *tmp_table_new = NULL, *tmp_table_old = NULL; < size_t tmp_table_new_size = 0, tmp_table_old_size = 0; < < /* Verify inputs */ < if ((buffer == USER_ADDR_NULL) || (buffer_size == 0)) { < error = EINVAL; < goto out; < } < < entry_count = (buffer_size / sizeof(memorystatus_properties_entry_v1_t)); < < if ((entries = (memorystatus_properties_entry_v1_t *) kalloc(buffer_size)) == NULL) { < error = ENOMEM; < goto out; < } < < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_START, MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY, entry_count, 0, 0, 0); < < if ((error = copyin(buffer, entries, buffer_size)) != 0) { < goto out; < } < < if (entries[0].version == MEMORYSTATUS_MPE_VERSION_1) { < if ((buffer_size % MEMORYSTATUS_MPE_VERSION_1_SIZE) != 0) { < error = EINVAL; < goto out; < } < } else { < error = EINVAL; < goto out; < } < < /* Verify sanity of input priorities */ < for (i=0; i < entry_count; i++) { < /* < * 0 - low probability of use. < * 1 - high probability of use. 
< * < * Keeping this field an int (& not a bool) to allow < * us to experiment with different values/approaches < * later on. < */ < if (entries[i].use_probability > 1) { < error = EINVAL; < goto out; < } < } < < tmp_table_new_size = sizeof(memorystatus_internal_probabilities_t) * entry_count; < < if ( (tmp_table_new = (memorystatus_internal_probabilities_t *) kalloc(tmp_table_new_size)) == NULL) { < error = ENOMEM; < goto out; < } < memset(tmp_table_new, 0, tmp_table_new_size); < < proc_list_lock(); < < if (memorystatus_global_probabilities_table) { < tmp_table_old = memorystatus_global_probabilities_table; < tmp_table_old_size = memorystatus_global_probabilities_size; < } < < memorystatus_global_probabilities_table = tmp_table_new; < memorystatus_global_probabilities_size = tmp_table_new_size; < tmp_table_new = NULL; < < for (i=0; i < entry_count; i++ ) { < /* Build the table data */ < strlcpy(memorystatus_global_probabilities_table[i].proc_name, entries[i].proc_name, MAXCOMLEN + 1); < memorystatus_global_probabilities_table[i].use_probability = entries[i].use_probability; < } < < proc_list_unlock(); < < out: < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_END, MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY, entry_count, tmp_table_new_size, 0, 0); < < if (entries) { < kfree(entries, buffer_size); < entries = NULL; < } < < if (tmp_table_old) { < kfree(tmp_table_old, tmp_table_old_size); < tmp_table_old = NULL; < } < < return (error); < < } < < static int < memorystatus_cmd_grp_set_properties(int32_t flags, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) < { < int error = 0; < < if ((flags & MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY) == MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY) { < < error = memorystatus_cmd_grp_set_priorities(buffer, buffer_size); < < } else if ((flags & MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY) == MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY) { < < error = memorystatus_cmd_grp_set_probabilities(buffer, buffer_size); < < } else { < error = EINVAL; < } < < return error; < } 9798,9918d8251 < static int < memorystatus_get_process_is_managed(pid_t pid, int *is_managed) < { < proc_t p = NULL; < < /* Validate inputs */ < if (pid == 0) { < return EINVAL; < } < < p = proc_find(pid); < if (!p) { < return ESRCH; < } < < proc_list_lock(); < *is_managed = ((p->p_memstat_state & P_MEMSTAT_MANAGED) ? 1 : 0); < proc_rele_locked(p); < proc_list_unlock(); < < return 0; < } < < static int < memorystatus_set_process_is_managed(pid_t pid, boolean_t set_managed) < { < proc_t p = NULL; < < /* Validate inputs */ < if (pid == 0) { < return EINVAL; < } < < p = proc_find(pid); < if (!p) { < return ESRCH; < } < < proc_list_lock(); < if (set_managed == TRUE) { < p->p_memstat_state |= P_MEMSTAT_MANAGED; < } else { < p->p_memstat_state &= ~P_MEMSTAT_MANAGED; < } < proc_rele_locked(p); < proc_list_unlock(); < < return 0; < } < < static int < memorystatus_get_process_is_freezable(pid_t pid, int *is_freezable) < { < proc_t p = PROC_NULL; < < if (pid == 0) { < return EINVAL; < } < < p = proc_find(pid); < if (!p) { < return ESRCH; < } < < /* < * Only allow this on the current proc for now. < * We can check for privileges and allow targeting another process in the future. < */ < if (p != current_proc()) { < proc_rele(p); < return EPERM; < } < < proc_list_lock(); < *is_freezable = ((p->p_memstat_state & P_MEMSTAT_FREEZE_DISABLED) ? 
0 : 1); < proc_rele_locked(p); < proc_list_unlock(); < < return 0; < } < < static int < memorystatus_set_process_is_freezable(pid_t pid, boolean_t is_freezable) < { < proc_t p = PROC_NULL; < < if (pid == 0) { < return EINVAL; < } < < p = proc_find(pid); < if (!p) { < return ESRCH; < } < < /* < * Only allow this on the current proc for now. < * We can check for privileges and allow targeting another process in the future. < */ < if (p != current_proc()) { < proc_rele(p); < return EPERM; < } < < proc_list_lock(); < if (is_freezable == FALSE) { < /* Freeze preference set to FALSE. Set the P_MEMSTAT_FREEZE_DISABLED bit. */ < p->p_memstat_state |= P_MEMSTAT_FREEZE_DISABLED; < printf("memorystatus_set_process_is_freezable: disabling freeze for pid %d [%s]\n", < p->p_pid, (*p->p_name ? p->p_name : "unknown")); < } else { < p->p_memstat_state &= ~P_MEMSTAT_FREEZE_DISABLED; < printf("memorystatus_set_process_is_freezable: enabling freeze for pid %d [%s]\n", < p->p_pid, (*p->p_name ? p->p_name : "unknown")); < } < proc_rele_locked(p); < proc_list_unlock(); < < return 0; < } < 9922d8254 < boolean_t skip_auth_check = FALSE; 9930,9936c8262,8263 < /* We don't need entitlements if we're setting/ querying the freeze preference for a process. Skip the check below. */ < if (args->command == MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE || args->command == MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE) { < skip_auth_check = TRUE; < } < < /* Need to be root or have entitlement. */ < if (!kauth_cred_issuser(kauth_cred_get()) && !IOTaskHasEntitlement(current_task(), MEMORYSTATUS_ENTITLEMENT) && !skip_auth_check) { --- > /* Need to be root or have entitlement */ > if (!kauth_cred_issuser(kauth_cred_get()) && !IOTaskHasEntitlement(current_task(), MEMORYSTATUS_ENTITLEMENT)) { 10042,10045c8369 < error = memorystatus_update_inactive_jetsam_priority_band(args->pid, args->command, JETSAM_PRIORITY_ELEVATED_INACTIVE, args->flags ? TRUE : FALSE); < break; < case MEMORYSTATUS_CMD_SET_PROCESS_IS_MANAGED: < error = memorystatus_set_process_is_managed(args->pid, args->flags); --- > error = memorystatus_update_inactive_jetsam_priority_band(args->pid, args->command, args->flags ? TRUE : FALSE); 10048,10067d8371 < case MEMORYSTATUS_CMD_GET_PROCESS_IS_MANAGED: < error = memorystatus_get_process_is_managed(args->pid, ret); < break; < < case MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE: < error = memorystatus_set_process_is_freezable(args->pid, args->flags ? TRUE : FALSE); < break; < < case MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE: < error = memorystatus_get_process_is_freezable(args->pid, ret); < break; < < #if CONFIG_FREEZE < #if DEVELOPMENT || DEBUG < case MEMORYSTATUS_CMD_FREEZER_CONTROL: < error = memorystatus_freezer_control(args->flags, args->buffer, args->buffersize, ret); < break; < #endif /* DEVELOPMENT || DEBUG */ < #endif /* CONFIG_FREEZE */ < 10226a8531,8533 > if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) > kn->kn_udata = kev->udata; > 10613c8920 < if (!p || (!isApp(p)) || (p->p_memstat_state & (P_MEMSTAT_INTERNAL | P_MEMSTAT_MANAGED))) { --- > if (!p || (!isApp(p)) || (p->p_memstat_state & P_MEMSTAT_INTERNAL)) { 10616,10625d8922 < * < * We also skip processes that have the P_MEMSTAT_MANAGED bit set, i.e. < * they're managed by assertiond. These are iOS apps that have been ported < * to macOS. assertiond might be in the process of modifying the app's < * priority / memory limit - so it might have the proc_list lock, and then try < * to take the task lock. 
Meanwhile we've entered this function with the task lock < * held, and we need the proc_list lock below. So we'll deadlock with assertiond. < * < * It should be fine to read the P_MEMSTAT_MANAGED bit without the proc_list < * lock here, since assertiond only sets this bit on process launch. 10717,10718d9013 < KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_CHANGE_PRIORITY), p->p_pid, priority, p->p_memstat_effectivepriority, 0, 0); <
./bsd/kern/kern_fork.c differences detected: 161,167c161 < thread_t fork_create_child(task_t parent_task, < coalition_t *parent_coalitions, < proc_t child, < int inherit_memory, < int is_64bit_addr, < int is_64bit_data, < int in_exec); --- > thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child, int inherit_memory, int is64bit, int in_exec); 747c741 < * child_proc child process --- > * child_proc child process 749,755c743,748 < * to be inherited by the child < * is_64bit_addr TRUE, if the child being created will < * be associated with a 64 bit address space < * is_64bit_data TRUE if the child being created will use a < 64-bit register state < * in_exec TRUE, if called from execve or posix spawn set exec < * FALSE, if called from fork or vfexec --- > * to be inherited by the child > * is64bit TRUE, if the child being created will > * be associated with a 64 bit process > * rather than a 32 bit process > * in_exec TRUE, if called from execve or posix spawn set exec > * FALSE, if called from fork or vfexec 774,780c767 < fork_create_child(task_t parent_task, < coalition_t *parent_coalitions, < proc_t child_proc, < int inherit_memory, < int is_64bit_addr, < int is_64bit_data, < int in_exec) --- > fork_create_child(task_t parent_task, coalition_t *parent_coalitions, proc_t child_proc, int inherit_memory, int is64bit, int in_exec) 790,791c777 < is_64bit_addr, < is_64bit_data, --- > is64bit, 985,1004c971 < /* < * In the case where the parent_task is TASK_NULL (during the init path) < * we make the assumption that the register size will be the same as the < * address space size since there's no way to determine the possible < * register size until an image is exec'd. < * < * The only architecture that has different address space and register sizes < * (arm64_32) isn't being used within kernel-space, so the above assumption < * always holds true for the init path. < */ < const int parent_64bit_addr = parent_proc->p_flag & P_LP64; < const int parent_64bit_data = (parent_task == TASK_NULL) ? parent_64bit_addr : task_get_64bit_data(parent_task); < < child_thread = fork_create_child(parent_task, < parent_coalitions, < child_proc, < inherit_memory, < parent_64bit_addr, < parent_64bit_data, < FALSE); --- > child_thread = fork_create_child(parent_task, parent_coalitions, child_proc, inherit_memory, parent_proc->p_flag & P_LP64, FALSE); 1016c983,984 < if (parent_64bit_addr) { --- > if (parent_proc->p_flag & P_LP64) { > task_set_64bit(child_task, TRUE); 1018a987 > task_set_64bit(child_task, FALSE); 1144d1112 < p->p_sigacts = NULL; 1146,1147d1113 < p->p_stats = NULL; < 1199d1164 < child_proc->p_stats = NULL; 1209d1173 < child_proc->p_sigacts = NULL; 1211d1174 < child_proc->p_stats = NULL; 1306c1269 < child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK)); --- > child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY)); 1456c1419 < child_proc->p_memstat_freeze_sharedanon_pages = 0; --- > child_proc->p_memstat_suspendedfootprint = 0; 1686,1687c1649,1654 < if (uth->uu_kqr_bound) { < kqueue_threadreq_unbind(p, uth->uu_kqr_bound); --- > if (uth->uu_kqueue_bound) { > kevent_qos_internal_unbind(p, > 0, /* didn't save qos_class */ > uth->uu_thread, > uth->uu_kqueue_flags); > assert(uth->uu_kqueue_override_is_sync == 0); NO DIFFS in ./bsd/kern/stackshot.c
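The kern_fork.c hunks split the former is64bit argument into is_64bit_addr and is_64bit_data, taking the register width from the parent task when one exists and falling back to the address-space width during early boot (when parent_task is TASK_NULL). A minimal sketch of that decision, with placeholder types standing in for task_t and task_get_64bit_data():

    #include <stdbool.h>
    #include <stddef.h>

    /* Placeholders for illustration only; not kernel types. */
    typedef struct fake_task { bool has_64bit_data; } *fake_task_t;
    #define FAKE_TASK_NULL ((fake_task_t)NULL)

    struct child_flags {
    	bool is_64bit_addr;   /* 64-bit address space */
    	bool is_64bit_data;   /* 64-bit register state; differs from addr only on arm64_32 */
    };

    static struct child_flags
    derive_child_flags(fake_task_t parent_task, bool parent_lp64)
    {
    	struct child_flags f;

    	f.is_64bit_addr = parent_lp64;
    	/*
    	 * No parent task during the init path, so assume the register width
    	 * matches the address-space width, as the hunk's comment explains.
    	 */
    	f.is_64bit_data = (parent_task == FAKE_TASK_NULL)
    	    ? parent_lp64
    	    : parent_task->has_64bit_data;
    	return f;
    }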

./bsd/kern/kern_credential.c differences detected: 596c596 < struct kauth_cache_sizes sz_arg = {}; --- > struct kauth_cache_sizes sz_arg; 792c792 < message = ut->uu_save.uus_kauth.message; --- > message = ut->uu_kevent.uu_kauth.message; 919c919 < ut->uu_save.uus_kauth.message = message; --- > ut->uu_kevent.uu_kauth.message = message; 5660c5660 < MALLOC( cred_listp, debug_ucred *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO); --- > MALLOC( cred_listp, debug_ucred *, req->oldlen, M_TEMP, M_WAITOK ); 5759c5759 < MALLOC( bt_bufp, cred_debug_buffer *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO); --- > MALLOC( bt_bufp, cred_debug_buffer *, req->oldlen, M_TEMP, M_WAITOK );
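Besides renaming the uthread save area (uu_kevent.uu_kauth becomes uu_save.uus_kauth), the kern_credential.c hunks add M_ZERO to the two sysctl debug-buffer allocations, so any bytes that are never overwritten cannot leak stale heap contents to user space. A user-space analogy of the same idea, with calloc standing in for MALLOC(..., M_WAITOK | M_ZERO):

    #include <stdlib.h>

    struct record { int id; char pad[60]; };

    static struct record *
    make_report(size_t count)
    {
    	/* Zero-filled allocation, like MALLOC(..., M_WAITOK | M_ZERO). */
    	struct record *buf = calloc(count, sizeof(*buf));
    	if (buf == NULL)
    		return NULL;

    	/* Only some fields are filled in; everything else stays safely zeroed. */
    	for (size_t i = 0; i < count; i++)
    		buf[i].id = (int)i;
    	return buf;                         /* caller frees */
    }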
./bsd/kern/trace_codes differences detected: 200d199 < 0x1200030 MACH_IPC_port_entry_modify 239,240d237 < 0x1300448 MACH_vm_info8 < 0x130044c MACH_vm_info9 247,248d243 < 0x1300498 MACH_vm_page_grab < 0x130049c MACH_vm_page_release 250,256c245 < 0x13004c4 MACH_vm_execve < 0x13004c8 MACH_vm_wakeup_compactor_swapper < 0x13004cc MACH_vm_upl_request < 0x13004d0 MACH_vm_iopl_request < 0x13004d4 MACH_vm_kern_request < 0x1300500 MACH_vm_data_write < 0x1300504 vm_pressure_level_change --- > 0x1300500 MACH_vm_data_write 312,316d300 < 0x14000D0 MACH_SCHED_LOAD_EFFECTIVE < 0x14000D4 MACH_PROMOTED < 0x14000D8 MACH_UNPROMOTED < 0x14000DC MACH_PROMOTED_UPDATE < 0x14000E0 MACH_QUIESCENT_COUNTER 364d347 < 0x1700048 PMAP_switch 370,372d352 < 0x1800014 MACH_CLOCK_BRIDGE_SKIP_TS < 0x1800018 MACH_CLOCK_BRIDGE_TS_MISMATCH < 0x180001c MACH_CLOCK_BRIDGE_OBSV_RATE 888d867 < 0x30B0000 VFS_MountRoot 892,896d870 < 0x3120000 DECMPFS_decmp_file < 0x3120004 DECMPFS_fetch_cmp_header < 0x3120008 DECMPFS_fetch_uncmp_data < 0x3120010 DECMPFS_free_cmp_data < 0x3120014 DECMPFS_file_is_cmp 916,917d889 < 0x4020038 MEMSTAT_change_priority < 0x402003C MEMSTAT_fast_jetsam 939d910 < 0x4030058 KEVENT_knote_vanished 1210,1213d1180 < 0x5310290 CPUPM_URGENCY < 0x5310294 CPUPM_IDLE_EXIT1 < 0x5310298 CPUPM_PST_QOS_CONT < 0x531029C CPUPM_MID 1240d1206 < 0x7010014 TRACE_STRING_THREADNAME_PREV 1263,1286c1229,1238 < 0x9010004 wq_pthread_exit < 0x9010008 wq_workqueue_exit < 0x901000c wq_runthread < 0x9010014 wq_death_call < 0x9010024 wq_thread_block < 0x9010028 wq_thactive_update < 0x901002c wq_add_timer < 0x9010030 wq_start_add_timer < 0x9010050 wq_override_dispatch < 0x9010054 wq_override_reset < 0x9010074 wq_thread_create_failed < 0x9010078 wq_thread_terminate < 0x901007c wq_thread_create < 0x9010080 wq_select_threadreq < 0x901008c wq_creator_select < 0x9010090 wq_creator_yield < 0x9010094 wq_constrained_admission < 0x9010098 wq_wqops_reqthreads < 0x9020004 wq_create < 0x9020008 wq_destroy < 0x902000c wq_thread_logical_run < 0x9020014 wq_thread_request_initiate < 0x9020018 wq_thread_request_modify < 0x9100004 bsdthread_set_qos_self --- > 0x9000084 wq_deallocate_stack > 0x9000088 wq_allocate_stack > 0x9008070 wq_run_item > 0x9008074 wq_clean_thread > 0x9008078 wq_post_done > 0x900807c wq_stk_cleanup > 0x9008080 wq_tsd_cleanup > 0x9008084 wq_tsd_destructor > 0x9008088 wq_pthread_exit > 0x900808c wq_workqueue_exit 1559,1565c1511,1514 < 0x25080004 PERF_TK_Snap_Data < 0x25080008 PERF_TK_Snap_Data1_32 < 0x2508000c PERF_TK_Snap_Data2_32 < 0x25080010 PERF_TK_Info_Data < 0x25090000 PERF_LZ_MkRunnable < 0x25090040 PERF_LZ_WaitSample < 0x25090080 PERF_LZ_CPUSample --- > 0x25080004 PERF_TK_Snap_Data1 > 0x25080008 PERF_TK_Snap_Data2 > 0x2508000c PERF_TK_Snap_Data1_32 > 0x25080010 PERF_TK_Snap_Data2_32 1569d1517 < 0x250a000c PERF_MI_SysMem_Data_2 1631d1578 < 0x263b0028 imp_thread_qos_workq_override 1667,1681d1613 < 0x35100004 TURNSTILE_thread_added_to_turnstile_waitq < 0x35100008 TURNSTILE_thread_removed_from_turnstile_waitq < 0x3510000c TURNSTILE_thread_moved_in_turnstile_waitq < 0x35100010 TURNSTILE_turnstile_added_to_turnstile_heap < 0x35100014 TURNSTILE_turnstile_removed_from_turnstile_heap < 0x35100018 TURNSTILE_turnstile_moved_in_turnstile_heap < 0x3510001c TURNSTILE_turnstile_added_to_thread_heap < 0x35100020 TURNSTILE_turnstile_removed_from_thread_heap < 0x35100024 TURNSTILE_turnstile_moved_in_thread_heap < 0x35100028 TURNSTILE_update_stopped_by_limit < 0x3510002c TURNSTILE_thread_not_waiting_on_turnstile < 0x35200004 
TURNSTILE_turnstile_priority_change < 0x35200008 TURNSTILE_thread_user_promotion_change < 0x35300004 TURNSTILE_turnstile_prepare < 0x35300008 TURNSTILE_turnstile_complete
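The trace_codes additions are plain debugid-to-name mappings. Assuming the standard kdebug debugid layout (class in bits 31-24, subclass in bits 23-16, code in bits 15-2, function qualifier in bits 1-0), the new entries decompose as in this small sketch; the two IDs below are taken from the list above.

    #include <stdint.h>
    #include <stdio.h>

    static void
    decompose(uint32_t debugid)
    {
    	uint32_t cls       = (debugid >> 24) & 0xff;
    	uint32_t subclass  = (debugid >> 16) & 0xff;
    	uint32_t code      = (debugid >>  2) & 0x3fff;
    	uint32_t func_qual =  debugid        & 0x3;

    	printf("0x%08x -> class 0x%02x subclass 0x%02x code 0x%04x func %u\n",
    	    debugid, cls, subclass, code, func_qual);
    }

    int
    main(void)
    {
    	decompose(0x35100004);   /* TURNSTILE_thread_added_to_turnstile_waitq */
    	decompose(0x9010078);    /* wq_thread_terminate */
    	return 0;
    }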
./bsd/kern/sys_ulock.c differences detected: 56d55 < #include 90a90 > static lck_mtx_t ull_table_lock; 92,98c92,97 < typedef lck_spin_t ull_lock_t; < #define ull_lock_init(ull) lck_spin_init(&ull->ull_lock, ull_lck_grp, NULL) < #define ull_lock_destroy(ull) lck_spin_destroy(&ull->ull_lock, ull_lck_grp) < #define ull_lock(ull) lck_spin_lock(&ull->ull_lock) < #define ull_unlock(ull) lck_spin_unlock(&ull->ull_lock) < #define ull_assert_owned(ull) LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_OWNED) < #define ull_assert_notwned(ull) LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_NOTOWNED) --- > #define ull_global_lock() lck_mtx_lock(&ull_table_lock) > #define ull_global_unlock() lck_mtx_unlock(&ull_table_lock) > > #define ull_lock(ull) lck_mtx_lock(&ull->ull_lock) > #define ull_unlock(ull) lck_mtx_unlock(&ull->ull_lock) > #define ull_assert_owned(ull) LCK_MTX_ASSERT(&ull->ull_lock, LCK_MTX_ASSERT_OWNED) 123,124c122 < ull_lock_t ull_lock; < uint ull_bucket_index; --- > lck_mtx_t ull_lock; 128,129c126 < uint8_t ull_opcode; < struct turnstile *ull_turnstile; --- > struct promote_token ull_promote_token; 130a128 > uint8_t ull_opcode; 132a131,132 > static const bool ull_debug = false; > 136c136 < static ull_t *ull_get(ulk_t *, uint32_t, ull_t **); --- > static ull_t *ull_get(ulk_t *, uint32_t); 138a139,140 > static thread_t ull_promote_owner_locked(ull_t* ull, thread_t thread); > 155c157 < kprintf("ull_turnstile\t%p\n\n", ull->ull_turnstile); --- > kprintf("ull_promote_token\t%d, %d\n\n", ull->ull_promote_token.pt_basepri, ull->ull_promote_token.pt_qos); 159,163d160 < typedef struct ull_bucket { < queue_head_t ulb_head; < lck_spin_t ulb_lock; < } ull_bucket_t; < 165c162 < static ull_bucket_t *ull_bucket; --- > static queue_head_t *ull_bucket; 169,171d165 < #define ull_bucket_lock(i) lck_spin_lock(&ull_bucket[i].ulb_lock) < #define ull_bucket_unlock(i) lck_spin_unlock(&ull_bucket[i].ulb_lock) < 193a188 > lck_mtx_init(&ull_table_lock, ull_lck_grp, NULL); 204c199 < ull_bucket = (ull_bucket_t *)kalloc(sizeof(ull_bucket_t) * ull_hash_buckets); --- > ull_bucket = (queue_head_t *)kalloc(sizeof(queue_head_t) * ull_hash_buckets); 208,209c203 < queue_init(&ull_bucket[i].ulb_head); < lck_spin_init(&ull_bucket[i].ulb_lock, ull_lck_grp, NULL); --- > queue_init(&ull_bucket[i]); 226a221 > ull_global_lock(); 232,233c227 < ull_bucket_lock(i); < if (!queue_empty(&ull_bucket[i].ulb_head)) { --- > if (!queue_empty(&ull_bucket[i])) { 238c232 < qe_foreach_element(elem, &ull_bucket[i].ulb_head, ull_hash_link) { --- > qe_foreach_element(elem, &ull_bucket[i], ull_hash_link) { 245d238 < ull_bucket_unlock(i); 250a244 > ull_global_unlock(); 264d257 < ull->ull_bucket_index = ULL_INDEX(key); 270c263 < ull->ull_turnstile = TURNSTILE_NULL; --- > ull->ull_promote_token = PROMOTE_TOKEN_INIT; 272c265 < ull_lock_init(ull); --- > lck_mtx_init(&ull->ull_lock, ull_lck_grp, NULL); 282d274 < assert(ull->ull_turnstile == TURNSTILE_NULL); 284c276 < ull_assert_notwned(ull); --- > LCK_MTX_ASSERT(&ull->ull_lock, LCK_ASSERT_NOTOWNED); 286c278 < ull_lock_destroy(ull); --- > lck_mtx_destroy(&ull->ull_lock, ull_lck_grp); 293a286,287 > * > * TODO: Per-bucket lock to reduce contention on global lock 296c290 < ull_get(ulk_t *key, uint32_t flags, ull_t **unused_ull) --- > ull_get(ulk_t *key, uint32_t flags) 300d293 < ull_t *new_ull = (flags & ULL_MUST_EXIST) ? 
NULL : ull_alloc(key); 302,304c295,296 < < ull_bucket_lock(i); < qe_foreach_element(elem, &ull_bucket[i].ulb_head, ull_hash_link) { --- > ull_global_lock(); > qe_foreach_element(elem, &ull_bucket[i], ull_hash_link) { 316,318c308 < ull_bucket_unlock(i); < assert(new_ull == NULL); < assert(unused_ull == NULL); --- > ull_global_unlock(); 322,324c312,320 < if (new_ull == NULL) { < /* Alloc above failed */ < ull_bucket_unlock(i); --- > /* NRG maybe drop the ull_global_lock before the kalloc, > * then take the lock and check again for a key match > * and either use the new ull_t or free it. > */ > > ull = ull_alloc(key); > > if (ull == NULL) { > ull_global_unlock(); 328d323 < ull = new_ull; 330,335c325,326 < enqueue(&ull_bucket[i].ulb_head, &ull->ull_hash_link); < } else if (!(flags & ULL_MUST_EXIST)) { < assert(new_ull); < assert(unused_ull); < assert(*unused_ull == NULL); < *unused_ull = new_ull; --- > > enqueue(&ull_bucket[i], &ull->ull_hash_link); 340c331 < ull_bucket_unlock(i); --- > ull_global_unlock(); 360c351 < ull_bucket_lock(ull->ull_bucket_index); --- > ull_global_lock(); 362,373c353 < ull_bucket_unlock(ull->ull_bucket_index); < < ull_free(ull); < } < < static void ulock_wait_continue(void *, wait_result_t); < static void ulock_wait_cleanup(ull_t *, thread_t, thread_t, int32_t *); < < inline static int < wait_result_to_return_code(wait_result_t wr) < { < int ret = 0; --- > ull_global_unlock(); 375,385c355,358 < switch (wr) { < case THREAD_AWAKENED: < break; < case THREAD_TIMED_OUT: < ret = ETIMEDOUT; < break; < case THREAD_INTERRUPTED: < case THREAD_RESTART: < default: < ret = EINTR; < break; --- > #if DEVELOPMENT || DEBUG > if (ull_debug) { > kprintf("%s>", __FUNCTION__); > ull_dump(ull); 387,388c360,361 < < return ret; --- > #endif > ull_free(ull); 396,400d368 < < if (flags & ULF_WAIT_CANCEL_POINT) { < __pthread_testcancel(1); < } < 402a371 > int id = thread_tid(self); 407a377,378 > thread_t old_lingering_owner = THREAD_NULL; > sched_call_t workq_callback = NULL; 409c380,382 < ull_t *unused_ull = NULL; --- > if (ull_debug) { > kprintf("[%d]%s>ENTER opcode %d addr %llx value %llx timeout %d flags %x\n", id, __FUNCTION__, opcode, (unsigned long long)(args->addr), args->value, args->timeout, flags); > } 424a398,402 > if (ull_debug) { > kprintf("[%d]%s>EINVAL opcode %d addr 0x%llx flags 0x%x\n", > id, __FUNCTION__, opcode, > (unsigned long long)(args->addr), flags); > } 440c418,423 < ull_t *ull = ull_get(&key, 0, &unused_ull); --- > if (flags & ULF_WAIT_WORKQ_DATA_CONTENTION) { > workq_callback = workqueue_get_sched_callback(); > workq_callback = thread_disable_sched_call(self, workq_callback); > } > > ull_t *ull = ull_get(&key, 0); 455a439 > ull_unlock(ull); 457c441 < goto out_locked; --- > goto out; 465c449 < * holding the ull spinlock across copyin forces any --- > * fake it by disabling preemption across copyin, which forces any 469a454 > disable_preemption(); 470a456 > enable_preemption(); 483a470,471 > ull_unlock(ull); > 488c476 < goto out_locked; --- > goto out; 493c481,486 < goto out_locked; --- > ull_unlock(ull); > if (ull_debug) { > kprintf("[%d]%s>Lock value %d has changed from expected %d so bail out\n", > id, __FUNCTION__, value, (uint32_t)(args->value)); > } > goto out; 505a499 > ull_unlock(ull); 507c501 < goto out_locked; --- > goto out; 520,521c514,515 < * Therefore, I can ask the turnstile to promote its priority, and I can rely < * on it to come by later to issue the wakeup and lose its promotion. 
--- > * Therefore, I can promote its priority to match mine, and I can rely on it to > * come by later to issue the wakeup and lose its promotion. 524,532c518 < /* Return the +1 ref from the ull_owner field */ < old_owner = ull->ull_owner; < ull->ull_owner = THREAD_NULL; < < if (owner_thread != THREAD_NULL) { < /* The ull_owner field now owns a +1 ref on owner_thread */ < thread_reference(owner_thread); < ull->ull_owner = owner_thread; < } --- > old_owner = ull_promote_owner_locked(ull, owner_thread); 537,542d522 < uint64_t deadline = TIMEOUT_WAIT_FOREVER; < wait_interrupt_t interruptible = THREAD_ABORTSAFE; < struct turnstile *ts; < < ts = turnstile_prepare((uintptr_t)ull, &ull->ull_turnstile, < TURNSTILE_NULL, TURNSTILE_ULOCK); 544,548d523 < < if (flags & ULF_WAIT_WORKQ_DATA_CONTENTION) { < interruptible |= THREAD_WAIT_NOREPORT; < } < 550c525,527 < clock_interval_to_deadline(timeout, NSEC_PER_USEC, &deadline); --- > wr = assert_wait_timeout(ULOCK_TO_EVENT(ull), THREAD_ABORTSAFE, timeout, NSEC_PER_USEC); > } else { > wr = assert_wait(ULOCK_TO_EVENT(ull), THREAD_ABORTSAFE); 553,558d529 < turnstile_update_inheritor(ts, owner_thread, < (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD)); < < wr = waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)), < interruptible, deadline); < 561,563c532,533 < if (unused_ull) { < ull_free(unused_ull); < unused_ull = NULL; --- > if (ull_debug) { > kprintf("[%d]%s>after assert_wait() returned %d\n", id, __FUNCTION__, wr); 566,580c536,542 < turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD); < < if (wr == THREAD_WAITING) { < uthread_t uthread = (uthread_t)get_bsdthread_info(self); < uthread->uu_save.uus_ulock_wait_data.retval = retval; < uthread->uu_save.uus_ulock_wait_data.flags = flags; < uthread->uu_save.uus_ulock_wait_data.owner_thread = owner_thread; < uthread->uu_save.uus_ulock_wait_data.old_owner = old_owner; < if (set_owner && owner_thread != THREAD_NULL) { < thread_handoff_parameter(owner_thread, ulock_wait_continue, ull); < } else { < assert(owner_thread == THREAD_NULL); < thread_block_parameter(ulock_wait_continue, ull); < } < /* NOT REACHED */ --- > if (set_owner && owner_thread != THREAD_NULL && wr == THREAD_WAITING) { > wr = thread_handoff(owner_thread); > /* owner_thread ref is consumed */ > owner_thread = THREAD_NULL; > } else { > /* NRG At some point this should be a continuation based block, so that we can avoid saving the full kernel context. 
*/ > wr = thread_block(NULL); 582,593c544,545 < < ret = wait_result_to_return_code(wr); < < ull_lock(ull); < turnstile_complete((uintptr_t)ull, &ull->ull_turnstile, NULL); < < out_locked: < ulock_wait_cleanup(ull, owner_thread, old_owner, retval); < < if (unused_ull) { < ull_free(unused_ull); < unused_ull = NULL; --- > if (ull_debug) { > kprintf("[%d]%s>thread_block() returned %d\n", id, __FUNCTION__, wr); 595,601c547,557 < < assert(*retval >= 0); < < munge_retval: < if ((flags & ULF_NO_ERRNO) && (ret != 0)) { < *retval = -ret; < ret = 0; --- > switch (wr) { > case THREAD_AWAKENED: > break; > case THREAD_TIMED_OUT: > ret = ETIMEDOUT; > break; > case THREAD_INTERRUPTED: > case THREAD_RESTART: > default: > ret = EINTR; > break; 603,614d558 < return ret; < } < < /* < * Must be called with ull_lock held < */ < static void < ulock_wait_cleanup(ull_t *ull, thread_t owner_thread, thread_t old_owner, int32_t *retval) < { < ull_assert_owned(ull); < < thread_t old_lingering_owner = THREAD_NULL; 615a560,561 > out: > ull_lock(ull); 623,624c569,573 < old_lingering_owner = ull->ull_owner; < ull->ull_owner = THREAD_NULL; --- > if (ull->ull_owner != THREAD_NULL) { > old_lingering_owner = ull_promote_owner_locked(ull, THREAD_NULL); > } > > assert(ull->ull_owner == THREAD_NULL); 633,635d581 < /* Need to be called after dropping the interlock */ < turnstile_cleanup(); < 649,663d594 < } < < __attribute__((noreturn)) < static void < ulock_wait_continue(void * parameter, wait_result_t wr) < { < thread_t self = current_thread(); < uthread_t uthread = (uthread_t)get_bsdthread_info(self); < int ret = 0; < < ull_t *ull = (ull_t *)parameter; < int32_t *retval = uthread->uu_save.uus_ulock_wait_data.retval; < uint flags = uthread->uu_save.uus_ulock_wait_data.flags; < thread_t owner_thread = uthread->uu_save.uus_ulock_wait_data.owner_thread; < thread_t old_owner = uthread->uu_save.uus_ulock_wait_data.old_owner; 665,670c596,599 < ret = wait_result_to_return_code(wr); < < ull_lock(ull); < turnstile_complete((uintptr_t)ull, &ull->ull_turnstile, NULL); < < ulock_wait_cleanup(ull, owner_thread, old_owner, retval); --- > munge_retval: > if (workq_callback) { > thread_reenable_sched_call(self, workq_callback); > } 676,677c605 < < unix_syscall_return(ret); --- > return ret; 685a614 > int id = thread_tid(current_thread()); 691a621,625 > if (ull_debug) { > kprintf("[%d]%s>ENTER opcode %d addr %llx flags %x\n", > id, __FUNCTION__, opcode, (unsigned long long)(args->addr), flags); > } > 731c665 < ull_t *ull = ull_get(&key, ULL_MUST_EXIST, NULL); --- > ull_t *ull = ull_get(&key, ULL_MUST_EXIST); 749a684,687 > if (ull_debug) { > kprintf("[%d]%s>EINVAL opcode %d addr 0x%llx flags 0x%x\n", > id, __FUNCTION__, opcode, (unsigned long long)(args->addr), flags); > } 754a693,696 > if (ull_debug) { > kprintf("[%d]%s>EDOM - opcode mismatch - opcode %d addr 0x%llx flags 0x%x\n", > id, __FUNCTION__, opcode, (unsigned long long)(args->addr), flags); > } 763,766d704 < struct turnstile *ts; < ts = turnstile_prepare((uintptr_t)ull, &ull->ull_turnstile, < TURNSTILE_NULL, TURNSTILE_ULOCK); < 768,769c706 < waitq_wakeup64_all(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)), < THREAD_AWAKENED, 0); --- > thread_wakeup(ULOCK_TO_EVENT(ull)); 771,772c708 < kern_return_t kr = waitq_wakeup64_thread(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)), < wake_thread, THREAD_AWAKENED); --- > kern_return_t kr = thread_wakeup_thread(ULOCK_TO_EVENT(ull), wake_thread); 785,786c721 < waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)), < 
THREAD_AWAKENED, WAITQ_SELECT_MAX_PRI); --- > thread_wakeup_one_with_pri(ULOCK_TO_EVENT(ull), WAITQ_SELECT_MAX_PRI); 800,804c735 < turnstile_update_inheritor(ts, THREAD_NULL, < (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); < turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); < old_owner = ull->ull_owner; < ull->ull_owner = THREAD_NULL; --- > old_owner = ull_promote_owner_locked(ull, THREAD_NULL); 807,808d737 < turnstile_complete((uintptr_t)ull, &ull->ull_turnstile, NULL); < 812,814d740 < /* Need to be called after dropping the interlock */ < turnstile_cleanup(); < 830a757,796 > /* > * Change ull_owner to be new_owner, and update it with the properties > * of the current thread. > * > * Records the highest current promotion value in ull_promote_token, and applies that > * to any new owner. > * > * Returns +1 ref to the old ull_owner if it is going away. > */ > static thread_t > ull_promote_owner_locked(ull_t* ull, > thread_t new_owner) > { > if (new_owner != THREAD_NULL && ull->ull_owner == new_owner) { > thread_user_promotion_update(new_owner, current_thread(), &ull->ull_promote_token); > return THREAD_NULL; > } > > thread_t old_owner = ull->ull_owner; > ull->ull_owner = THREAD_NULL; > > if (new_owner != THREAD_NULL) { > /* The ull_owner field now owns a +1 ref on thread */ > thread_reference(new_owner); > ull->ull_owner = new_owner; > > thread_user_promotion_add(new_owner, current_thread(), &ull->ull_promote_token); > } else { > /* No new owner - clear the saturated promotion value */ > ull->ull_promote_token = PROMOTE_TOKEN_INIT; > } > > if (old_owner != THREAD_NULL) { > thread_user_promotion_drop(old_owner); > } > > /* Return the +1 ref from the ull_owner field */ > return old_owner; > } >
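One side of the sys_ulock.c diff above replaces the single global ull_table_lock with per-bucket locks (an ull_bucket_t carrying its own ulb_lock) and moves the wait path onto turnstiles (turnstile_prepare()/turnstile_update_inheritor() in place of the promote-token calls, plus a continuation in ulock_wait_continue()). The turnstile machinery is kernel-internal, but the per-bucket locking idea can be sketched in user space; pthread mutexes stand in for the kernel's lck_spin_t, the bucket count is arbitrary, and the names below are illustrative rather than the kernel's:

#include <pthread.h>
#include <stdint.h>
#include <sys/queue.h>

#define NBUCKETS 64

struct ull;                         /* per-lock element, elided in this sketch */
LIST_HEAD(ull_head, ull);

struct ull_bucket {
    struct ull_head ulb_head;       /* chain of elements hashing to this slot */
    pthread_mutex_t ulb_lock;       /* protects only this chain, not the table */
};

static struct ull_bucket ull_bucket[NBUCKETS];

void
ull_hash_init(void)
{
    for (int i = 0; i < NBUCKETS; i++) {
        LIST_INIT(&ull_bucket[i].ulb_head);
        pthread_mutex_init(&ull_bucket[i].ulb_lock, NULL);
    }
}

unsigned
ull_hash(uintptr_t addr)
{
    return (unsigned)((addr >> 4) % NBUCKETS);   /* toy hash, for illustration */
}

Lookups then take only ull_bucket[ull_hash(addr)].ulb_lock, so threads contending on unrelated addresses no longer serialize on one table-wide lock.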
./bsd/kern/uipc_syscalls.c differences detected: 1738c1738 < unsigned char tmp_buffer[CMSG_SPACE(sizeof(struct user64_timeval))] = {}; --- > unsigned char tmp_buffer[CMSG_SPACE(sizeof(struct user64_timeval))];
NO DIFFS in ./bsd/kern/mcache.c
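The one-line change to ./bsd/kern/uipc_syscalls.c above zero-initializes the CMSG_SPACE()-sized scratch buffer; since CMSG_SPACE() includes alignment padding, an uninitialized buffer could otherwise carry stale stack bytes when the control message is copied out. A hypothetical user-space version of the same pattern (not from the diff) looks like this:

#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>

/* Caller passes buf sized as unsigned char buf[CMSG_SPACE(sizeof(struct timeval))]. */
void
fill_timestamp_cmsg(struct msghdr *msg, unsigned char *buf, size_t buflen,
    const struct timeval *tv)
{
    memset(buf, 0, buflen);             /* the user-space equivalent of '= {}' */
    msg->msg_control = buf;
    msg->msg_controllen = buflen;

    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
    if (cmsg == NULL) {
        return;                         /* buffer too small for any header */
    }
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_TIMESTAMP;
    cmsg->cmsg_len = CMSG_LEN(sizeof(*tv));
    memcpy(CMSG_DATA(cmsg), tv, sizeof(*tv));
}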

./bsd/kern/kern_kpc.c differences detected: 69,72c69,86 < /* < * Another element is needed to hold the CPU number when getting counter values. < */ < #define KPC_MAX_BUF_LEN (KPC_MAX_COUNTERS_COPIED + 1) --- > #if defined(__x86_64__) > /* 18 cores, 7 counters each */ > #define KPC_MAX_COUNTERS_COPIED (18 * 7) > #elif defined(__arm64__) > #include > #if defined(CPU_COUNT) > #define KPC_MAX_COUNTERS_COPIED (CPU_COUNT * 10) > #else /* defined(CPU_COUNT) */ > #define KPC_MAX_COUNTERS_COPIED (2 * 10) > #endif /* !defined(CPU_COUNT) */ > #elif defined(__arm__) > #define KPC_MAX_COUNTERS_COPIED (16) > #else /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */ > #error "unknown architecture for kpc buffer sizes" > #endif /* !defined(__arm__) && !defined(__arm64__) && !defined(__x86_64__) */ > > static_assert((KPC_MAX_COUNTERS_COPIED * sizeof(uint64_t)) < 1024, > "kpc's stack could grow too large"); 90,112d103 < static uint64_t * < kpc_get_bigarray(uint32_t *size_out) < { < static uint64_t *bigarray = NULL; < < LCK_MTX_ASSERT(&sysctl_lock, LCK_MTX_ASSERT_OWNED); < < uint32_t size = kpc_get_counterbuf_size() + sizeof(uint64_t); < *size_out = size; < < if (bigarray) { < return bigarray; < } < < /* < * Another element is needed to hold the CPU number when getting counter < * values. < */ < bigarray = kalloc_tag(size, VM_KERN_MEMORY_DIAG); < assert(bigarray != NULL); < return bigarray; < } < 288,289c279,280 < uint32_t bufsize = 0; < uint64_t *buf = kpc_get_bigarray(&bufsize); --- > uint64_t buf[KPC_MAX_COUNTERS_COPIED] = {}; > uint32_t bufsize = sizeof(buf); 298c289 < error = get_fn(arg, &bufsize, buf); --- > error = get_fn(arg, &bufsize, &buf); 300c291 < error = SYSCTL_OUT(req, buf, bufsize); --- > error = SYSCTL_OUT(req, &buf, bufsize); 329a321,322 > uint64_t buf[KPC_MAX_COUNTERS_COPIED] = {}; > uint32_t bufsize = sizeof(buf); 332,334d324 < uint32_t bufsize = 0; < uint64_t *buf = kpc_get_bigarray(&bufsize); < 350c340 < error = SYSCTL_IN(req, buf, regsize); --- > error = SYSCTL_IN(req, &buf, regsize); 354c344 < error = set_fn((uint32_t)arg, buf); --- > error = set_fn((uint32_t)arg, &buf); 363c353 < error = get_fn((uint32_t)arg, buf); --- > error = get_fn((uint32_t)arg, &buf); 368c358 < error = SYSCTL_OUT(req, buf, regsize); --- > error = SYSCTL_OUT(req, &buf, regsize); 382c372 < if (!kpc_initted) { --- > if( !kpc_initted ) 384,388d373 < } < < if (!kpc_supported) { < return ENOTSUP; < } 504c489 < CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_ANYBODY, 509c494 < CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 514c499 < CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 519c504 < CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_ANYBODY, 525c510 < CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 530c515 < CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 535c520 < CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, 541c526 < CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY, 547c532 < CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > 
CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY, 553c538 < CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY, 559c544 < CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY, 565c550 < CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY, 571c556 < CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, --- > CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY,
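The kern_kpc.c diff above trades the fixed, per-architecture stack arrays (sized by KPC_MAX_COUNTERS_COPIED) for a single buffer that is allocated lazily under the sysctl lock and reused for every request, with one extra element reserved for the CPU number. A rough user-space sketch of that buffer strategy, with hypothetical names, a stand-in size, and a pthread mutex playing the role of the serializing sysctl lock:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

static pthread_mutex_t sysctl_lock = PTHREAD_MUTEX_INITIALIZER;

static size_t counterbuf_elems(void) { return 18 * 7; }   /* stand-in value */

/* Caller must hold sysctl_lock, which makes the lazy allocation race-free. */
static uint64_t *
get_bigarray(size_t *elems_out)
{
    static uint64_t *bigarray;                  /* allocated once, then reused */
    size_t elems = counterbuf_elems() + 1;      /* +1 slot for the CPU number */

    *elems_out = elems;
    if (bigarray == NULL) {
        bigarray = calloc(elems, sizeof(uint64_t));
    }
    return bigarray;
}

int
read_counters_into_shared_buffer(void)
{
    pthread_mutex_lock(&sysctl_lock);
    size_t elems = 0;
    uint64_t *buf = get_bigarray(&elems);
    int err = (buf == NULL) ? ENOMEM : 0;
    /* ... on success, fill buf[0..elems-2] with counters, buf[elems-1] with the CPU ... */
    pthread_mutex_unlock(&sysctl_lock);
    return err;
}

The diff also adds an early "if (!kpc_supported) return ENOTSUP;" check to the sysctl entry point, so unsupported hardware bails out before touching the buffer at all.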
./bsd/kern/kern_sysctl.c differences detected: 125d124 < #include 189a189,190 > extern unsigned int vm_page_speculative_percentage; > extern unsigned int vm_page_speculative_q_age_ms; 307,313d307 < #ifdef CONFIG_XNUPOST < #include < < STATIC int sysctl_debug_test_oslog_ctl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); < STATIC int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); < STATIC int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); < #endif 1278d1271 < vm_size_t alloc_size = 0; 1393,1394c1386,1387 < alloc_size = round_page(arg_size); < ret = kmem_alloc(kernel_map, ©_start, alloc_size, VM_KERN_MEMORY_BSD); --- > > ret = kmem_alloc(kernel_map, ©_start, round_page(arg_size), VM_KERN_MEMORY_BSD); 1399d1391 < bzero((void *)copy_start, alloc_size); 1633,1637d1624 < < SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig, < CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED, < &osbuild_config[0], 0, ""); < 1707,1728d1693 < static uint64_t osproductversion_string[48]; < < STATIC int < sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) < { < if (req->newptr != 0) { < /* < * Can only ever be set by launchd, and only once at boot. < */ < if (req->p->p_pid != 1 || osproductversion_string[0] != '\0') { < return EPERM; < } < } < < return sysctl_handle_string(oidp, arg1, arg2, req); < } < < SYSCTL_PROC(_kern, OID_AUTO, osproductversion, < CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, < osproductversion_string, sizeof(osproductversion_string), < sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist"); < 1774,1788d1738 < STATIC int < sysctl_kernelcacheuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) < { < int rval = ENOENT; < if (kernelcache_uuid_valid) { < rval = sysctl_handle_string(oidp, arg1, arg2, req); < } < return rval; < } < < SYSCTL_PROC(_kern, OID_AUTO, kernelcacheuuid, < CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, < kernelcache_uuid_string, sizeof(kernelcache_uuid_string), < sysctl_kernelcacheuuid, "A", ""); < 1871,1874d1820 < extern int sched_allow_rt_smt; < SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt, < CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED, < &sched_allow_rt_smt, 0, ""); 2048c1994 < &vm_pageout_state.vm_page_speculative_percentage, 0, ""); --- > &vm_page_speculative_percentage, 0, ""); 2052c1998 < &vm_pageout_state.vm_page_speculative_q_age_ms, 0, ""); --- > &vm_page_speculative_q_age_ms, 0, ""); 2250c2196 < LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES --- > LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, PAUSES 2278,2281d2223 < SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval, < CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, < (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", ""); < 2285,2288d2226 < SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses, < CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, < (void *) PAUSES, 0, sysctl_timer, "Q", ""); < 2313a2252,2254 > SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses, > CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, > (void *) PAUSES, 0, sysctl_timer, "Q", ""); 2616,2931d2556 < #ifdef CONFIG_XNUPOST < < extern int xnupost_export_testdata(void *outp, uint32_t size, uint32_t *lenp); < extern uint32_t xnupost_get_estimated_testdata_size(void); < < extern int xnupost_reset_all_tests(void); < < STATIC int < 
sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS < { < /* fixup unused arguments warnings */ < __unused int _oa2 = arg2; < __unused void * _oa1 = arg1; < __unused struct sysctl_oid * _oidp = oidp; < < int error = 0; < user_addr_t oldp = 0; < user_addr_t newp = 0; < uint32_t usedbytes = 0; < < oldp = req->oldptr; < newp = req->newptr; < < if (newp) < return ENOTSUP; < < if ((void *)oldp == NULL) { < /* return estimated size for second call where info can be placed */ < req->oldidx = xnupost_get_estimated_testdata_size(); < } else { < error = xnupost_export_testdata((void *)oldp, req->oldlen, &usedbytes); < req->oldidx = usedbytes; < } < < return error; < } < < SYSCTL_PROC(_debug, < OID_AUTO, < xnupost_get_tests, < CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED, < 0, < 0, < sysctl_handle_xnupost_get_tests, < "-", < "read xnupost test data in kernel"); < < STATIC int < sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS < { < /* fixup unused arguments warnings */ < __unused int _oa2 = arg2; < __unused void * _oa1 = arg1; < __unused struct sysctl_oid * _oidp = oidp; < < #define ARRCOUNT 4 < /* < * INPUT: ACTION, PARAM1, PARAM2, PARAM3 < * OUTPUT: RESULTCODE, ADDITIONAL DATA < */ < int32_t outval[ARRCOUNT] = {0}; < int32_t input[ARRCOUNT] = {0}; < int32_t out_size = sizeof(outval); < int32_t in_size = sizeof(input); < int error = 0; < < /* if this is NULL call to find out size, send out size info */ < if (!req->newptr) { < goto out; < } < < /* pull in provided value from userspace */ < error = SYSCTL_IN(req, &input[0], in_size); < if (error) < return error; < < if (input[0] == XTCTL_RESET_TESTDATA) { < outval[0] = xnupost_reset_all_tests(); < goto out; < } < < out: < error = SYSCTL_OUT(req, &outval[0], out_size); < return error; < } < < SYSCTL_PROC(_debug, < OID_AUTO, < xnupost_testctl, < CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, < 0, < 0, < sysctl_debug_xnupost_ctl, < "I", < "xnupost control for kernel testing"); < < extern void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t arraycount); < < STATIC int < sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * arg1, __unused int arg2, struct sysctl_req * req) < { < #define ARRCOUNT 4 < int32_t outval[ARRCOUNT] = {0}; < int32_t input[ARRCOUNT] = {0}; < int32_t size_outval = sizeof(outval); < int32_t size_inval = sizeof(input); < int32_t error; < < /* if this is NULL call to find out size, send out size info */ < if (!req->newptr) { < error = SYSCTL_OUT(req, &outval[0], size_outval); < return error; < } < < /* pull in provided value from userspace */ < error = SYSCTL_IN(req, &input[0], size_inval); < if (error) < return error; < < test_oslog_handleOSLogCtl(input, outval, ARRCOUNT); < < error = SYSCTL_OUT(req, &outval[0], size_outval); < < return error; < } < < SYSCTL_PROC(_debug, < OID_AUTO, < test_OSLogCtl, < CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, < 0, < 0, < sysctl_debug_test_oslog_ctl, < "I", < "testing oslog in kernel"); < < #include < #include < < extern lck_grp_t * sysctl_debug_test_stackshot_owner_grp; /* used for both mutexes and rwlocks */ < extern lck_mtx_t * sysctl_debug_test_stackshot_owner_init_mtx; /* used to protect lck_*_init */ < < /* This is a sysctl for testing collection of owner info on a lock in kernel space. 
A multi-threaded < * test from userland sets this sysctl in such a way that a thread blocks in kernel mode, and a < * stackshot is taken to see if the owner of the lock can be identified. < * < * We can't return to userland with a kernel lock held, so be sure to unlock before we leave. < * the semaphores allow us to artificially create cases where the lock is being held and the < * thread is hanging / taking a long time to do something. */ < < volatile char sysctl_debug_test_stackshot_mtx_inited = 0; < semaphore_t sysctl_debug_test_stackshot_mutex_sem; < lck_mtx_t sysctl_debug_test_stackshot_owner_lck; < < #define SYSCTL_DEBUG_MTX_ACQUIRE_WAIT 1 < #define SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT 2 < #define SYSCTL_DEBUG_MTX_SIGNAL 3 < #define SYSCTL_DEBUG_MTX_TEARDOWN 4 < < STATIC int < sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) < { < long long option = -1; < /* if the user tries to read the sysctl, we tell them what the address of the lock is (to test against stackshot's output) */ < long long mtx_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_lck); < int error = sysctl_io_number(req, mtx_unslid_addr, sizeof(long long), (void*)&option, NULL); < < lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); < if (!sysctl_debug_test_stackshot_mtx_inited) { < lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck, < sysctl_debug_test_stackshot_owner_grp, < LCK_ATTR_NULL); < semaphore_create(kernel_task, < &sysctl_debug_test_stackshot_mutex_sem, < SYNC_POLICY_FIFO, 0); < sysctl_debug_test_stackshot_mtx_inited = 1; < } < lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); < < if (!error) { < switch(option) { < case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT: < lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck); < lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck); < break; < case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT: < lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck); < semaphore_wait(sysctl_debug_test_stackshot_mutex_sem); < lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck); < break; < case SYSCTL_DEBUG_MTX_SIGNAL: < semaphore_signal(sysctl_debug_test_stackshot_mutex_sem); < break; < case SYSCTL_DEBUG_MTX_TEARDOWN: < lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); < < lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck, < sysctl_debug_test_stackshot_owner_grp); < semaphore_destroy(kernel_task, < sysctl_debug_test_stackshot_mutex_sem); < sysctl_debug_test_stackshot_mtx_inited = 0; < < lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); < break; < case -1: /* user just wanted to read the value, so do nothing */ < break; < default: < error = EINVAL; < break; < } < } < return error; < } < < /* we can't return to userland with a kernel rwlock held, so be sure to unlock before we leave. < * the semaphores allow us to artificially create cases where the lock is being held and the < * thread is hanging / taking a long time to do something. 
*/ < < SYSCTL_PROC(_debug, < OID_AUTO, < test_MutexOwnerCtl, < CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, < 0, < 0, < sysctl_debug_test_stackshot_mutex_owner, < "-", < "Testing mutex owner in kernel"); < < volatile char sysctl_debug_test_stackshot_rwlck_inited = 0; < lck_rw_t sysctl_debug_test_stackshot_owner_rwlck; < semaphore_t sysctl_debug_test_stackshot_rwlck_sem; < < #define SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT 1 < #define SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT 2 < #define SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT 3 < #define SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT 4 < #define SYSCTL_DEBUG_KRWLCK_SIGNAL 5 < #define SYSCTL_DEBUG_KRWLCK_TEARDOWN 6 < < STATIC int < sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) < { < long long option = -1; < /* if the user tries to read the sysctl, we tell them what the address of the lock is < * (to test against stackshot's output) */ < long long rwlck_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck); < int error = sysctl_io_number(req, rwlck_unslid_addr, sizeof(long long), (void*)&option, NULL); < < lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); < if (!sysctl_debug_test_stackshot_rwlck_inited) { < lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck, < sysctl_debug_test_stackshot_owner_grp, < LCK_ATTR_NULL); < semaphore_create(kernel_task, < &sysctl_debug_test_stackshot_rwlck_sem, < SYNC_POLICY_FIFO, < 0); < sysctl_debug_test_stackshot_rwlck_inited = 1; < } < lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); < < if (!error) { < switch(option) { < case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT: < lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); < lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); < break; < case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT: < lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); < semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem); < lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); < break; < case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT: < lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); < lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); < break; < case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT: < lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); < semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem); < lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); < break; < case SYSCTL_DEBUG_KRWLCK_SIGNAL: < semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem); < break; < case SYSCTL_DEBUG_KRWLCK_TEARDOWN: < lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); < < lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck, < sysctl_debug_test_stackshot_owner_grp); < semaphore_destroy(kernel_task, < sysctl_debug_test_stackshot_rwlck_sem); < sysctl_debug_test_stackshot_rwlck_inited = 0; < < lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); < break; < case -1: /* user just wanted to read the value, so do nothing */ < break; < default: < error = EINVAL; < break; < } < } < return error; < } < < < SYSCTL_PROC(_debug, < OID_AUTO, < test_RWLockOwnerCtl, < CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, < 0, < 0, < sysctl_debug_test_stackshot_rwlck_owner, < "-", < "Testing rwlock owner in kernel"); < #endif /* !CONFIG_XNUPOST */ 
2967d2591 < extern void memorystatus_disable_freeze(void); 2980c2604 < if (! VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { --- > if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) { 2995d2618 < memorystatus_disable_freeze(); 3001c2624 < SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", ""); --- > SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", ""); 3004,3050d2626 < #if DEVELOPMENT || DEBUG < extern int vm_num_swap_files_config; < extern int vm_num_swap_files; < extern lck_mtx_t vm_swap_data_lock; < #define VM_MAX_SWAP_FILE_NUM 100 < < static int < sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < int error = 0, val = vm_num_swap_files_config; < < error = sysctl_handle_int(oidp, &val, 0, req); < if (error || !req->newptr) { < goto out; < } < < if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { < printf("Swap is disabled\n"); < error = EINVAL; < goto out; < } < < lck_mtx_lock(&vm_swap_data_lock); < < if (val < vm_num_swap_files) { < printf("Cannot configure fewer swap files than already exist.\n"); < error = EINVAL; < lck_mtx_unlock(&vm_swap_data_lock); < goto out; < } < < if (val > VM_MAX_SWAP_FILE_NUM) { < printf("Capping number of swap files to upper bound.\n"); < val = VM_MAX_SWAP_FILE_NUM; < } < < vm_num_swap_files_config = val; < lck_mtx_unlock(&vm_swap_data_lock); < out: < < return (0); < } < < SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_config_num_swap_files, "I", ""); < #endif /* DEVELOPMENT || DEBUG */ < 3081c2657 < if (IS_64BIT_PROCESS(p)) { --- > if (IS_64BIT_PROCESS(p)) 3083d2658 < } 3219a2795 > extern uint32_t vm_page_filecache_min; 3222,3229c2798 < < SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min, 0, ""); < SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min, 0, ""); < < #if DEVELOPMENT || DEBUG < SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_filecache_min_divisor, 0, ""); < SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min_divisor, 0, ""); < #endif --- > SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, ""); 3244,3246d2812 < extern uint32_t vm_compressor_time_thread; < < #if DEVELOPMENT || DEBUG 3251,3256c2817,2818 < < extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden; < extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden; < extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden; < extern uint32_t vm_compressor_catchup_threshold_divisor_overridden; < --- > extern uint32_t vm_compressor_time_thread; > #if DEVELOPMENT || DEBUG 3258,3329d2819 < < < STATIC int < sysctl_minorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) < { < int new_value, changed; < int error = sysctl_io_number(req, vm_compressor_minorcompact_threshold_divisor, sizeof(int), &new_value, &changed); < < if (changed) { < vm_compressor_minorcompact_threshold_divisor = new_value; < vm_compressor_minorcompact_threshold_divisor_overridden = 1; < } < return(error); < } < < 
SYSCTL_PROC(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, < CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, < 0, 0, sysctl_minorcompact_threshold_divisor, "I", ""); < < < STATIC int < sysctl_majorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) < { < int new_value, changed; < int error = sysctl_io_number(req, vm_compressor_majorcompact_threshold_divisor, sizeof(int), &new_value, &changed); < < if (changed) { < vm_compressor_majorcompact_threshold_divisor = new_value; < vm_compressor_majorcompact_threshold_divisor_overridden = 1; < } < return(error); < } < < SYSCTL_PROC(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, < CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, < 0, 0, sysctl_majorcompact_threshold_divisor, "I", ""); < < < STATIC int < sysctl_unthrottle_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) < { < int new_value, changed; < int error = sysctl_io_number(req, vm_compressor_unthrottle_threshold_divisor, sizeof(int), &new_value, &changed); < < if (changed) { < vm_compressor_unthrottle_threshold_divisor = new_value; < vm_compressor_unthrottle_threshold_divisor_overridden = 1; < } < return(error); < } < < SYSCTL_PROC(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, < CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, < 0, 0, sysctl_unthrottle_threshold_divisor, "I", ""); < < < STATIC int < sysctl_catchup_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) < { < int new_value, changed; < int error = sysctl_io_number(req, vm_compressor_catchup_threshold_divisor, sizeof(int), &new_value, &changed); < < if (changed) { < vm_compressor_catchup_threshold_divisor = new_value; < vm_compressor_catchup_threshold_divisor_overridden = 1; < } < return(error); < } < < SYSCTL_PROC(_vm, OID_AUTO, compressor_catchup_threshold_divisor, < CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, < 0, 0, sysctl_catchup_threshold_divisor, "I", ""); 3332d2821 < 3348a2838,2841 > SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, ""); > SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, ""); > SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, ""); > SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, ""); 3463a2957,2958 > extern uint64_t vm_pageout_considered_bq_internal; > extern uint64_t vm_pageout_considered_bq_external; 3475,3476c2970,2971 < SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_internal, ""); < SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_vminfo.vm_pageout_considered_bq_external, ""); --- > SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_internal, ""); > SYSCTL_QUAD(_vm, OID_AUTO, vm_pageout_considered_bq_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_considered_bq_external, ""); 3480,3506c2975 < #endif /* CONFIG_BACKGROUND_QUEUE */ < < extern void vm_update_darkwake_mode(boolean_t); < extern boolean_t 
vm_darkwake_mode; < < STATIC int < sysctl_toggle_darkwake_mode(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) < { < int new_value, changed; < int error = sysctl_io_number(req, vm_darkwake_mode, sizeof(int), &new_value, &changed); < < if ( !error && changed) { < < if (new_value != 0 && new_value != 1) { < printf("Error: Invalid value passed to darkwake sysctl. Acceptable: 0 or 1.\n"); < error = EINVAL; < } else { < vm_update_darkwake_mode((boolean_t) new_value); < } < } < < return(error); < } < < SYSCTL_PROC(_vm, OID_AUTO, darkwake_mode, < CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, < 0, 0, sysctl_toggle_darkwake_mode, "I", ""); --- > #endif 3522a2992,2993 > extern uint32_t vm_grab_anon_overrides; > extern uint32_t vm_grab_anon_nops; 3524,3525c2995,2996 < SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_overrides, 0, ""); < SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_debug.vm_grab_anon_nops, 0, ""); --- > SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_overrides, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_overrides, 0, ""); > SYSCTL_INT(_vm, OID_AUTO, vm_grab_anon_nops, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_grab_anon_nops, 0, ""); 3598,3620d3068 < #if CONFIG_QUIESCE_COUNTER < static int < sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < < int error = sysctl_handle_int(oidp, &cpu_checkin_min_interval_us, 0, req); < if (error || !req->newptr) < return error; < < cpu_quiescent_counter_set_min_interval_us(cpu_checkin_min_interval_us); < < return 0; < } < < SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval, < CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, < 0, 0, < sysctl_cpu_quiescent_counter_interval, "I", < "Quiescent CPU checkin interval (microseconds)"); < #endif /* CONFIG_QUIESCE_COUNTER */ < < 3886a3335,3365 > /* > * This is set by core audio to tell tailspin (ie background tracing) how long > * its smallest buffer is. Background tracing can then try to make a reasonable > * decisions to try to avoid introducing so much latency that the buffers will > * underflow. 
> */ > > int min_audio_buffer_usec; > > STATIC int > sysctl_audio_buffer SYSCTL_HANDLER_ARGS > { > #pragma unused(oidp, arg1, arg2) > int err = 0, value = 0, changed = 0; > err = sysctl_io_number(req, min_audio_buffer_usec, sizeof(int), &value, &changed); > if (err) goto exit; > > if (changed) { > /* writing is protected by an entitlement */ > if (priv_check_cred(kauth_cred_get(), PRIV_AUDIO_LATENCY, 0) != 0) { > err = EPERM; > goto exit; > } > min_audio_buffer_usec = value; > } > exit: > return err; > } > > SYSCTL_PROC(_kern, OID_AUTO, min_audio_buffer_usec, CTLFLAG_RW | CTLFLAG_ANYBODY, 0, 0, sysctl_audio_buffer, "I", "Minimum audio buffer size, in microseconds"); > 4055,4058d3533 < extern int exc_resource_threads_enabled; < < SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_LOCKED, &exc_resource_threads_enabled, 0, "exc_resource thread limit enabled"); < 4103,4526d3577 < < < #if DEVELOPMENT || DEBUG < < static atomic_int wedge_thread_should_wake = 0; < < static int < unwedge_thread SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < int error, val = 0; < error = sysctl_handle_int(oidp, &val, 0, req); < if (error || val == 0) { < return error; < } < < atomic_store(&wedge_thread_should_wake, 1); < return 0; < } < < SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, unwedge_thread, "I", "unwedge the thread wedged by kern.wedge_thread"); < < extern uintptr_t phys_carveout_pa; < SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED, < &phys_carveout_pa, < "base physical address of the phys_carveout_mb boot-arg region"); < extern size_t phys_carveout_size; < SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED, < &phys_carveout_size, < "size in bytes of the phys_carveout_mb boot-arg region"); < < static int < wedge_thread SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < < int error, val = 0; < error = sysctl_handle_int(oidp, &val, 0, req); < if (error || val == 0) { < return error; < } < < uint64_t interval = 1; < nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval); < < atomic_store(&wedge_thread_should_wake, 0); < while (!atomic_load(&wedge_thread_should_wake)) { < tsleep1(NULL, 0, "wedge_thread", mach_absolute_time()+interval, NULL); < } < < return 0; < } < < SYSCTL_PROC(_kern, OID_AUTO, wedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, 0, 0, wedge_thread, "I", "wedge this thread so it cannot be cleaned up"); < < static int < sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS; < static int < sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS; < int < tstile_test_prim_lock(boolean_t use_hashtable); < int < tstile_test_prim_unlock(boolean_t use_hashtable); < < #define SYSCTL_TURNSTILE_TEST_DEFAULT 1 < #define SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE 2 < < static int < sysctl_turnstile_test_prim_lock SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < int error, val = 0; < error = sysctl_handle_int(oidp, &val, 0, req); < if (error || val == 0) { < return error; < } < boolean_t use_hashtable = (val == SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE) ? true : false; < return tstile_test_prim_lock(use_hashtable); < } < < static int < sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < int error, val = 0; < error = sysctl_handle_int(oidp, &val, 0, req); < if (error || val == 0) { < return error; < } < boolean_t use_hashtable = (val == SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE) ? 
true : false; < return tstile_test_prim_unlock(use_hashtable); < } < < SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, < 0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock"); < < SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, < 0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock"); < < int < turnstile_get_boost_stats_sysctl(void *req); < int < turnstile_get_unboost_stats_sysctl(void *req); < static int < sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS; < static int < sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS; < extern uint64_t thread_block_on_turnstile_count; < extern uint64_t thread_block_on_regular_waitq_count; < < static int < sysctl_turnstile_boost_stats SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2, oidp) < return turnstile_get_boost_stats_sysctl(req); < } < < static int < sysctl_turnstile_unboost_stats SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2, oidp) < return turnstile_get_unboost_stats_sysctl(req); < } < < SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT, < 0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats"); < SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT, < 0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats"); < SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile, < CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, < &thread_block_on_turnstile_count, "thread blocked on turnstile count"); < SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq, < CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, < &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count"); < < static int < sysctl_lck_mtx_test_lock SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < int error, val = 0; < error = sysctl_handle_int(oidp, &val, 0, req); < if (error || val == 0) { < return error; < } < < if (val == 1) { < lck_mtx_test_init(); < lck_mtx_test_lock(); < } < < return 0; < } < < static int < sysctl_lck_mtx_test_unlock SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < int error, val = 0; < error = sysctl_handle_int(oidp, &val, 0, req); < if (error || val == 0) { < return error; < } < < if (val == 1) { < lck_mtx_test_init(); < lck_mtx_test_unlock(); < } < < return 0; < } < < static int < sysctl_erase_all_test_mtx_stats SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < int error, val = 0; < error = sysctl_handle_int(oidp, &val, 0, req); < if (error || val == 0) { < return error; < } < < if (val == 1) { < lck_mtx_test_init(); < erase_all_test_mtx_stats(); < } < < return 0; < } < < static int < sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS < { < #pragma unused(oidp, arg1, arg2) < char* buffer; < int size, buffer_size, error; < < buffer_size = 1000; < buffer = kalloc(buffer_size); < if (!buffer) < panic("Impossible to allocate memory for %s\n", __func__); < < lck_mtx_test_init(); < < size = get_test_mtx_stats_string(buffer, buffer_size); < < error = sysctl_io_string(req, buffer, size, 0, NULL); < < kfree(buffer, buffer_size); < < return error; < } < < static int < sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS < { < #pragma unused(oidp, arg1, arg2) < char* buffer; < int buffer_size, offset, error, iter; < char input_val[40]; < < if (!req->newptr) { < return 
0; < } < < if (!req->oldptr) { < return EINVAL; < } < < if (req->newlen >= sizeof(input_val)) { < return EINVAL; < } < < error = SYSCTL_IN(req, input_val, req->newlen); < if (error) { < return error; < } < input_val[req->newlen] = '\0'; < < sscanf(input_val, "%d", &iter); < < if (iter <= 0) { < printf("%s requested %d iterations, not starting the test\n", __func__, iter); < return EINVAL; < } < < lck_mtx_test_init(); < < buffer_size = 2000; < offset = 0; < buffer = kalloc(buffer_size); < if (!buffer) < panic("Impossible to allocate memory for %s\n", __func__); < memset(buffer, 0, buffer_size); < < printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter); < < offset = snprintf(buffer, buffer_size, "STATS INNER LOOP"); < offset += lck_mtx_test_mtx_uncontended(iter, &buffer[offset], buffer_size - offset); < < offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP"); < offset += lck_mtx_test_mtx_uncontended_loop_time(iter, &buffer[offset], buffer_size - offset); < < error = SYSCTL_OUT(req, buffer, offset); < < kfree(buffer, buffer_size); < return error; < } < < static int < sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS < { < #pragma unused(oidp, arg1, arg2) < char* buffer; < int buffer_size, offset, error, iter; < char input_val[40]; < < printf("%s called\n", __func__); < < if (!req->newptr) { < return 0; < } < < if (!req->oldptr) { < return EINVAL; < } < < if (req->newlen >= sizeof(input_val)) { < return EINVAL; < } < < error = SYSCTL_IN(req, input_val, req->newlen); < if (error) { < return error; < } < input_val[req->newlen] = '\0'; < < sscanf(input_val, "%d", &iter); < < if (iter <= 0) { < printf("%s requested %d iterations, not starting the test\n", __func__, iter); < return EINVAL; < } < < lck_mtx_test_init(); < < erase_all_test_mtx_stats(); < < buffer_size = 1000; < offset = 0; < buffer = kalloc(buffer_size); < if (!buffer) < panic("Impossible to allocate memory for %s\n", __func__); < memset(buffer, 0, buffer_size); < < printf("%s starting contended mutex test with %d iterations\n", __func__, iter); < < offset = snprintf(buffer, buffer_size, "STATS INNER LOOP"); < offset += lck_mtx_test_mtx_contended(iter, &buffer[offset], buffer_size - offset); < < printf("%s starting contended mutex loop test with %d iterations\n", __func__, iter); < < offset += snprintf(&buffer[offset], buffer_size - offset, "\nSTATS OUTER LOOP"); < offset += lck_mtx_test_mtx_contended_loop_time(iter, &buffer[offset], buffer_size - offset); < < error = SYSCTL_OUT(req, buffer, offset); < < kfree(buffer, buffer_size); < < return error; < } < < SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_lock, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, < 0, 0, sysctl_lck_mtx_test_lock, "I", "lck mtx test lock"); < < SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_unlock, CTLFLAG_WR | CTLFLAG_MASKED |CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, < 0, 0, sysctl_lck_mtx_test_unlock, "I", "lck mtx test unlock"); < < SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED |CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, < 0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics"); < < SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED| CTLFLAG_KERN | CTLFLAG_LOCKED, < 0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics"); < < SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, < 0, 0, 
sysctl_test_mtx_contended, "A", "get statistics for contended mtx test"); < < SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, < 0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test"); < < #if defined (__x86_64__) < < semaphore_t sysctl_test_panic_with_thread_sem; < < #pragma clang diagnostic push < #pragma clang diagnostic ignored "-Winfinite-recursion" /* rdar://38801963 */ < __attribute__((noreturn)) < static void < panic_thread_test_child_spin(void * arg, wait_result_t wres) < { < static int panic_thread_recurse_count = 5; < < if (panic_thread_recurse_count > 0) { < panic_thread_recurse_count--; < panic_thread_test_child_spin(arg, wres); < } < < semaphore_signal(sysctl_test_panic_with_thread_sem); < while (1) { ; } < } < #pragma clang diagnostic pop < < static void < panic_thread_test_child_park(void * arg __unused, wait_result_t wres __unused) < { < int event; < < assert_wait(&event, THREAD_UNINT); < semaphore_signal(sysctl_test_panic_with_thread_sem); < thread_block(panic_thread_test_child_park); < } < < static int < sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS < { < #pragma unused(arg1, arg2) < int rval = 0; < char str[16] = { '\0' }; < thread_t child_thread = THREAD_NULL; < < rval = sysctl_handle_string(oidp, str, sizeof(str), req); < if (rval != 0 || !req->newptr) { < return EINVAL; < } < < semaphore_create(kernel_task, &sysctl_test_panic_with_thread_sem, SYNC_POLICY_FIFO, 0); < < /* Create thread to spin or park in continuation */ < if (strncmp("spin", str, strlen("spin")) == 0) { < if (kernel_thread_start(panic_thread_test_child_spin, NULL, &child_thread) != KERN_SUCCESS) { < semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem); < return EBUSY; < } < } else if (strncmp("continuation", str, strlen("continuation")) == 0) { < if (kernel_thread_start(panic_thread_test_child_park, NULL, &child_thread) != KERN_SUCCESS) { < semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem); < return EBUSY; < } < } else { < semaphore_destroy(kernel_task, sysctl_test_panic_with_thread_sem); < return EINVAL; < } < < semaphore_wait(sysctl_test_panic_with_thread_sem); < < panic_with_thread_context(0, NULL, 0, child_thread, "testing panic_with_thread_context for thread %p", child_thread); < < /* Not reached */ < return EINVAL; < } < < SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING, < 0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread"); < #endif /* defined (__x86_64__) */ < #endif /* DEVELOPMENT || DEBUG */
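Among the additions in the kern_sysctl.c diff is a kern.osproductversion string node whose handler only accepts a write from pid 1 (launchd) while the string is still empty, and is read-only afterwards. Reading it from user space needs nothing more than sysctlbyname(); a minimal sketch (macOS-specific, error handling reduced to perror):

#include <stdio.h>
#include <sys/sysctl.h>

int main(void)
{
    char version[48];               /* matches the kernel's osproductversion_string[48] */
    size_t len = sizeof(version);

    if (sysctlbyname("kern.osproductversion", version, &len, NULL, 0) != 0) {
        perror("sysctlbyname(kern.osproductversion)");
        return 1;
    }
    printf("ProductVersion: %s\n", version);
    return 0;
}

The same sysctlbyname() pattern applies to the other nodes the diff introduces (for example kern.kernelcacheuuid or, on DEVELOPMENT/DEBUG kernels, the turnstile and lck_mtx test controls), with write access gated by the flags shown in each SYSCTL_PROC declaration.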
./bsd/kern/kdebug.c differences detected: 185,190d184 < static void typefilter_allow_all(typefilter_t tf) < { < assert(tf != NULL); < memset(tf, ~0, KDBG_TYPEFILTER_BITMAP_SIZE); < } < 257,258d250 < static int kdbg_debug = 0; < 313a306 > extern char *proc_best_name(proc_t p); 616c609 < static boolean_t --- > boolean_t 636,637c629,630 < static void < enable_wrap(uint32_t old_slowcheck) --- > void > enable_wrap(uint32_t old_slowcheck, boolean_t lostevents) 646a640,642 > if (lostevents == TRUE) > kd_ctrl_page.kdebug_flags |= KDBG_WRAPPED; > 868c864 < --- > 870,872d865 < /* < * If there's a free page, grab it from the free list. < */ 878,881d870 < /* < * Otherwise, we're going to lose events and repurpose the oldest < * storage unit we can find. < */ 943,945c932 < if (kd_ctrl_page.oldest_time < oldest_ts) { < kd_ctrl_page.oldest_time = oldest_ts; < } --- > kd_ctrl_page.oldest_time = oldest_ts; 955c942 < kdsp_actual->kds_bufindx = 0; --- > kdsp_actual->kds_bufindx = 0; 1146,1192d1132 < /* < * Check if the given debug ID is allowed to be traced on the current process. < * < * Returns true if allowed and false otherwise. < */ < static inline bool < kdebug_debugid_procfilt_allowed(uint32_t debugid) < { < uint32_t procfilt_flags = kd_ctrl_page.kdebug_flags & < (KDBG_PIDCHECK | KDBG_PIDEXCLUDE); < < if (!procfilt_flags) { < return true; < } < < /* < * DBG_TRACE and MACH_SCHED tracepoints ignore the process filter. < */ < if ((debugid & 0xffff0000) == MACHDBG_CODE(DBG_MACH_SCHED, 0) || < (debugid >> 24 == DBG_TRACE)) { < return true; < } < < struct proc *curproc = current_proc(); < /* < * If the process is missing (early in boot), allow it. < */ < if (!curproc) { < return true; < } < < if (procfilt_flags & KDBG_PIDCHECK) { < /* < * Allow only processes marked with the kdebug bit. < */ < return curproc->p_kdebug; < } else if (procfilt_flags & KDBG_PIDEXCLUDE) { < /* < * Exclude any process marked with the kdebug bit. 
< */ < return !curproc->p_kdebug; < } else { < panic("kdebug: invalid procfilt flags %x", kd_ctrl_page.kdebug_flags); < __builtin_unreachable(); < } < } < 1195c1135,1136 < uint32_t debugid, --- > boolean_t only_filter, > uint32_t debugid, 1200,1201c1141 < uintptr_t arg5, < uint64_t flags) --- > uintptr_t arg5) 1203,1206c1143,1147 < uint64_t now; < uint32_t bindx; < kd_buf *kd; < int cpu; --- > struct proc *curproc; > uint64_t now; > uint32_t bindx; > kd_buf *kd; > int cpu; 1209,1211c1150 < union kds_ptr kds_raw; < bool only_filter = flags & KDBG_FLAG_FILTERED; < bool observe_procfilt = !(flags & KDBG_FLAG_NOPROCFILT); --- > union kds_ptr kds_raw; 1220,1222c1159,1181 < if (!ml_at_interrupt_context() && observe_procfilt && < !kdebug_debugid_procfilt_allowed(debugid)) { < goto out1; --- > if ( !ml_at_interrupt_context()) { > if (kd_ctrl_page.kdebug_flags & KDBG_PIDCHECK) { > /* > * If kdebug flag is not set for current proc, return > */ > curproc = current_proc(); > > if ((curproc && !(curproc->p_kdebug)) && > ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) && > (debugid >> 24 != DBG_TRACE)) > goto out1; > } > else if (kd_ctrl_page.kdebug_flags & KDBG_PIDEXCLUDE) { > /* > * If kdebug flag is set for current proc, return > */ > curproc = current_proc(); > > if ((curproc && curproc->p_kdebug) && > ((debugid & 0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE)) && > (debugid >> 24 != DBG_TRACE)) > goto out1; > } 1230c1189 < } else if (only_filter) { --- > } else if (only_filter == TRUE) { 1237c1196 < --- > 1245c1204 < --- > 1252c1211 < } else if (only_filter) { --- > } else if (only_filter == TRUE) { 1281c1240 < } --- > } 1293d1251 < 1341,1342c1299,1300 < kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, < (uintptr_t)thread_tid(current_thread()), 0); --- > kernel_debug_internal(FALSE, debugid, arg1, arg2, arg3, arg4, > (uintptr_t)thread_tid(current_thread())); 1354,1367c1312 < kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0); < } < < void < kernel_debug_flags( < uint32_t debugid, < uintptr_t arg1, < uintptr_t arg2, < uintptr_t arg3, < uintptr_t arg4, < uint64_t flags) < { < kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, < (uintptr_t)thread_tid(current_thread()), flags); --- > kernel_debug_internal(FALSE, debugid, arg1, arg2, arg3, arg4, arg5); 1372c1317 < uint32_t debugid, --- > uint32_t debugid, 1378c1323,1324 < kernel_debug_flags(debugid, arg1, arg2, arg3, arg4, KDBG_FLAG_FILTERED); --- > kernel_debug_internal(TRUE, debugid, arg1, arg2, arg3, arg4, > (uintptr_t)thread_tid(current_thread())); 1399,1402d1344 < if (!kdebug_enable) { < return; < } < 1415,1418c1357,1360 < kernel_debug_internal(debugid, str_buf[0], < str_buf[1], < str_buf[2], < str_buf[3], thread_id, 0); --- > kernel_debug_internal(FALSE, debugid, str_buf[0], > str_buf[1], > str_buf[2], > str_buf[3], thread_id); 1429,1432c1371,1374 < kernel_debug_internal(debugid, str_buf[i], < str_buf[i + 1], < str_buf[i + 2], < str_buf[i + 3], thread_id, 0); --- > kernel_debug_internal(FALSE, debugid, str_buf[i], > str_buf[i + 1], > str_buf[i + 2], > str_buf[i + 3], thread_id); 1602d1543 < VM_MAP_KERNEL_FLAGS_NONE, 1659,1661c1600,1605 < kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, < (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, < (uintptr_t)thread_tid(current_thread()), 0); --- > kernel_debug_internal(FALSE, uap->code, > (uintptr_t)uap->arg1, > (uintptr_t)uap->arg2, > (uintptr_t)uap->arg3, > (uintptr_t)uap->arg4, > 
(uintptr_t)thread_tid(current_thread())); 1706,1707c1650,1652 < kernel_debug_internal(trace_debugid | DBG_FUNC_START | DBG_FUNC_END, < (uintptr_t)debugid, (uintptr_t)str_id, 0, 0, thread_id, 0); --- > kernel_debug_internal(FALSE, trace_debugid | DBG_FUNC_START | DBG_FUNC_END, > (uintptr_t)debugid, (uintptr_t)str_id, 0, 0, > thread_id); 1723,1724c1668,1670 < kernel_debug_internal(trace_debugid, (uintptr_t)debugid, (uintptr_t)str_id, < str[0], str[1], thread_id, 0); --- > kernel_debug_internal(FALSE, trace_debugid, (uintptr_t)debugid, > (uintptr_t)str_id, str[0], > str[1], thread_id); 1734,1737c1680,1683 < kernel_debug_internal(trace_debugid, str[i], < str[i + 1], < str[i + 2], < str[i + 3], thread_id, 0); --- > kernel_debug_internal(FALSE, trace_debugid, str[i], > str[i + 1], > str[i + 2], > str[i + 3], thread_id); 2329,2331c2275 < #if !CONFIG_EMBEDDED < /* Must be done with the buffer, so release it back to the VM. < * On embedded targets this buffer is freed when the BOOTDATA segment is freed. */ --- > /* Must be done with the buffer, so release it back to the VM. */ 2333d2276 < #endif 2524d2466 < bool notify_iops = kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK; 2533,2543d2474 < < if (notify_iops) { < /* < * Notify IOPs that the typefilter will now allow everything. < * Otherwise, they won't know a typefilter is no longer in < * effect. < */ < typefilter_allow_all(kdbg_typefilter); < kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, < KD_CALLBACK_TYPEFILTER_CHANGED, kdbg_typefilter); < } 3549c3480 < KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_START); --- > KDBG(TRACE_WRITING_EVENTS | DBG_FUNC_START); 3554c3485 < KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_END, number); --- > KDBG(TRACE_WRITING_EVENTS | DBG_FUNC_END, number); 3654a3586 > boolean_t lostevents = FALSE; 3708c3640,3641 < * If the buffers have wrapped, do not emit additional lost events for the --- > * If the buffers have wrapped, capture the earliest time where there > * are events for all CPUs and do not emit additional lost events for 3711a3645 > barrier_min = kd_ctrl_page.oldest_time; 3712a3647 > kd_ctrl_page.oldest_time = 0; 3722,3726d3656 < /* < * Capture the earliest time where there are events for all CPUs and don't < * emit events with timestamps prior. < */ < barrier_min = kd_ctrl_page.oldest_time; 3733,3738c3663 < /* < * Emit a lost events tracepoint to indicate that previous events < * were lost -- the thread map cannot be trusted. A new one must < * be taken so tools can analyze the trace in a backwards-facing < * fashion. < */ --- > /* Trace a single lost events event for wrapping. */ 3747,3748d3671 < bool lostevents = false; < int lostcpu = 0; 3753c3676 < /* Check each CPU's buffers for the earliest event. */ --- > /* Check each CPU's buffers. */ 3755c3678 < /* Skip CPUs without data in their oldest storage unit. */ --- > /* Skip CPUs without data. */ 3759a3683,3689 > /* Debugging aid: maintain a copy of the "kdsp" > * index. > */ > volatile union kds_ptr kdsp_shadow; > > kdsp_shadow = kdsp; > 3763,3764c3693,3697 < next_event: < /* The next event to be read from this buffer. */ --- > volatile struct kd_storage *kdsp_actual_shadow; > > kdsp_actual_shadow = kdsp_actual; > > /* Skip buffer if there are no events left. */ 3767d3699 < /* Skip this buffer if there are no events left. */ 3772,3779c3704 < /* < * Check that this storage unit wasn't stolen and events were < * lost. This must have happened while wrapping was disabled < * in this function. 
< */ < if (kdsp_actual->kds_lostevents) { < lostevents = true; < kdsp_actual->kds_lostevents = FALSE; --- > t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]); 3781,3795c3706,3719 < /* < * The earliest event we can trust is the first one in this < * stolen storage unit. < */ < uint64_t lost_time = < kdbg_get_timestamp(&kdsp_actual->kds_records[0]); < if (kd_ctrl_page.oldest_time < lost_time) { < /* < * If this is the first time we've seen lost events for < * this gap, record its timestamp as the oldest < * timestamp we're willing to merge for the lost events < * tracepoint. < */ < kd_ctrl_page.oldest_time = barrier_min = lost_time; < lostcpu = cpu; --- > /* Ignore events that have aged out due to wrapping. */ > while (t < barrier_min) { > rcursor = ++kdsp_actual->kds_readlast; > > if (rcursor >= EVENTS_PER_STORAGE_UNIT) { > release_storage_unit(cpu, kdsp.raw); > > if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) { > goto next_cpu; > } > kdsp_shadow = kdsp; > kdsp_actual = POINTER_FROM_KDS_PTR(kdsp); > kdsp_actual_shadow = kdsp_actual; > rcursor = kdsp_actual->kds_readlast; 3797d3720 < } 3799c3722,3723 < t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]); --- > t = kdbg_get_timestamp(&kdsp_actual->kds_records[rcursor]); > } 3802,3808d3725 < if (kdbg_debug) { < printf("kdebug: FUTURE EVENT: debugid %#8x: " < "time %lld from CPU %u " < "(barrier at time %lld, read %lu events)\n", < kdsp_actual->kds_records[rcursor].debugid, < t, cpu, barrier_max, *number + tempbuf_number); < } 3810,3811c3727,3729 < * Need to flush IOPs again before we can sort any more < * data from the buffers. --- > * Need to flush IOPs again before we > * can sort any more data from the > * buffers. 3818,3825c3736,3744 < * This indicates the event emitter hasn't completed < * filling in the event (becuase we're looking at the < * buffer that the record head is using). The max barrier < * timestamp should have saved us from seeing these kinds < * of things, but other CPUs might be slow on the up-take. < * < * Bail out so we don't get out-of-order events by < * continuing to read events from other CPUs' events. --- > * indicates we've not yet completed filling > * in this event... > * this should only occur when we're looking > * at the buf that the record head is utilizing > * we'll pick these events up on the next > * call to kdbg_read > * we bail at this point so that we don't > * get an out-of-order timestream by continuing > * to read events from the other CPUs' timestream(s) 3830,3857d3748 < < /* < * Ignore events that have aged out due to wrapping or storage < * unit exhaustion while merging events. < */ < if (t < barrier_min) { < kdsp_actual->kds_readlast++; < < if (kdsp_actual->kds_readlast >= EVENTS_PER_STORAGE_UNIT) { < release_storage_unit(cpu, kdsp.raw); < < if ((kdsp = kdbp->kd_list_head).raw == KDS_PTR_NULL) { < goto next_cpu; < } < kdsp_actual = POINTER_FROM_KDS_PTR(kdsp); < } < < goto next_event; < } < < /* < * Don't worry about merging any events -- just walk through < * the CPUs and find the latest timestamp of lost events. < */ < if (lostevents) { < continue; < } < 3864c3755 < if (lostevents) { --- > if (min_kdbp == NULL || out_of_events == TRUE) { 3866,3867c3757 < * If any lost events were hit in the buffers, emit an event < * with the latest timestamp. --- > * all buffers ran empty 3869,3875d3758 < kdbg_set_timestamp_and_cpu(&lostevent, barrier_min, lostcpu); < *tempbuf = lostevent; < tempbuf->arg1 = 1; < goto nextevent; < } < if (min_kdbp == NULL) { < /* All buffers ran empty. 
*/ 3877,3878d3759 < } < if (out_of_events) { 3892c3773 < * Watch for out of order timestamps (from IOPs). --- > * Watch for out of order timestamps 3897d3777 < * Otherwise, ignore this event. 3922,3929d3801 < /* < * Remember the latest timestamp of events that we've merged so we < * don't think we've lost events later. < */ < uint64_t latest_time = kdbg_get_timestamp(tempbuf - 1); < if (kd_ctrl_page.oldest_time < latest_time) { < kd_ctrl_page.oldest_time = latest_time; < } 3947c3819 < --- > 3976c3848 < enable_wrap(old_kdebug_slowcheck); --- > enable_wrap(old_kdebug_slowcheck, lostevents); 4010,4015d3881 < KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code)); code++; < KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code), 1); code++; < KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code), 1, 2); code++; < KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code), 1, 2, 3); code++; < KDBG_RELEASE_NOPROCFILT(KDEBUG_TEST_CODE(code), 1, 2, 3, 4); code++; < 4039d3904 < 4049c3914 < kdebug_init(unsigned int n_events, char *filter_desc, boolean_t wrapping) --- > kdebug_init(unsigned int n_events, char *filter_desc) 4069c3934 < kdebug_trace_start(n_events, filter_desc, wrapping, FALSE); --- > kdebug_trace_start(n_events, filter_desc, FALSE); 4143c4008 < boolean_t wrapping, boolean_t at_wake) --- > boolean_t at_wake) 4144a4010,4011 > uint32_t old1, old2; > 4169,4172c4036 < if (!wrapping) { < uint32_t old1, old2; < (void)disable_wrap(&old1, &old2); < } --- > (void)disable_wrap(&old1, &old2); 4242c4106 < KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_START); --- > KDBG(TRACE_WRITING_EVENTS | DBG_FUNC_START); 4291a4156,4176 > /* Helper function for filling in the BSD name for an address space > * Defined here because the machine bindings know only Mach threads > * and nothing about BSD processes. > * > * FIXME: need to grab a lock during this? > */ > void kdbg_get_task_name(char* name_buf, int len, task_t task) > { > proc_t proc; > > /* Note: we can't use thread->task (and functions that rely on it) here > * because it hasn't been initialized yet when this function is called. > * We use the explicitly-passed task parameter instead. > */ > proc = get_bsdtask_info(task); > if (proc != PROC_NULL) > snprintf(name_buf, len, "%s/%d", proc->p_comm, proc->p_pid); > else > snprintf(name_buf, len, "%p [!bsd]", task); > } > 4315,4318d4199 < SYSCTL_INT(_kern_kdbg, OID_AUTO, debug, < CTLFLAG_RW | CTLFLAG_LOCKED, < &kdbg_debug, 0, "Set kdebug debug mode"); <
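
The kdebug.c hunks above replace kernel_debug_internal()'s only_filter boolean with a 64-bit flags argument (KDBG_FLAG_FILTERED, KDBG_FLAG_NOPROCFILT), add a kernel_debug_flags() entry point, and exercise the new KDBG_RELEASE_NOPROCFILT macro in the kdebug self-test. The following is a minimal in-kernel sketch of emitting an event that bypasses the per-process filter; the function and macro names are taken from the diff, while the header path, debugid, and argument values are illustrative assumptions.

    #include <sys/kdebug.h>

    /* Hedged sketch: emit a trace event that ignores KDBG_PIDCHECK /
     * KDBG_PIDEXCLUDE via the new KDBG_FLAG_NOPROCFILT path. */
    static void
    emit_unfiltered_event(uint32_t debugid)
    {
            /* Convenience macro form, as used by the kdebug self-test above. */
            KDBG_RELEASE_NOPROCFILT(debugid, 1, 2, 3, 4);

            /* Equivalent explicit call through the new flags argument. */
            kernel_debug_flags(debugid, 1, 2, 3, 4, KDBG_FLAG_NOPROCFILT);
    }
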
./bsd/kern/kern_exec.c differences detected: 165,166d164 < extern boolean_t vm_darkwake_mode; < 182,188c180 < thread_t fork_create_child(task_t parent_task, < coalition_t *parent_coalition, < proc_t child_proc, < int inherit_memory, < int is_64bit_addr, < int is_64bit_data, < int in_exec); --- > thread_t fork_create_child(task_t parent_task, coalition_t *parent_coalition, proc_t child_proc, int inherit_memory, int is64bit, int in_exec); 738,739c730,731 < task_set_64bit(task, result->is_64bit_addr, result->is_64bit_data); < if (result->is_64bit_addr) { --- > if (result->is64bit) { > task_set_64bit(task, TRUE); 741a734 > task_set_64bit(task, FALSE); 843c836 < load_result_t load_result = {}; --- > load_result_t load_result; 889,891c882,883 < if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64) { < imgp->ip_flags |= IMGPF_IS_64BIT_ADDR | IMGPF_IS_64BIT_DATA; < } --- > if ((mach_header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64) > imgp->ip_flags |= IMGPF_IS_64BIT; 924,925d915 < < 944,950c934 < imgp->ip_new_thread = fork_create_child(task, < NULL, < p, < FALSE, < (imgp->ip_flags & IMGPF_IS_64BIT_ADDR), < (imgp->ip_flags & IMGPF_IS_64BIT_DATA), < FALSE); --- > imgp->ip_new_thread = fork_create_child(task, NULL, p, FALSE, (imgp->ip_flags & IMGPF_IS_64BIT), FALSE); 992,998d975 < } else if (lret == LOAD_BADARCH_X86) { < /* set anything that might be useful in the crash report */ < set_proc_name(imgp, p); < < exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_NO32EXEC); < exec_failure_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT; < exec_failure_reason->osr_flags |= OS_REASON_FLAG_CONSISTENT_FAILURE; 1021c998 < CS_FORCED_LV|CS_ENTITLEMENTS_VALIDATED|CS_DYLD_PLATFORM|CS_RUNTIME| --- > CS_ENTITLEMENTS_VALIDATED|CS_DYLD_PLATFORM| 1046,1048c1023 < int cpu_subtype; < cpu_subtype = 0; /* all cpu_subtypes use the same shared region */ < vm_map_exec(map, task, load_result.is_64bit_addr, (void *)p->p_fd->fd_rdir, cpu_type(), cpu_subtype); --- > vm_map_exec(map, task, load_result.is64bit, (void *)p->p_fd->fd_rdir, cpu_type()); 1150c1125 < int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; --- > int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4; 1222,1227d1196 < #if __arm64__ < if (load_result.legacy_footprint) { < task_set_legacy_footprint(task, TRUE); < } < #endif /* __arm64__ */ < 1268c1237,1242 < long args[4] = {}; --- > long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; > > /* > * Collect the pathname for tracing > */ > kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); 1280,1288c1254,1257 < KERNEL_DEBUG_CONSTANT_IST1(TRACE_DATA_EXEC, p->p_pid, fsid, fileid, 0, < (uintptr_t)thread_tid(thread)); < < /* < * Collect the pathname for tracing < */ < kdbg_trace_string(p, &args[0], &args[1], &args[2], &args[3]); < KERNEL_DEBUG_CONSTANT_IST1(TRACE_STRING_EXEC, args[0], args[1], < args[2], args[3], (uintptr_t)thread_tid(thread)); --- > KERNEL_DEBUG_CONSTANT1(TRACE_DATA_EXEC | DBG_FUNC_NONE, > p->p_pid , fsid, fileid, 0, (uintptr_t)thread_tid(thread)); > KERNEL_DEBUG_CONSTANT1(TRACE_STRING_EXEC | DBG_FUNC_NONE, > dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, (uintptr_t)thread_tid(thread)); 1552,1565c1521,1528 < if (error == 0) { < if (imgp->ip_flags & IMGPF_INTERPRET && ndp->ni_vp) { < AUDIT_ARG(vnpath, ndp->ni_vp, ARG_VNODE2); < } < < /* < * Call out to allow 3rd party notification of exec. < * Ignore result of kauth_authorize_fileop call. 
< */ < if (kauth_authorize_fileop_has_listeners()) { < kauth_authorize_fileop(vfs_context_ucred(imgp->ip_vfs_context), < KAUTH_FILEOP_EXEC, < (uintptr_t)ndp->ni_vp, 0); < } --- > /* > * Call out to allow 3rd party notification of exec. > * Ignore result of kauth_authorize_fileop call. > */ > if (error == 0 && kauth_authorize_fileop_has_listeners()) { > kauth_authorize_fileop(vfs_context_ucred(imgp->ip_vfs_context), > KAUTH_FILEOP_EXEC, > (uintptr_t)ndp->ni_vp, 0); 2122c2085 < unsigned ngroups = 0; --- > int ngroups = 0; 2130c2093 < if (ngroups != px_persona->pspi_ngroups) { --- > if (ngroups != (int)px_persona->pspi_ngroups) { 2261d2223 < task_t old_task = current_task(); 2289c2251 < imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT_ADDR : IMGPF_NONE); --- > imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT : IMGPF_NONE); 2330,2333c2292,2294 < if ((error = copyin(px_args.attrp, &px_sa, px_sa_offset)) != 0) { < goto bad; < } < --- > if ((error = copyin(px_args.attrp, &px_sa, px_sa_offset) != 0)) > goto bad; > 2623,2629c2584,2585 < imgp->ip_new_thread = fork_create_child(old_task, < NULL, < p, < FALSE, < p->p_flag & P_LP64, < task_get_64bit_data(old_task), < TRUE); --- > imgp->ip_new_thread = fork_create_child(current_task(), > NULL, p, FALSE, p->p_flag & P_LP64, TRUE); 2837c2793 < p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread); --- > p = proc_exec_switch_task(p, current_task(), new_task, imgp->ip_new_thread); 2840,2848d2795 < < /* < * Need to transfer pending watch port boosts to the new task while still making < * sure that the old task remains in the importance linkage. Create an importance < * linkage from old task to new task, then switch the task importance base < * of old task and new task. After the switch the port watch boost will be < * boosting the new task and new task will be donating importance to old task. < */ < inherit = ipc_importance_exec_switch_task(old_task, new_task); 2975,2977d2921 < if (imgp->ip_px_sa != NULL && px_sa.psa_thread_limit > 0) { < task_set_thread_limit(new_task, (uint16_t)px_sa.psa_thread_limit); < } 3018c2962 < task_bank_init(new_task); --- > task_bank_init(get_threadtask(imgp->ip_new_thread)); 3025c2969 < proc_inherit_task_role(new_task, old_task); --- > proc_inherit_task_role(get_threadtask(imgp->ip_new_thread), current_task()); 3045c2989,2993 < * Apply the requested maximum address. --- > * Need to transfer pending watch port boosts to the new task while still making > * sure that the old task remains in the importance linkage. Create an importance > * linkage from old task to new task, then switch the task importance base > * of old task and new task. After the switch the port watch boost will be > * boosting the new task and new task will be donating importance to old task. 
3047,3052c2995,2996 < if (error == 0 && imgp->ip_px_sa != NULL) { < struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa; < < if (psa->psa_max_addr) { < vm_map_set_max_addr(get_task_map(new_task), psa->psa_max_addr); < } --- > if (error == 0 && task_did_exec(current_task())) { > inherit = ipc_importance_exec_switch_task(current_task(), get_threadtask(imgp->ip_new_thread)); 3056c3000 < /* Apply the main thread qos */ --- > /* Apply the main thread qos */ 3058c3002 < task_set_main_thread_qos(new_task, main_thread); --- > task_set_main_thread_qos(get_threadtask(imgp->ip_new_thread), main_thread); 3066c3010 < vm_map_set_jumbo(get_task_map(new_task)); --- > vm_map_set_jumbo(get_task_map(p->task)); 3181,3191d3124 < < #if CONFIG_AUDIT < if (!error && AUDIT_ENABLED() && p) { < /* Add the CDHash of the new process to the audit record */ < uint8_t *cdhash = cs_get_cdhash(p); < if (cdhash) { < AUDIT_ARG(data, cdhash, sizeof(uint8_t), CS_CDHASH_LEN); < } < } < #endif < 3195,3196c3128,3129 < if (task_did_exec(old_task)) { < set_bsdtask_info(old_task, NULL); --- > if (task_did_exec(current_task())) { > set_bsdtask_info(current_task(), NULL); 3240c3173 < if (task_did_exec(old_task)) { --- > if (task_did_exec(current_task())) { 3242c3175 < task_terminate_internal(old_task); --- > task_terminate_internal(current_task()); 3476d3408 < task_t old_task = current_task(); 3505c3437 < imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT_ADDR : IMGPF_NONE) | ((p->p_flag & P_DISABLE_ASLR) ? IMGPF_DISABLE_ASLR : IMGPF_NONE); --- > imgp->ip_flags = (is_64 ? IMGPF_WAS_64BIT : IMGPF_NONE) | ((p->p_flag & P_DISABLE_ASLR) ? IMGPF_DISABLE_ASLR : IMGPF_NONE); 3551,3557c3483,3484 < imgp->ip_new_thread = fork_create_child(old_task, < NULL, < p, < FALSE, < p->p_flag & P_LP64, < task_get_64bit_data(old_task), < TRUE); --- > imgp->ip_new_thread = fork_create_child(current_task(), > NULL, p, FALSE, p->p_flag & P_LP64, TRUE); 3580c3507 < p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread); --- > p = proc_exec_switch_task(p, current_task(), new_task, imgp->ip_new_thread); 3583,3591d3509 < < /* < * Need to transfer pending watch port boosts to the new task while still making < * sure that the old task remains in the importance linkage. Create an importance < * linkage from old task to new task, then switch the task importance base < * of old task and new task. After the switch the port watch boost will be < * boosting the new task and new task will be donating importance to old task. < */ < inherit = ipc_importance_exec_switch_task(old_task, new_task); 3640c3558 < task_bank_init(new_task); --- > task_bank_init(get_threadtask(imgp->ip_new_thread)); 3648c3566 < proc_inherit_task_role(new_task, old_task); --- > proc_inherit_task_role(get_threadtask(imgp->ip_new_thread), current_task()); 3665,3672d3582 < if (vm_darkwake_mode == TRUE) { < /* < * This process is being launched when the system < * is in darkwake. So mark it specially. This will < * cause all its pages to be entered in the background Q. 
< */ < task_set_darkwake_mode(new_task, vm_darkwake_mode); < } 3681,3690d3590 < #if CONFIG_AUDIT < if (!error && AUDIT_ENABLED() && p) { < /* Add the CDHash of the new process to the audit record */ < uint8_t *cdhash = cs_get_cdhash(p); < if (cdhash) { < AUDIT_ARG(data, cdhash, sizeof(uint8_t), CS_CDHASH_LEN); < } < } < #endif < 3703,3704c3603,3604 < if (task_did_exec(old_task)) { < set_bsdtask_info(old_task, NULL); --- > if (task_did_exec(current_task())) { > set_bsdtask_info(current_task(), NULL); 3712a3613,3623 > /* > * Need to transfer pending watch port boosts to the new task while still making > * sure that the old task remains in the importance linkage. Create an importance > * linkage from old task to new task, then switch the task importance base > * of old task and new task. After the switch the port watch boost will be > * boosting the new task and new task will be donating importance to old task. > */ > if (error == 0 && task_did_exec(current_task())) { > inherit = ipc_importance_exec_switch_task(current_task(), get_threadtask(imgp->ip_new_thread)); > } > 3719c3630 < if (task_did_exec(old_task)) { --- > if (task_did_exec(current_task())) { 3721c3632 < task_terminate_internal(old_task); --- > task_terminate_internal(current_task()); 3898c3809 < int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; --- > int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4; 3972c3883,3884 < ptr_area_size = (imgp->ip_argc + imgp->ip_envc + imgp->ip_applec + 3) * ptr_size; --- > ptr_area_size = (imgp->ip_argc + imgp->ip_envc + imgp->ip_applec + 3) * > ptr_size; 4098,4099c4010,4011 < int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT_ADDR) ? 8 : 4; < int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; --- > int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT) ? 8 : 4; > int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4; 4305,4310d4216 < * libplatform needs a random pointer-obfuscation value when it is initialized. < */ < #define PTR_MUNGE_VALUES 1 < #define PTR_MUNGE_KEY "ptr_munge=" < < /* 4368c4274 < int img_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; --- > int img_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT) ? 8 : 4; 4425,4434d4330 < /* < * Supply libpthread & libplatform with a random value to use for pointer < * obfuscation. < */ < error = exec_add_entropy_key(imgp, PTR_MUNGE_KEY, PTR_MUNGE_VALUES, FALSE); < if (error) { < goto bad; < } < imgp->ip_applec++; < 5150c5046 < user64_addr_t argv64bit[3] = {}; --- > user64_addr_t argv64bit[3]; 5160c5056 < user32_addr_t argv32bit[3] = {}; --- > user32_addr_t argv32bit[3]; 5302c5198 < static int --- > static int 5309d5204 < case LOAD_BADARCH_X86: 5577a5473,5533 > * If the process is not signed or if it contains entitlements, we > * need to communicate through the task_access_port to taskgated. > * > * taskgated will provide a detached code signature if present, and > * will enforce any restrictions on entitlements. 
> */ > > static boolean_t > taskgated_required(proc_t p, boolean_t *require_success) > { > size_t length; > void *blob; > int error; > > if (cs_debug > 2) > csvnode_print_debug(p->p_textvp); > > #if !CONFIG_EMBEDDED > const int can_skip_taskgated = csproc_get_platform_binary(p) && !csproc_get_platform_path(p); > #else > const int can_skip_taskgated = csproc_get_platform_binary(p); > #endif > if (can_skip_taskgated) { > if (cs_debug) printf("taskgated not required for: %s\n", p->p_name); > *require_success = FALSE; > return FALSE; > } > > if ((p->p_csflags & CS_VALID) == 0) { > *require_success = FALSE; > return TRUE; > } > > error = cs_entitlements_blob_get(p, &blob, &length); > if (error == 0 && blob != NULL) { > #if !CONFIG_EMBEDDED > /* > * fatal on the desktop when entitlements are present, > * unless we started in single-user mode > */ > if ((boothowto & RB_SINGLE) == 0) > *require_success = TRUE; > /* > * Allow initproc to run without causing taskgated to launch > */ > if (p == initproc) { > *require_success = FALSE; > return FALSE; > } > > #endif > if (cs_debug) printf("taskgated required for: %s\n", p->p_name); > > return TRUE; > } > > *require_success = FALSE; > return FALSE; > } > > /* 5597c5553 < struct cs_blob *csb; --- > unsigned char hash[CS_CDHASH_LEN]; 5639,5641c5595,5596 < /* If the code signature came through the image activation path, we skip the < * taskgated / externally attached path. */ < if (imgp->ip_csflags & CS_SIGNED) { --- > /* check if callout to taskgated is needed */ > if (!taskgated_required(p, &require_success)) { 5646,5649d5600 < /* The rest of the code is for signatures that either already have been externally < * attached (likely, but not necessarily by a previous run through the taskgated < * path), or that will now be attached by taskgated. */ < 5694,5729c5645,5652 < csb = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff); < < if (csb != NULL) { < /* As the enforcement we can do here is very limited, we only allow things that < * are the only reason why this code path still exists: < * Adhoc signed non-platform binaries without special cs_flags and without any < * entitlements (unrestricted ones still pass AMFI). */ < if ( < /* Revalidate the blob if necessary through bumped generation count. */ < (ubc_cs_generation_check(p->p_textvp) == 0 || < ubc_cs_blob_revalidate(p->p_textvp, csb, imgp, 0) == 0) && < /* Only CS_ADHOC, no CS_KILL, CS_HARD etc. */ < (csb->csb_flags & CS_ALLOWED_MACHO) == CS_ADHOC && < /* If it has a CMS blob, it's not adhoc. The CS_ADHOC flag can lie. */ < csblob_find_blob_bytes((const uint8_t *)csb->csb_mem_kaddr, csb->csb_mem_size, < CSSLOT_SIGNATURESLOT, < CSMAGIC_BLOBWRAPPER) == NULL && < /* It could still be in a trust cache (unlikely with CS_ADHOC), or a magic path. */ < csb->csb_platform_binary == 0 && < /* No entitlements, not even unrestricted ones. */ < csb->csb_entitlements_blob == NULL) { < < proc_lock(p); < p->p_csflags |= CS_SIGNED | CS_VALID; < proc_unlock(p); < < } else { < uint8_t cdhash[CS_CDHASH_LEN]; < char cdhash_string[CS_CDHASH_STRING_SIZE]; < proc_getcdhash(p, cdhash); < cdhash_to_string(cdhash_string, cdhash); < printf("ignoring detached code signature on '%s' with cdhash '%s' " < "because it is invalid, or not a simple adhoc signature.\n", < p->p_name, cdhash_string); < } < --- > /* > * If there's a new code directory, mark this process > * as signed. 
> */ > if (0 == ubc_cs_getcdhash(p->p_textvp, p->p_textoff, hash)) { > proc_lock(p); > p->p_csflags |= CS_VALID; > proc_unlock(p); 5794c5717 < if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) { --- > if (imgp->ip_flags & IMGPF_IS_64BIT) { 5840c5763 < if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) { --- > if (imgp->ip_flags & IMGPF_IS_64BIT) {
./bsd/kern/subr_eventhandler.c differences detected: 75c75 < struct eventhandler_entry_arg eventhandler_entry_dummy_arg = { { 0 }, { 0 } }; --- > struct eventhandler_entry_arg eventhandler_entry_dummy_arg = {{0}}; 83,88d82 < static unsigned int eg_size; /* size of eventhandler_entry_generic */ < static struct mcache *eg_cache; /* mcache for eventhandler_entry_generic */ < < static unsigned int el_size; /* size of eventhandler_list */ < static struct mcache *el_cache; /* mcache for eventhandler_list */ < 130,144d123 < < eg_size = sizeof (struct eventhandler_entry_generic); < eg_cache = mcache_create("eventhdlr_generic", eg_size, < sizeof (uint64_t), 0, MCR_SLEEP); < < el_size = sizeof (struct eventhandler_list); < el_cache = mcache_create("eventhdlr_list", el_size, < sizeof (uint64_t), 0, MCR_SLEEP); < } < < void < eventhandler_reap_caches(boolean_t purge) < { < mcache_reap_now(eg_cache, purge); < mcache_reap_now(el_cache, purge); 160,161d138 < VERIFY(strlen(name) <= (sizeof (new_list->el_name) - 1)); < 169c146 < lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex); --- > lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex); 178,187c155,175 < lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex); < new_list = mcache_alloc(el_cache, MCR_SLEEP); < bzero(new_list, el_size); < evhlog((LOG_DEBUG, "%s: creating list \"%s\"", __func__, name)); < list = new_list; < list->el_flags = 0; < list->el_runcount = 0; < bzero(&list->el_lock, sizeof(list->el_lock)); < (void) snprintf(list->el_name, sizeof (list->el_name), "%s", name); < TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt->eventhandler_lists, list, el_link); --- > lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex); > > MALLOC(new_list, struct eventhandler_list *, > sizeof(struct eventhandler_list) + strlen(name) + 1, > M_EVENTHANDLER, M_WAITOK); > > /* If someone else created it already, then use that one. */ > lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex); > list = _eventhandler_find_list(evthdlr_lists_ctxt, name); > if (list != NULL) { > FREE(new_list, M_EVENTHANDLER); > } else { > evhlog((LOG_DEBUG, "%s: creating list \"%s\"", __func__, name)); > list = new_list; > list->el_flags = 0; > list->el_runcount = 0; > bzero(&list->el_lock, sizeof(list->el_lock)); > list->el_name = (char *)list + sizeof(struct eventhandler_list); > strlcpy(list->el_name, name, strlen(name) + 1); > TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt->eventhandler_lists, list, el_link); > } 225,226c213,216 < eg = mcache_alloc(eg_cache, MCR_SLEEP); < bzero(eg, eg_size); --- > MALLOC(eg, struct eventhandler_entry_generic *, > sizeof(struct eventhandler_entry_generic), > M_EVENTHANDLER, M_WAITOK | M_ZERO); > 252,253c242 < EHL_LOCK_CONVERT(list); < mcache_free(eg_cache, ep); --- > FREE(ep, M_EVENTHANDLER); 264d252 < EHL_LOCK_CONVERT(list); 268c256 < mcache_free(eg_cache, ep); --- > FREE(ep, M_EVENTHANDLER); 278c266 < msleep((caddr_t)list, &list->el_lock, PSPIN, "evhrm", 0); --- > msleep((caddr_t)list, &list->el_lock, 0, "evhrm", 0); 317c305 < lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex); --- > lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex); 319,322c307,308 < if (list != NULL) { < lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex); < EHL_LOCK_SPIN(list); < } --- > if (list != NULL) > EHL_LOCK(list); 342c328 < mcache_free(eg_cache, ep); --- > FREE(ep, M_EVENTHANDLER); 367c353 < mcache_free(el_cache, list); --- > FREE(list, M_EVENTHANDLER);
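
The subr_eventhandler.c changes above drop MALLOC/FREE in favor of per-type mcache zones ("eventhdlr_generic", "eventhdlr_list") and add eventhandler_reap_caches() so the caches can be drained under memory pressure. Below is a minimal sketch of the same create/alloc/zero/free pattern, assuming the mcache calls exactly as they appear in the diff; struct example_obj is a hypothetical stand-in for the private eventhandler entry type.

    #include <sys/types.h>
    #include <string.h>
    #include <sys/mcache.h>

    /* Hypothetical payload type standing in for
     * struct eventhandler_entry_generic from the diff above. */
    struct example_obj {
            uint64_t eo_id;
    };

    static unsigned int eo_size;        /* object size for the cache */
    static struct mcache *eo_cache;     /* typed cache, created once at init */

    static void
    example_cache_init(void)
    {
            eo_size = sizeof (struct example_obj);
            /* name, object size, alignment, flags, blocking allocation */
            eo_cache = mcache_create("example_obj", eo_size,
                sizeof (uint64_t), 0, MCR_SLEEP);
    }

    static struct example_obj *
    example_cache_alloc(void)
    {
            struct example_obj *eo = mcache_alloc(eo_cache, MCR_SLEEP);
            bzero(eo, eo_size);         /* mcache_alloc does not zero */
            return eo;
    }

    static void
    example_cache_free(struct example_obj *eo)
    {
            mcache_free(eo_cache, eo);
    }
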
./bsd/kern/kern_lockf.c differences detected: 332a333 > #if CONFIG_EMBEDDED 336a338 > #endif
NO DIFFS in ./bsd/kern/makekdebugevents.py
NO DIFFS in ./bsd/kern/kern_control.c
NO DIFFS in ./bsd/kern/subr_sbuf.c

./bsd/kern/uipc_domain.c differences detected: 2c2 < * Copyright (c) 1998-2018 Apple Inc. All rights reserved. --- > * Copyright (c) 1998-2013 Apple Inc. All rights reserved.
./bsd/kern/uipc_usrreq.c differences detected: 5c5 < * --- > * 14c14 < * --- > * 17c17 < * --- > * 25c25 < * --- > * 183,184c183,184 < static void < unp_get_locks_in_order(struct socket *so, struct socket *conn_so) --- > static void > unp_get_locks_in_order(struct socket *so, struct socket *conn_so) 372c372 < --- > 488c488 < if (so != so2) --- > if (so != so2) 527c527 < /* Check socket state again as we might have unlocked the socket --- > /* Check socket state again as we might have unlocked the socket 535c535 < } --- > } 561c561 < if ((int32_t)snd->sb_hiwat >= --- > if ((int32_t)snd->sb_hiwat >= 847c847 < lck_mtx_init(&unp->unp_mtx, --- > lck_mtx_init(&unp->unp_mtx, 889c889 < --unp_count; --- > --unp_count; 918c918 < * this reference and closing the connected socket, we need --- > * this reference and closing the connected socket, we need 938c938 < --- > 943c943 < --- > 1008,1009c1008 < if (namelen >= SOCK_MAXADDRLEN) < return (EINVAL); --- > ASSERT(namelen < SOCK_MAXADDRLEN); 1012c1011 < --- > 1123,1124c1122 < if (len >= SOCK_MAXADDRLEN) < return (EINVAL); --- > ASSERT(len < SOCK_MAXADDRLEN); 1303c1301 < --- > 1355,1356c1353,1354 < so2->so_usecount++; < --- > so2->so_usecount++; > 1362c1360 < if (so != so2) { --- > if (so != so2) { 1365,1366c1363,1364 < * soon after getting the locks in order < */ --- > * soon after getting the locks in order > */ 1466c1464 < if (so_locked == 1) { --- > if (so_locked == 1) { 1481c1479 < --- > 1488c1486 < (void)msleep(waitso->so_pcb, &unp->unp_mtx, --- > (void)msleep(waitso->so_pcb, &unp->unp_mtx, 1492c1490 < --- > 1744c1742 < (sizeof (struct xunpcb64)); --- > (sizeof (struct xunpcb64)); 1934c1932 < * XXX (1) this assumes a pointer and int are the same size, --- > * XXX (1) this assumes a pointer and int are the same size, 1979c1977 < VERIFY(fds[i] >= 0); --- > VERIFY(fds[i] > 0); 2084c2082 < /* On K64 we need to walk backwards because a fileglob * is twice the size of an fd --- > /* On K64 we need to walk backwards because a fileglob * is twice the size of an fd 2232c2230 < * In case a file is passed onto itself we need to --- > * In case a file is passed onto itself we need to 2321c2319 < --- > 2440c2438 < panic("unp_lock: so=%p NO PCB! lr=%p ref=0x%x\n", --- > panic("unp_lock: so=%p NO PCB! lr=%p ref=0x%x\n", 2487c2485 < --- > 2515a2514 >
NO DIFFS in ./bsd/kern/subr_prof.c
NO DIFFS in ./bsd/kern/kern_priv.c
NO DIFFS in ./bsd/kern/bsd_stubs.c

./bsd/kern/decmpfs.c differences detected: 2c2 < * Copyright (c) 2008-2018 Apple Inc. All rights reserved. --- > * Copyright (c) 2008-2015 Apple Inc. All rights reserved. 489,490c489,490 < size_t read_size = 0; < size_t attr_size = 0; --- > size_t read_size = 0; > size_t attr_size = 0; 494,497d493 < const bool no_additional_data= ((cp != NULL) < && (cp->cmp_type != 0) < && (cp->cmp_minimal_xattr != 0)); < char uio_buf[ UIO_SIZEOF(1) ]; 499,508c495 < < /* < * Trace the following parameters on entry with event-id 0x03120004 < * < * @vp->v_id: vnode-id for which to fetch compressed header. < * @no_additional_data: If set true then xattr didn't have any extra data. < * @returnInvalid: return the header even though the type is out of range. < */ < DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id, < no_additional_data, returnInvalid); --- > char uio_buf[ UIO_SIZEOF(1) ]; 510c497,499 < if (no_additional_data) { --- > if ((cp != NULL) && > (cp->cmp_type != 0) && > (cp->cmp_minimal_xattr != 0)) { 585,591d573 < /* < * Trace the following parameters on return with event-id 0x03120004. < * < * @vp->v_id: vnode-id for which to fetch compressed header. < * @err: value returned from this function. < */ < DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id, err); 700,702c682,684 < int error = 0; < uint32_t cmp_state; < struct vnode_attr va_fetch; --- > int error = 0; > uint32_t cmp_state; > struct vnode_attr va_fetch; 705c687 < int cnode_locked = 0; --- > int cnode_locked = 0; 708d689 < bool is_mounted, is_local_fs; 743,761c724,726 < < is_mounted = false; < is_local_fs = false; < mp = vnode_mount(vp); < if (mp) < is_mounted = true; < if (is_mounted) < is_local_fs = ((mp->mnt_flag & MNT_LOCAL)); < /* < * Trace the following parameters on entry with event-id 0x03120014. < * < * @vp->v_id: vnode-id of the file being queried. < * @is_mounted: set to true if @vp belongs to a mounted fs. < * @is_local_fs: set to true if @vp belongs to local fs. < */ < DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, < is_mounted, is_local_fs); < < if (!is_mounted) { --- > > mp = vnode_mount(vp); > if (mp == NULL) { 770,771c735 < < if (!is_local_fs) { --- > if ((mp->mnt_flag & MNT_LOCAL) == 0) { 850,868c814,824 < /* < * Trace the following parameters on return with event-id 0x03120014. < * < * @vp->v_id: vnode-id of the file being queried. < * @return: set to 1 is file is compressed. < */ < switch(ret) { < case FILE_IS_NOT_COMPRESSED: < DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0); < return 0; < case FILE_IS_COMPRESSED: < case FILE_IS_CONVERTING: < DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 1); < return 1; < default: < /* unknown state, assume file is not compressed */ < DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0); < ErrorLogWithPath("unknown ret %d\n", ret); < return 0; --- > > switch(ret) { > case FILE_IS_NOT_COMPRESSED: > return 0; > case FILE_IS_COMPRESSED: > case FILE_IS_CONVERTING: > return 1; > default: > /* unknown state, assume file is not compressed */ > ErrorLogWithPath("unknown ret %d\n", ret); > return 0; 1105,1118c1061 < < /* < * Trace the following parameters on entry with event-id 0x03120008. < * < * @vp->v_id: vnode-id of the file being decompressed. < * @hdr->compression_type: compression type. < * @offset: offset from where to fetch uncompressed data. < * @size: amount of uncompressed data to fetch. < * < * Please NOTE: @offset and @size can overflow in theory but < * here it is safe. 
< */ < DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_UNCOMPRESSED_DATA, vp->v_id, < hdr->compression_type, (int)offset, (int)size); --- > 1139,1149c1082 < /* < * Trace the following parameters on return with event-id 0x03120008. < * < * @vp->v_id: vnode-id of the file being decompressed. < * @bytes_read: amount of uncompressed bytes fetched in bytes. < * @err: value returned from this function. < * < * Please NOTE: @bytes_read can overflow in theory but here it is safe. < */ < DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_UNCOMPRESSED_DATA, vp->v_id, < (int)*bytes_read, err); --- > 1581a1515 > 1583,1590d1516 < < /* < * Trace the following parameters on entry with event-id 0x03120010. < * < * @vp->v_id: vnode-id of the file for which to free compressed data. < */ < DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id); < 1609,1615d1534 < /* < * Trace the following parameters on return with event-id 0x03120010. < * < * @vp->v_id: vnode-id of the file for which to free compressed data. < * @err: value returned from this function. < */ < DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id, err); 1669c1588 < decmpfs_header *hdr = NULL; --- > decmpfs_header *hdr = NULL; 1673,1685d1591 < < /* < * Trace the following parameters on entry with event-id 0x03120000. < * < * @vp->v_id: vnode-id of the file being decompressed. < * @toSize: uncompress given bytes of the file. < * @truncate_okay: on error it is OK to truncate. < * @skiplock: compressed data is locked, skip locking again. < * < * Please NOTE: @toSize can overflow in theory but here it is safe. < */ < DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_DECOMPRESS_FILE, vp->v_id, < (int)toSize, truncate_okay, skiplock); 1883,1889c1789 < /* < * Trace the following parameters on return with event-id 0x03120000. < * < * @vp->v_id: vnode-id of the file being decompressed. < * @err: value returned from this function. < */ < DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_DECOMPRESS_FILE, vp->v_id, err); --- >
./bsd/kern/kern_cs.c differences detected: 82,87d81 < // If set, AMFI will error out early on unsigned code, before evaluation the normal policy. < int cs_debug_fail_on_unsigned_code = 0; < // If the previous mode is enabled, we count the resulting failures here. < unsigned int cs_debug_unsigned_exec_failures = 0; < unsigned int cs_debug_unsigned_mmap_failures = 0; < 89,103c83 < /* < Here we split cs_enforcement_enable into cs_system_enforcement_enable and cs_process_enforcement_enable < < cs_system_enforcement_enable governs whether or not system level code signing enforcement mechanisms < are applied on the system. Today, the only such mechanism is code signing enforcement of the dyld shared < cache. < < cs_process_enforcement_enable governs whether code signing enforcement mechanisms are applied to all < processes or only those that opt into such enforcement. < < (On iOS and related, both of these are set by default. On macOS, only cs_system_enforcement_enable < is set by default. Processes can then be opted into code signing enforcement on a case by case basis.) < */ < const int cs_system_enforcement_enable = 1; < const int cs_process_enforcement_enable = 1; --- > const int cs_enforcement_enable = 1; 110,111c90 < #define DEFAULT_CS_SYSTEM_ENFORCEMENT_ENABLE 1 < #define DEFAULT_CS_PROCESS_ENFORCEMENT_ENABLE 1 --- > #define DEFAULT_CS_ENFORCEMENT_ENABLE 1 113,114c92 < #define DEFAULT_CS_SYSTEM_ENFORCEMENT_ENABLE 1 < #define DEFAULT_CS_PROCESS_ENFORCEMENT_ENABLE 0 --- > #define DEFAULT_CS_ENFORCEMENT_ENABLE 0 116,117c94 < SECURITY_READ_ONLY_LATE(int) cs_system_enforcement_enable = DEFAULT_CS_SYSTEM_ENFORCEMENT_ENABLE; < SECURITY_READ_ONLY_LATE(int) cs_process_enforcement_enable = DEFAULT_CS_PROCESS_ENFORCEMENT_ENABLE; --- > SECURITY_READ_ONLY_LATE(int) cs_enforcement_enable = DEFAULT_CS_ENFORCEMENT_ENABLE; 134,139d110 < SYSCTL_INT(_vm, OID_AUTO, cs_debug_fail_on_unsigned_code, CTLFLAG_RW | CTLFLAG_LOCKED, < &cs_debug_fail_on_unsigned_code, 0, ""); < SYSCTL_UINT(_vm, OID_AUTO, cs_debug_unsigned_exec_failures, CTLFLAG_RD | CTLFLAG_LOCKED, < &cs_debug_unsigned_exec_failures, 0, ""); < SYSCTL_UINT(_vm, OID_AUTO, cs_debug_unsigned_mmap_failures, CTLFLAG_RD | CTLFLAG_LOCKED, < &cs_debug_unsigned_mmap_failures, 0, ""); 144,145c115 < SYSCTL_INT(_vm, OID_AUTO, cs_system_enforcement, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_system_enforcement_enable, 0, ""); < SYSCTL_INT(_vm, OID_AUTO, cs_process_enforcement, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_process_enforcement_enable, 0, ""); --- > SYSCTL_INT(_vm, OID_AUTO, cs_enforcement, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_enforcement_enable, 0, ""); 149c119 < SYSCTL_INT(_vm, OID_AUTO, cs_library_validation, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_library_val_enable, 0, ""); --- > SYSCTL_INT(_vm, OID_AUTO, cs_library_validation, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_library_val_enable, 0, ""); 158,159c128 < #if MACH_ASSERT < #if PLATFORM_WatchOS || __x86_64__ --- > #if MACH_ASSERT && __x86_64__ 161,162c130 < #endif /* watchos || x86_64 */ < #endif /* MACH_ASSERT */ --- > #endif /* MACH_ASSERT && __x86_64__ */ 169,171c137,138 < if (disable_cs_enforcement && PE_i_can_has_debugger(NULL) != 0) { < cs_system_enforcement_enable = 0; < cs_process_enforcement_enable = 0; --- > if (disable_cs_enforcement) { > cs_enforcement_enable = 0; 201c168 < #if CONFIG_MACF --- > #if CONFIG_MACF && CONFIG_ENFORCE_SIGNED_CODE 298c265 < cs_process_enforcement(struct proc *p) --- > cs_enforcement(struct proc *p) 301c268 < if (cs_process_enforcement_enable) --- > if (cs_enforcement_enable) 313,324d279 < int < 
cs_process_global_enforcement(void) < { < return cs_process_enforcement_enable ? 1 : 0; < } < < int < cs_system_enforcement(void) < { < return cs_system_enforcement_enable ? 1 : 0; < } < 360,371d314 < int < csproc_forced_lv(struct proc* p) < { < if (p == NULL) { < p = current_proc(); < } < if (p != NULL && (p->p_csflags & CS_FORCED_LV)) { < return 1; < } < return 0; < } < 385c328 < * Description: This function returns the base offset into the (possibly universal) binary --- > * Description: This function returns the base offset into the Mach-O binary 473,476d415 < if ((p->p_csflags & CS_SIGNED) == 0) { < return NULL; < } < 481c420 < * Function: csvnode_get_blob --- > * Function: csproc_get_blob 643,645c582 < struct cs_blob *csblob; < < csblob = csproc_get_blob(p); --- > struct cs_blob *csblob = csproc_get_blob(p); 670,719d606 < void < csproc_disable_enforcement(struct proc* __unused p) < { < #if !CONFIG_ENFORCE_SIGNED_CODE < if (p != NULL) { < proc_lock(p); < p->p_csflags &= (~CS_ENFORCEMENT); < proc_unlock(p); < } < #endif < } < < /* Function: csproc_mark_invalid_allowed < * < * Description: Mark the process as being allowed to go invalid. Called as part of < * task_for_pid and ptrace policy. Note CS_INVALID_ALLOWED only matters for < * processes that have been opted into CS_ENFORCEMENT. < */ < void < csproc_mark_invalid_allowed(struct proc* __unused p) < { < #if !CONFIG_ENFORCE_SIGNED_CODE < if (p != NULL) { < proc_lock(p); < p->p_csflags |= CS_INVALID_ALLOWED; < proc_unlock(p); < } < #endif < } < < /* < * Function: csproc_check_invalid_allowed < * < * Description: Returns 1 if the process has been marked as allowed to go invalid < * because it gave its task port to an allowed process. < */ < int < csproc_check_invalid_allowed(struct proc* __unused p) < { < #if !CONFIG_ENFORCE_SIGNED_CODE < if (p == NULL) { < p = current_proc(); < } < < if (p != NULL && (p->p_csflags & CS_INVALID_ALLOWED)) < return 1; < #endif < return 0; < } < 918,1004d804 < /* < * Function: csfg_get_identity < * < * Description: This function returns the codesign identity < * for the fileglob < */ < const char * < csfg_get_identity(struct fileglob *fg, off_t offset) < { < vnode_t vp; < struct cs_blob *csblob = NULL; < < if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) < return NULL; < < vp = (struct vnode *)fg->fg_data; < if (vp == NULL) < return NULL; < < csblob = ubc_cs_blob_get(vp, -1, offset); < if (csblob == NULL) < return NULL; < < return csblob_get_identity(csblob); < } < < /* < * Function: csfg_get_platform_identifier < * < * Description: This function returns the codesign platform < * identifier for the fileglob. Assumes the fileproc < * is being held busy to keep the fileglob consistent. < */ < uint8_t < csfg_get_platform_identifier(struct fileglob *fg, off_t offset) < { < vnode_t vp; < < if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) < return 0; < < vp = (struct vnode *)fg->fg_data; < if (vp == NULL) < return 0; < < return csvnode_get_platform_identifier(vp, offset); < } < < /* < * Function: csvnode_get_platform_identifier < * < * Description: This function returns the codesign platform < * identifier for the vnode. Assumes a vnode reference < * is held. 
< */ < uint8_t < csvnode_get_platform_identifier(struct vnode *vp, off_t offset) < { < struct cs_blob *csblob; < const CS_CodeDirectory *code_dir; < < csblob = ubc_cs_blob_get(vp, -1, offset); < if (csblob == NULL) < return 0; < < code_dir = csblob->csb_cd; < if (code_dir == NULL || ntohl(code_dir->length) < 8) < return 0; < < return code_dir->platform; < } < < /* < * Function: csproc_get_platform_identifier < * < * Description: This function returns the codesign platform < * identifier for the proc. Assumes proc will remain < * valid through call. < */ < uint8_t < csproc_get_platform_identifier(struct proc *p) < { < if (NULL == p->p_textvp) < return 0; < < return csvnode_get_platform_identifier(p->p_textvp, p->p_textoff); < } 1018,1023d817 < int < csproc_hardened_runtime(struct proc* p) < { < return (p->p_csflags & CS_RUNTIME) ? 1 : 0; < } < 1067,1070d860 < if ((p->p_csflags & CS_SIGNED) == 0) { < return 0; < } < 1091,1094d880 < if ((p->p_csflags & CS_SIGNED) == 0) { < return NULL; < } < 1104,1106c890,894 < /* < * DO NOT USE THIS FUNCTION! < * Use the properly guarded csproc_get_blob instead. --- > > /* Retrieve the codesign blob for a process. > * Returns: > * EINVAL no text vnode associated with the process > * 0 no error occurred 1108,1110c896,898 < * This is currently here to allow detached signatures to work < * properly. The only user of this function is also checking < * for CS_VALID. --- > * On success, out_start and out_length will point to the > * cms blob if found; or will be set to NULL/zero > * if there were no blob. 1142,1145d929 < if ((p->p_csflags & CS_SIGNED) == 0) { < return NULL; < } <
./bsd/kern/mach_loader.c differences detected: 68d67 < #include 86c85 < #include --- > #include 91,94d89 < #if __x86_64__ < extern int bootarg_no32exec; /* bsd_init.c */ < #endif < 124,125c119 < .is_64bit_addr = 0, < .is_64bit_data = 0, --- > .is64bit = 0, 318,319c312,313 < const int fourk_binary_compatibility_unsafe = TRUE; < const int fourk_binary_compatibility_allow_wx = FALSE; --- > int fourk_binary_compatibility_unsafe = TRUE; > int fourk_binary_compatibility_allow_wx = FALSE; 353,354c347 < result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR); < result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA); --- > result->is64bit = ((imgp->ip_flags & IMGPF_IS_64BIT) == IMGPF_IS_64BIT); 364c357 < result->is_64bit_addr); --- > result->is64bit); 367c360 < vm_compute_max_offset(result->is_64bit_addr), --- > vm_compute_max_offset(result->is64bit), 371c364 < if (result->is_64bit_addr) { --- > if (result->is64bit) { 388c381 < if ( !cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION) ) { --- > if ( !cs_enforcement(NULL) && (header->flags & MH_ALLOW_STACK_EXECUTION) ) 390,391d382 < // TODO: Message Trace or log that this is happening < } 425,426c416 < result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR); < result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA); --- > result->is64bit = ((imgp->ip_flags & IMGPF_IS_64BIT) == IMGPF_IS_64BIT); 441c431 < if (!result->is_64bit_addr) { --- > if (!result->is64bit) { 451c441 < if (result->is_64bit_addr && --- > if (result->is64bit && 470c460 < if (!result->is_64bit_addr && /* not 64-bit address space */ --- > if (!result->is64bit && /* not 64-bit */ 521c511 < workq_mark_exiting(p); --- > workqueue_mark_exiting(p); 523c513 < workq_exit(p); --- > workqueue_exit(p); 533,545d522 < < #ifdef CONFIG_32BIT_TELEMETRY < if (!result->is_64bit_data) { < /* < * This may not need to be an AST; we merely need to ensure that < * we gather telemetry at the point where all of the information < * that we want has been added to the process. < */ < task_set_32bit_log_flag(get_threadtask(thread)); < act_set_astbsd(thread); < } < #endif /* CONFIG_32BIT_TELEMETRY */ < 635c612 < !grade_binary(header->cputype, --- > !grade_binary(header->cputype, 638,644c615 < < #if __x86_64__ < if (bootarg_no32exec && (header->cputype == CPU_TYPE_X86)) { < return(LOAD_BADARCH_X86); < } < #endif < --- > 646c617 < --- > 648c619 < --- > 870a842 > 932a905 > 1058c1031 < if (!cs_process_global_enforcement()) --- > if (!cs_enforcement(NULL)) 1087c1060 < if (cs_process_global_enforcement() || --- > if (cs_enforcement(NULL) || 1139,1154d1111 < #if __arm64__ < case LC_VERSION_MIN_IPHONEOS: { < struct version_min_command *vmc; < < if (pass != 1) { < break; < } < vmc = (struct version_min_command *) lcp; < if (vmc->sdk < (12 << 16)) { < /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */ < result->legacy_footprint = TRUE; < } < // printf("FBDP %s:%d vp %p (%s) sdk %d.%d.%d -> legacy_footprint=%d\n", __FUNCTION__, __LINE__, vp, vp->v_name, (vmc->sdk >> 16), ((vmc->sdk & 0xFF00) >> 8), (vmc->sdk & 0xFF), result->legacy_footprint); < break; < } < #endif /* __arm64__ */ 1167,1169c1124,1151 < if (ret == LOAD_SUCCESS) { < if(!got_code_signatures && cs_process_global_enforcement()) { < ret = LOAD_FAILURE; --- > if (ret == LOAD_SUCCESS) { > if (! 
got_code_signatures) { > if (cs_enforcement(NULL)) { > ret = LOAD_FAILURE; > } else { > #if !CONFIG_EMBEDDED > /* > * No embedded signatures: look for detached by taskgated, > * this is only done on OSX, on embedded platforms we expect everything > * to be have embedded signatures. > */ > struct cs_blob *blob; > > blob = ubc_cs_blob_get(vp, -1, file_offset); > if (blob != NULL) { > unsigned int cs_flag_data = blob->csb_flags; > if(0 != ubc_cs_generation_check(vp)) { > if (0 != ubc_cs_blob_revalidate(vp, blob, imgp, 0)) { > /* clear out the flag data if revalidation fails */ > cs_flag_data = 0; > result->csflags &= ~CS_VALID; > } > } > /* get flags to be applied to the process */ > result->csflags |= cs_flag_data; > } > #endif > } 1190c1172 < #if CONFIG_ENFORCE_SIGNED_CODE --- > #if CONFIG_EMBEDDED 1330,1331c1312 < vm_prot_t maxprot, < load_result_t *result) --- > vm_prot_t maxprot) 1433,1449d1413 < < #if CONFIG_EMBEDDED < (void) result; < #else /* CONFIG_EMBEDDED */ < /* < * This process doesn't have its new csflags (from < * the image being loaded) yet, so tell VM to override the < * current process's CS_ENFORCEMENT for this mapping. < */ < if (result->csflags & CS_ENFORCEMENT) { < cur_vmk_flags.vmkf_cs_enforcement = TRUE; < } else { < cur_vmk_flags.vmkf_cs_enforcement = FALSE; < } < cur_vmk_flags.vmkf_cs_enforcement_override = TRUE; < #endif /* CONFIG_EMBEDDED */ < 1825,1826c1789 < maxprot, < result); --- > maxprot); 1884,1885c1847 < scp->maxprot, < result); --- > scp->maxprot); 2002c1964 < ret = thread_userstackdefault(&addr, result->is_64bit_addr); --- > ret = thread_userstackdefault(&addr, result->is64bit); 2042a2005 > 2053,2056c2016,2019 < (uint32_t *)(((vm_offset_t)tcp) + < sizeof(struct thread_command)), < tcp->cmdsize - sizeof(struct thread_command), < &addr, &customstack, result); --- > (uint32_t *)(((vm_offset_t)tcp) + > sizeof(struct thread_command)), > tcp->cmdsize - sizeof(struct thread_command), > &addr, &customstack, result); 2061c2024 < --- > 2071,2074c2034,2037 < (uint32_t *)(((vm_offset_t)tcp) + < sizeof(struct thread_command)), < tcp->cmdsize - sizeof(struct thread_command), < &addr); --- > (uint32_t *)(((vm_offset_t)tcp) + > sizeof(struct thread_command)), > tcp->cmdsize - sizeof(struct thread_command), > &addr); 2087,2089c2050,2052 < (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), < tcp->cmdsize - sizeof(struct thread_command), < result); --- > (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), > tcp->cmdsize - sizeof(struct thread_command), > result); 2189c2152 < ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data); --- > ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is64bit); 2345,2346c2308 < myresult->is_64bit_addr = result->is_64bit_addr; < myresult->is_64bit_data = result->is_64bit_data; --- > myresult->is64bit = result->is64bit; 2415d2376 < 2418,2419c2379,2389 < if (blob->csb_cpu_type != cputype || < blob->csb_base_offset != macho_offset) { --- > if (blob->csb_cpu_type == cputype && > blob->csb_base_offset == macho_offset) { > /* it matches the blob we want here, lets verify the version */ > if(0 != ubc_cs_generation_check(vp)) { > if (0 != ubc_cs_blob_revalidate(vp, blob, imgp, 0)) { > ret = LOAD_FAILURE; /* set error same as from ubc_cs_blob_add */ > goto out; > } > } > ret = LOAD_SUCCESS; > } else { 2422,2437d2391 < goto out; < } < < /* It matches the blob we want here, let's verify the version */ < if 
(ubc_cs_generation_check(vp) == 0) { < /* No need to revalidate, we're good! */ < ret = LOAD_SUCCESS; < goto out; < } < < /* That blob may be stale, let's revalidate. */ < error = ubc_cs_blob_revalidate(vp, blob, imgp, 0); < if (error == 0) { < /* Revalidation succeeded, we're good! */ < ret = LOAD_SUCCESS; < goto out; 2439,2452c2393 < < if (error != EAGAIN) { < printf("load_code_signature: revalidation failed: %d\n", error); < ret = LOAD_FAILURE; < goto out; < } < < assert(error == EAGAIN); < < /* < * Revalidation was not possible for this blob. We just continue as if there was no blob, < * rereading the signature, and ubc_cs_blob_add will do the right thing. < */ < blob = NULL; --- > goto out;
./bsd/kern/sys_coalition.c differences detected: 219c219 < struct coalition_resource_usage cru = {}; --- > struct coalition_resource_usage cru; 318c318 < uint64_t ids[COALITION_NUM_TYPES] = {}; --- > uint64_t ids[COALITION_NUM_TYPES]; 352c352 < int roles[COALITION_NUM_TYPES] = {}; --- > int roles[COALITION_NUM_TYPES];
NO DIFFS in ./bsd/kern/posix_shm.c
NO DIFFS in ./bsd/kern/kpi_mbuf_internal.h

./bsd/kern/kern_resource.c differences detected: 2c2 < * Copyright (c) 2000-2018 Apple Inc. All rights reserved. --- > * Copyright (c) 2000-2017 Apple Inc. All rights reserved. 1147c1147 < struct rlimit lim = {}; --- > struct rlimit lim; 1235,1236c1235,1236 < struct user64_rusage rubuf64 = {}; < struct user32_rusage rubuf32 = {}; --- > struct user64_rusage rubuf64; > struct user32_rusage rubuf32; 1424,1430d1423 < static int < iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); < static int < iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); < static int < iopolicysys_vfs_atime_updates(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); < 1442a1436,1441 > > static int > iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); > static int > iopolicysys_vfs(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param); > 1463,1468c1462 < error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); < if (error) < goto out; < break; < case IOPOL_TYPE_VFS_ATIME_UPDATES: < error = iopolicysys_vfs_atime_updates(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); --- > error = iopolicysys_vfs(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); 1609c1603 < iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) --- > iopolicysys_vfs(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) 1677,1763d1670 < static inline int < get_thread_atime_policy(struct uthread *ut) < { < return (ut->uu_flag & UT_ATIME_UPDATE)? IOPOL_ATIME_UPDATES_OFF: IOPOL_ATIME_UPDATES_DEFAULT; < } < < static inline void < set_thread_atime_policy(struct uthread *ut, int policy) < { < if (policy == IOPOL_ATIME_UPDATES_OFF) { < ut->uu_flag |= UT_ATIME_UPDATE; < } else { < ut->uu_flag &= ~UT_ATIME_UPDATE; < } < } < < static inline void < set_task_atime_policy(struct proc *p, int policy) < { < if (policy == IOPOL_ATIME_UPDATES_OFF) { < OSBitOrAtomic16((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES, &p->p_vfs_iopolicy); < } else { < OSBitAndAtomic16(~((uint16_t)P_VFS_IOPOLICY_ATIME_UPDATES), &p->p_vfs_iopolicy); < } < } < < static inline int < get_task_atime_policy(struct proc *p) < { < return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES)? 
IOPOL_ATIME_UPDATES_OFF: IOPOL_ATIME_UPDATES_DEFAULT; < } < < static int < iopolicysys_vfs_atime_updates(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) < { < int error = 0; < thread_t thread; < < /* Validate scope */ < switch (scope) { < case IOPOL_SCOPE_THREAD: < thread = current_thread(); < break; < case IOPOL_SCOPE_PROCESS: < thread = THREAD_NULL; < break; < default: < error = EINVAL; < goto out; < } < < /* Validate policy */ < if (cmd == IOPOL_CMD_SET) { < switch (policy) { < case IOPOL_ATIME_UPDATES_DEFAULT: < case IOPOL_ATIME_UPDATES_OFF: < break; < default: < error = EINVAL; < goto out; < } < } < < /* Perform command */ < switch(cmd) { < case IOPOL_CMD_SET: < if (thread != THREAD_NULL) < set_thread_atime_policy(get_bsdthread_info(thread), policy); < else < set_task_atime_policy(p, policy); < break; < case IOPOL_CMD_GET: < if (thread != THREAD_NULL) < policy = get_thread_atime_policy(get_bsdthread_info(thread)); < else < policy = get_task_atime_policy(p); < iop_param->iop_policy = policy; < break; < default: < error = EINVAL; /* unknown command */ < break; < } < < out: < return (error); < } < 1793,1795d1699 < #if CONFIG_LEDGER_INTERVAL_MAX < ru->ri_interval_max_phys_footprint = get_task_phys_footprint_interval_max(p->task, FALSE); < #endif 1797c1701 < /* fall through */ --- > /* fall through */ 1835c1739 < rusage_info_current ri_current = {}; --- > rusage_info_current ri_current; 1910,1912d1813 < * RLIMIT_CPU_USAGE_MONITOR < * RLIMIT_THREAD_CPULIMITS < * RLIMIT_FOOTPRINT_INTERVAL 1923,1926d1823 < #if CONFIG_LEDGER_INTERVAL_MAX < uint32_t footprint_interval_flags; < uint64_t interval_max_footprint; < #endif /* CONFIG_LEDGER_INTERVAL_MAX */ 1989,2002d1885 < < #if CONFIG_LEDGER_INTERVAL_MAX < case RLIMIT_FOOTPRINT_INTERVAL: < footprint_interval_flags = uap->arg; // XXX temporarily stashing flags in argp (12592127) < /* < * There is currently only one option for this flavor. < */ < if ((footprint_interval_flags & FOOTPRINT_INTERVAL_RESET) == 0) { < error = EINVAL; < break; < } < interval_max_footprint = get_task_phys_footprint_interval_max(targetp->task, TRUE); < break; < #endif /* CONFIG_LEDGER_INTERVAL_MAX */
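
The kern_resource.c hunks above add a second VFS iopolicy, IOPOL_TYPE_VFS_ATIME_UPDATES, with IOPOL_ATIME_UPDATES_DEFAULT / IOPOL_ATIME_UPDATES_OFF values settable per thread (UT_ATIME_UPDATE) or per process (P_VFS_IOPOLICY_ATIME_UPDATES). The following is a hedged userspace sketch using setiopolicy_np(); the IOPOL_* constant names are taken from the kernel diff, and whether the SDK headers expose them under the same names is an assumption.

    #include <sys/resource.h>
    #include <stdio.h>

    /* Hedged sketch: opt the whole process out of access-time updates. */
    int
    main(void)
    {
            if (setiopolicy_np(IOPOL_TYPE_VFS_ATIME_UPDATES,
                IOPOL_SCOPE_PROCESS, IOPOL_ATIME_UPDATES_OFF) != 0) {
                    perror("setiopolicy_np");
                    return 1;
            }
            printf("atime updates disabled for this process\n");
            return 0;
    }
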
./bsd/kern/kern_backtrace.c differences detected: 71c71 < bzero(bt, sizeof(uintptr_t) * bt_len); --- >
NO DIFFS in ./bsd/kern/kern_acct.c
NO DIFFS in ./bsd/kern/kpi_socketfilter.c

./bsd/kern/policy_check.c differences detected: 122c122 < #if (MAC_POLICY_OPS_VERSION != 55) --- > #if (MAC_POLICY_OPS_VERSION != 52) 271,273c271,273 < CHECK_SET_HOOK(proc_notify_exec_complete) < .mpo_reserved5 = (mpo_reserved_hook_t *)common_hook, < .mpo_reserved6 = (mpo_reserved_hook_t *)common_hook, --- > CHECK_SET_HOOK(iokit_check_nvram_get) > CHECK_SET_HOOK(iokit_check_nvram_set) > CHECK_SET_HOOK(iokit_check_nvram_delete) 284d283 < CHECK_SET_HOOK(vnode_check_trigger_resolve) 287a287 > .mpo_reserved4 = (mpo_reserved_hook_t *)common_hook, 473a474,475 > CHECK_SET_HOOK(system_check_chud) >
NO DIFFS in ./bsd/kern/sysv_msg.c

./bsd/kern/syscalls.master differences detected: 105c105 < 55 AUE_REBOOT ALL { int reboot(int opt, char *msg) NO_SYSCALL_STUB; } --- > 55 AUE_REBOOT ALL { int reboot(int opt, char *command) NO_SYSCALL_STUB; } 272c272 < 184 AUE_SIGRETURN ALL { int sigreturn(struct ucontext *uctx, int infostyle, user_addr_t token) NO_SYSCALL_STUB; } --- > 184 AUE_SIGRETURN ALL { int sigreturn(struct ucontext *uctx, int infostyle) NO_SYSCALL_STUB; } 839,844d838 < #if CONFIG_WORKQUEUE < 530 AUE_WORKLOOPCTL ALL { int kqueue_workloop_ctl(user_addr_t cmd, uint64_t options, user_addr_t addr, size_t sz) NO_SYSCALL_STUB; } < #else < 530 AUE_NULL ALL { int enosys(void); } < #endif // CONFIG_WORKQUEUE < 531 AUE_NULL ALL { int enosys(void); }
NO DIFFS in ./bsd/kern/netboot.c
NO DIFFS in ./bsd/kern/kern_clock.c
NO DIFFS in ./bsd/kern/socket_info.c
NO DIFFS in ./bsd/kern/mach_process.c
NO DIFFS in ./bsd/kern/subr_prf.c

./bsd/kern/kern_newsysctl.c differences detected: 87d86 < 210d208 < 268d265 < 648c645 < char tempbuf[10] = {}; --- > char tempbuf[10]; 840c837 < int newoid[CTL_MAXNAME] = {}; --- > int newoid[CTL_MAXNAME]; 972c969 < int error, oid[CTL_MAXNAME] = {}; --- > int error, oid[CTL_MAXNAME]; 1333d1329 < sysctl_handler_t oid_handler = NULL; 1471,1476c1467 < /* < * sysctl_unregister_oid() may change the handler value, so grab it < * under the lock. < */ < oid_handler = oid->oid_handler; < if (!oid_handler) { --- > if (!oid->oid_handler) { 1515d1505 < 1517c1507,1509 < i = oid_handler(oid, name + indx, namelen - indx, req); --- > i = (oid->oid_handler) (oid, > name + indx, namelen - indx, > req); 1519c1511,1513 < i = oid_handler(oid, oid->oid_arg1, oid->oid_arg2, req); --- > i = (oid->oid_handler) (oid, > oid->oid_arg1, oid->oid_arg2, > req); NO DIFFS in ./bsd/kern/tty_dev.h
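The substantive kern_newsysctl.c change is in the sysctl dispatch path: instead of dereferencing oid->oid_handler once for the NULL check and again for the call, the handler is snapshotted into a local sysctl_handler_t while the lock is held, because sysctl_unregister_oid() may change it concurrently. A generic, hedged sketch of that snapshot-under-lock idiom (names are illustrative, not the xnu code):

/* Hedged illustration of reading a mutable function pointer once, under the
 * lock, and calling through the local copy. The real sysctl code also keeps
 * the OID alive across the call; that part is omitted here. */
#include <pthread.h>
#include <stddef.h>

typedef int (*handler_t)(void *arg);

struct node {
    handler_t handler;          /* may be cleared by an unregister path */
    void *arg;
};

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

int
node_call(struct node *n)
{
    pthread_mutex_lock(&node_lock);
    /* unregister may change the handler value, so grab it under the lock */
    handler_t h = n->handler;
    pthread_mutex_unlock(&node_lock);

    if (h == NULL)
        return -1;              /* analogous to returning EINVAL */
    return h(n->arg);           /* call through the snapshot */
}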

./bsd/kern/kern_ntptime.c differences detected: 270c270 < struct user64_ntptimeval user_ntv = {}; --- > struct user64_ntptimeval user_ntv; 279c279 < struct user32_ntptimeval user_ntv = {}; --- > struct user32_ntptimeval user_ntv; 296c296 < ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) --- > ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, __unused int32_t *retval) 298c298 < struct timex ntv = {}; --- > struct timex ntv; 300c300 < unsigned int modes; --- > int modes; 337c337 < os_log(OS_LOG_DEFAULT, "%s: BEFORE modes %u offset %ld freq %ld status %d constant %ld time_adjtime %lld\n", --- > os_log(OS_LOG_DEFAULT, "%s:BEFORE modes %u offset %ld freq %ld status %d constant %ld time_adjtime %lld\n", 432,433c432,433 < os_log(OS_LOG_DEFAULT, "%s: AFTER modes %u offset %lld freq %lld status %d constant %ld time_adjtime %lld\n", < __func__, modes, time_offset, time_freq, time_status, time_constant, time_adjtime); --- > os_log(OS_LOG_DEFAULT, "%s:AFTER offset %lld freq %lld status %d constant %ld time_adjtime %lld\n", > __func__, time_offset, time_freq, time_status, time_constant, time_adjtime); 442c442 < struct user64_timex user_ntv = {}; --- > struct user64_timex user_ntv; 444d443 < user_ntv.modes = modes; 467c466 < struct user32_timex user_ntv = {}; --- > struct user32_timex user_ntv; 469d467 < user_ntv.modes = modes; 717c715 < struct user64_timeval user_atv = {}; --- > struct user64_timeval user_atv; 722c720 < struct user32_timeval user_atv = {}; --- > struct user32_timeval user_atv; NO DIFFS in ./bsd/kern/mach_fat.h
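A pattern that recurs in kern_resource.c, kern_newsysctl.c, and kern_ntptime.c is zero-initializing structures that will later be copied out to user space (e.g. struct user64_ntptimeval user_ntv = {};), so fields the code does not explicitly set cannot carry stale kernel stack contents; kern_ntptime.c also starts reporting the request's modes back in user_ntv.modes. A small, hedged user-space analogue of the zero-before-copy idiom (the struct is a stand-in, not an xnu type):

/* Hedged sketch: build the reply in a zero-initialized local so every byte
 * handed back is deliberate. memcpy() stands in for copyout(). */
#include <string.h>
#include <stdint.h>
#include <stddef.h>

struct reply64 {                /* stand-in for user64_ntptimeval and friends */
    int64_t time_sec;
    int32_t status;
    int64_t frequency;
};

size_t
fill_reply(void *dst, int64_t sec, int32_t status)
{
    struct reply64 tmp = { 0 }; /* the diff uses "= {}"; same effect here */

    tmp.time_sec = sec;
    tmp.status = status;
    /* frequency intentionally stays 0 instead of whatever was on the stack */

    memcpy(dst, &tmp, sizeof(tmp));
    return sizeof(tmp);
}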
NO DIFFS in ./bsd/kern/kern_csr.c
NO DIFFS in ./bsd/kern/sys_domain.c

./bsd/kern/kern_exit.c differences detected: 315,329d314 < < uint64_t ledger_internal; < uint64_t ledger_internal_compressed; < uint64_t ledger_iokit_mapped; < uint64_t ledger_alternate_accounting; < uint64_t ledger_alternate_accounting_compressed; < uint64_t ledger_purgeable_nonvolatile; < uint64_t ledger_purgeable_nonvolatile_compressed; < uint64_t ledger_page_table; < uint64_t ledger_phys_footprint; < uint64_t ledger_phys_footprint_lifetime_max; < uint64_t ledger_network_nonvolatile; < uint64_t ledger_network_nonvolatile_compressed; < uint64_t ledger_wired_mem; < 355d339 < static_assert(sizeof(struct proc_uniqidentifierinfo) == sizeof(struct crashinfo_proc_uniqidentifierinfo)); 430,495d413 < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) { < ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(p->task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max)); < } < < // In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) { < ledger_internal = get_task_internal(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) { < ledger_internal_compressed = get_task_internal_compressed(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) { < ledger_iokit_mapped = get_task_iokit_mapped(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) { < ledger_alternate_accounting = get_task_alternate_accounting(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) { < ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) { < ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) { < ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, 
&ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) { < ledger_page_table = get_task_page_table(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) { < ledger_phys_footprint = get_task_phys_footprint(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) { < ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) { < ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed)); < } < < if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) { < ledger_wired_mem = get_task_wired_mem(corpse_task); < kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem)); < } < 698c616 < payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT); --- > payload, payload_size, reason_string, reason_flags); 1149c1067 < workq_mark_exiting(p); --- > workqueue_mark_exiting(p); 1163c1081 < workq_exit(p); --- > workqueue_exit(p); 1444a1363,1364 > vm_purgeable_disown(p->task); > 1514d1433 < DTRACE_PROC2(exited, proc_t, p, int, exitval); 1538d1456 < DTRACE_PROC2(exited, proc_t, p, int, exitval); 1800c1718 < wait4_data = &uth->uu_save.uus_wait4_data; --- > wait4_data = &uth->uu_kevent.uu_wait4_data; 1847,1854d1764 < < /* we're not using a continuation here but we still need to stash < * the args for stackshot. 
*/ < uth = current_uthread(); < wait4_data = &uth->uu_save.uus_wait4_data; < wait4_data->args = uap; < thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess); < 1884c1794 < struct user64_rusage my_rusage = {}; --- > struct user64_rusage my_rusage; 1891c1801 < struct user32_rusage my_rusage = {}; --- > struct user32_rusage my_rusage; 1989c1899 < wait4_data = &uth->uu_save.uus_wait4_data; --- > wait4_data = &uth->uu_kevent.uu_wait4_data; 1993d1902 < thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess); 2030c1939 < waitid_data = &uth->uu_save.uus_waitid_data; --- > waitid_data = &uth->uu_kevent.uu_waitid_data; 2254c2163 < waitid_data = &uth->uu_save.uus_waitid_data; --- > waitid_data = &uth->uu_kevent.uu_waitid_data; 2275c2184 < proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked) --- > proc_reparentlocked(proc_t child, proc_t parent, int cansignal, int locked) 2308c2217 < if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) --- > if ((cansignal != 0) && (initproc == parent) && (child->p_stat == SZOMB)) 2818,2834d2726 < < void < kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_waitinfo_t *waitinfo) < { < assert(thread != NULL); < assert(waitinfo != NULL); < < struct uthread *ut = get_bsdthread_info(thread); < waitinfo->context = 0; < // ensure wmesg is consistent with a thread waiting in wait4 < assert(!strcmp(ut->uu_wmesg, "waitcoll") || !strcmp(ut->uu_wmesg, "wait")); < struct wait4_nocancel_args *args = ut->uu_save.uus_wait4_data.args; < // May not actually contain a pid; this is just the argument to wait4. < // See man wait4 for other valid wait4 arguments. < waitinfo->owner = args->pid; < } < NO DIFFS in ./bsd/kern/kpi_socket.c
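The large kern_exit.c addition populates the corpse crash-info buffer with per-task ledger values (internal, iokit_mapped, purgeable, page table, phys_footprint, network nonvolatile, wired memory, and so on), and every block follows the same two-step kcdata pattern: reserve a typed slot with kcdata_get_memory_addr(), then fill it with kcdata_memcpy(). A hedged sketch of that repeated step as a helper (the helper and its getter parameter are illustrative; the kcdata calls are the ones the diff uses):

/* Illustrative only: one TASK_CRASHINFO_LEDGER_* entry, written the way each
 * hunk in the diff does it. Would live inside xnu; not standalone code. */
static void
add_ledger_crash_info(kcdata_descriptor_t crash_info_ptr, uint32_t type,
    uint64_t (*getter)(task_t), task_t corpse_task)
{
    mach_vm_address_t uaddr = 0;
    uint64_t value;

    /* Step 1: reserve space for a typed item in the kcdata buffer. */
    if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, type,
        sizeof(value), &uaddr)) {
        /* Step 2: fetch the ledger value and copy it into the slot. */
        value = getter(corpse_task);
        kcdata_memcpy(crash_info_ptr, uaddr, &value, sizeof(value));
    }
}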
NO DIFFS in ./bsd/kern/sys_reason.c

./bsd/kern/kern_aio.c differences detected: 1509d1508 < free_context = TRUE; 1531d1529 < free_context = FALSE; 1650c1648 < if (free_context) { --- > if ((lio_context != NULL) && ((lio_context->io_issued == 0) || (free_context == TRUE))) {
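In kern_aio.c the cleanup condition for the lio_listio context is simplified: free_context now starts out TRUE, is cleared once the context has been successfully handed off, and the teardown path only has to test that single flag instead of re-deriving ownership from lio_context->io_issued. A hedged, generic sketch of that ownership-flag idiom (names are illustrative, not the aio code):

/* Hedged sketch: assume we must free the context until the hand-off
 * succeeds, then flip the flag; cleanup stays a single test. */
#include <stdbool.h>
#include <stdlib.h>

struct io_context { int pending; };

static int
submit_all(struct io_context *ctx)      /* illustrative stand-in */
{
    ctx->pending = 1;
    return 0;
}

int
issue_batch(void)
{
    bool free_context = true;           /* we own the context by default */
    struct io_context *ctx = calloc(1, sizeof(*ctx));
    int error;

    if (ctx == NULL)
        return -1;

    error = submit_all(ctx);
    if (error == 0)
        free_context = false;           /* completion path owns it now */

    if (free_context)                   /* single, simple cleanup test */
        free(ctx);
    return error;
}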
./bsd/kern/tty_ptmx.c differences detected: 599a600,602 > if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) { > kn->kn_udata = kev->udata; > } 823d825 < kn->kn_flags |= EV_EOF; 832d833 < kn->kn_data = retval; 907a909,911 > if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) { > kn->kn_udata = kev->udata; > } NO DIFFS in ./bsd/kern/subr_xxx.c
NO DIFFS in ./bsd/kern/kern_physio.c

./bsd/kern/subr_log.c differences detected: 164,166d163 < uint8_t __firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT; < uint8_t __firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES; < 790c787 < mach_vm_size_t buffer_size = (__firehose_buffer_kernel_chunk_count * FIREHOSE_CHUNK_SIZE); --- > mach_vm_size_t buffer_size = (FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE); 815d811 < VM_MAP_KERNEL_FLAGS_NONE, 883,894c879 < if (!PE_parse_boot_argn("firehose_chunk_count", &__firehose_buffer_kernel_chunk_count, sizeof(__firehose_buffer_kernel_chunk_count))) { < __firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT; < } < if (!PE_parse_boot_argn("firehose_io_pages", &__firehose_num_kernel_io_pages, sizeof(__firehose_num_kernel_io_pages))) { < __firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES; < } < if (!__firehose_kernel_configuration_valid(__firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages)) { < printf("illegal firehose configuration %u/%u, using defaults\n", __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages); < __firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT; < __firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES; < } < vm_size_t size = __firehose_buffer_kernel_chunk_count * FIREHOSE_CHUNK_SIZE; --- > vm_size_t size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE; 909c894 < printf("oslog_init completed, %u chunks, %u io pages\n", __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages); --- > kprintf("oslog_init completed\n"); 1351,1393d1335 < #ifdef CONFIG_XNUPOST < < uint32_t find_pattern_in_buffer(char * pattern, uint32_t len, int expected_count); < < /* < * returns count of pattern found in systemlog buffer. < * stops searching further if count reaches expected_count. < */ < uint32_t < find_pattern_in_buffer(char * pattern, uint32_t len, int expected_count) < { < int match_count = 0; < int i = 0; < int j = 0; < int no_match = 0; < int pos = 0; < char ch = 0; < < if (pattern == NULL || len == 0 || expected_count == 0) { < return 0; < } < < for (i = 0; i < msgbufp->msg_size; i++) { < no_match = 0; < for (j = 0; j < (int)len; j++) { < pos = (msgbufp->msg_bufx + i + j) % msgbufp->msg_size; < ch = msgbufp->msg_bufc[pos]; < if (ch != pattern[j]) { < no_match = 1; < break; < } < } < if (no_match == 0) { < match_count++; < if (match_count >= expected_count) { < break; < } < } < } < return match_count; < } < < #endif NO DIFFS in ./bsd/kern/qsort.c
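subr_log.c gains two things: the firehose buffer geometry becomes tunable through the firehose_chunk_count/firehose_io_pages boot-args (validated, with a fallback to the defaults if the combination is illegal), and, under CONFIG_XNUPOST, a find_pattern_in_buffer() helper that counts occurrences of a pattern in the wrap-around system log buffer. A hedged, user-space re-implementation of that circular-buffer search (simplified; the kernel version walks msgbufp->msg_bufc starting at msg_bufx):

/* Count matches of pattern[] in a circular buffer, wrapping the index with
 * a modulo, and stop early once expected_count matches have been seen. */
#include <stdint.h>
#include <stddef.h>

uint32_t
count_pattern_in_ring(const char *buf, size_t buf_size, size_t start,
    const char *pattern, size_t len, uint32_t expected_count)
{
    uint32_t matches = 0;

    if (buf == NULL || buf_size == 0 || pattern == NULL || len == 0 ||
        expected_count == 0)
        return 0;

    for (size_t i = 0; i < buf_size; i++) {
        size_t j;
        for (j = 0; j < len; j++) {
            /* the read index wraps around the end of the buffer */
            if (buf[(start + i + j) % buf_size] != pattern[j])
                break;
        }
        if (j == len && ++matches >= expected_count)
            break;          /* enough matches; stop searching */
    }
    return matches;
}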

./bsd/kern/kern_prot.c differences detected: 673,678d672 < proc_issetugid (proc_t p) < { < return (p->p_flag & P_SUGID) ? 1 : 0; < } < < int 690c684 < *retval = proc_issetugid(p); --- > *retval = (p->p_flag & P_SUGID) ? 1 : 0;
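kern_prot.c factors the P_SUGID test out of the issetugid() syscall body into a proc_issetugid(proc_t) helper that other kernel code can call; the syscall now just returns its result. From user space the behaviour is unchanged and still reachable via issetugid(2):

/* Minimal user-space check; issetugid(2) is the long-standing interface. */
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
    /* Non-zero: the process was marked P_SUGID (e.g. set-id exec or a
     * credential change), so it should not trust its environment. */
    printf("issetugid() = %d\n", issetugid());
    return 0;
}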
./bsd/kern/kern_persona.c differences detected: 44c44,46 < #include --- > #define pna_info(fmt, ...) \ > printf("%s: " fmt "\n", __func__, ## __VA_ARGS__) > 46c48 < os_log_error(OS_LOG_DEFAULT, "ERROR: " fmt, ## __VA_ARGS__) --- > printf("ERROR[%s]: " fmt "\n", __func__, ## __VA_ARGS__) 58,59d59 < #define PERSONA_ALLOC_TOKEN (0x7a0000ae) < #define PERSONA_INIT_TOKEN (0x7500005e) 61d60 < #define persona_initialized(p) ((p)->pna_valid == PERSONA_MAGIC || (p)->pna_valid == PERSONA_INIT_TOKEN) 78,79d76 < os_refgrp_decl(static, persona_refgrp, "persona", NULL); < 132,136d128 < int err = persona_init_begin(g_system_persona); < assert(err == 0); < < persona_init_end(g_system_persona, err); < 142c134 < struct persona *persona; --- > struct persona *persona, *tmp; 143a136,137 > kauth_cred_t tmp_cred; > gid_t new_group; 176d169 < persona_dbg("Starting persona allocation for: '%s'", persona->pna_login); 180c173 < os_ref_init(&persona->pna_refcount, &persona_refgrp); --- > persona->pna_refcount = 1; 194,246d186 < persona->pna_type = type; < persona->pna_id = id; < persona->pna_valid = PERSONA_ALLOC_TOKEN; < < /* < * NOTE: this persona has not been fully initialized. A subsequent < * call to persona_init_begin() followed by persona_init_end() will make < * the persona visible to the rest of the system. < */ < if (error) { < *error = 0; < } < return persona; < < out_error: < (void)hw_atomic_add(&g_total_personas, -1); < zfree(persona_zone, persona); < if (error) { < *error = err; < } < return NULL; < } < < /** < * persona_init_begin < * < * This function begins initialization of a persona. It first acquires the < * global persona list lock via lock_personas(), then selects an appropriate < * persona ID and sets up the persona's credentials. This function *must* be < * followed by a call to persona_init_end() which will mark the persona < * structure as valid < * < * Conditions: < * persona has been allocated via persona_alloc() < * nothing locked < * < * Returns: < * global persona list is locked (even on error) < */ < int persona_init_begin(struct persona *persona) < { < struct persona *tmp; < int err = 0; < kauth_cred_t tmp_cred; < gid_t new_group; < uid_t id; < < if (!persona || (persona->pna_valid != PERSONA_ALLOC_TOKEN)) { < return EINVAL; < } < < id = persona->pna_id; < 249c189,191 < if (id == PERSONA_ID_NONE) --- > if (id != PERSONA_ID_NONE) > persona->pna_id = id; > else 252c194 < persona_dbg("Beginning Initialization of %d:%d (%s)...", id, persona->pna_id, persona->pna_login); --- > persona_dbg("Adding %d (%s) to global list...", persona->pna_id, persona->pna_login); 256,258c198 < persona_lock(tmp); < if (id == PERSONA_ID_NONE && tmp->pna_id == persona->pna_id) { < persona_unlock(tmp); --- > if (id == PERSONA_ID_NONE && tmp->pna_id == id) { 266,268c206,207 < if (strncmp(tmp->pna_login, persona->pna_login, sizeof(tmp->pna_login)) == 0 || < tmp->pna_id == persona->pna_id) { < persona_unlock(tmp); --- > if (strncmp(tmp->pna_login, login, sizeof(tmp->pna_login)) == 0 > || tmp->pna_id == id) { 276d214 < persona_unlock(tmp); 279c217 < goto out; --- > goto out_unlock; 292c230 < goto out; --- > goto out_unlock; 308c246 < goto out; --- > goto out_unlock; 310a249,254 > persona->pna_type = type; > > /* insert the, now valid, persona into the global list! 
*/ > persona->pna_valid = PERSONA_MAGIC; > LIST_INSERT_HEAD(&all_personas, persona, pna_list); > 315c259,260 < persona->pna_valid = PERSONA_INIT_TOKEN; --- > out_unlock: > unlock_personas(); 317,324c262,274 < out: < if (err != 0) { < persona_dbg("ERROR:%d while initializing %d:%d (%s)...", err, id, persona->pna_id, persona->pna_login); < /* < * mark the persona with an error so that persona_init_end() < * will *not* add it to the global list. < */ < persona->pna_id = PERSONA_ID_NONE; --- > if (err) { > switch (err) { > case EEXIST: > persona_dbg("Login '%s' (%d) already exists", > login, persona->pna_id); > break; > case EACCES: > persona_dbg("kauth_error for persona:%d", persona->pna_id); > break; > default: > persona_dbg("Unknown error:%d", err); > } > goto out_error; 327,331c277,284 < /* < * leave the global persona list locked: it will be < * unlocked in a call to persona_init_end() < */ < return err; --- > return persona; > > out_error: > (void)hw_atomic_add(&g_total_personas, -1); > zfree(persona_zone, persona); > if (error) > *error = err; > return NULL; 334,349c287 < /** < * persona_init_end < * < * This function finalizes the persona initialization by marking it valid and < * adding it to the global list of personas. After unlocking the global list, < * the persona will be visible to the reset of the system. The function will < * only mark the persona valid if the input parameter 'error' is 0. < * < * Conditions: < * persona is initialized via persona_init_begin() < * global persona list is locked via lock_personas() < * < * Returns: < * global persona list is unlocked < */ < void persona_init_end(struct persona *persona, int error) --- > int persona_invalidate(struct persona *persona) 351,353c289,291 < if (persona == NULL) { < return; < } --- > int error = 0; > if (!persona) > return EINVAL; 355,372c293,302 < /* < * If the pna_valid member is set to the INIT_TOKEN value, then it has < * successfully gone through persona_init_begin(), and we can mark it < * valid and make it visible to the rest of the system. However, if < * there was an error either during initialization or otherwise, we < * need to decrement the global count of personas because this one < * will be disposed-of by the callers invocation of persona_put(). 
< */ < if (error != 0 || persona->pna_valid == PERSONA_ALLOC_TOKEN) { < persona_dbg("ERROR:%d after initialization of %d (%s)", error, persona->pna_id, persona->pna_login); < /* remove this persona from the global count */ < (void)hw_atomic_add(&g_total_personas, -1); < } else if (error == 0 && < persona->pna_valid == PERSONA_INIT_TOKEN) { < persona->pna_valid = PERSONA_MAGIC; < LIST_INSERT_HEAD(&all_personas, persona, pna_list); < persona_dbg("Initialization of %d (%s) Complete.", persona->pna_id, persona->pna_login); < } --- > lock_personas(); > persona_lock(persona); > > if (!persona_valid(persona)) > panic("Double-invalidation of persona %p", persona); > > LIST_REMOVE(persona, pna_list); > if (hw_atomic_add(&g_total_personas, -1) == UINT_MAX) > panic("persona ref count underflow!\n"); > persona_mkinvalid(persona); 373a304 > persona_unlock(persona); 374a306,307 > > return error; 379,380c312,316 < os_ref_retain_locked(&persona->pna_refcount); < return persona; --- > if (persona->pna_refcount) { > persona->pna_refcount++; > return persona; > } > return NULL; 403,404c339,341 < if (os_ref_release_locked(&persona->pna_refcount) == 0) { < destroy = 1; --- > if (persona->pna_refcount >= 0) { > if (--(persona->pna_refcount) == 0) > destroy = 1; 419d355 < persona_lock(persona); 426d361 < persona_unlock(persona); 466,493d400 < struct persona *persona_lookup_and_invalidate(uid_t id) < { < struct persona *persona, *entry, *tmp; < < persona = NULL; < < lock_personas(); < LIST_FOREACH_SAFE(entry, &all_personas, pna_list, tmp) { < persona_lock(entry); < if (entry->pna_id == id) { < if (persona_valid(entry)) { < persona = persona_get_locked(entry); < assert(persona != NULL); < LIST_REMOVE(persona, pna_list); < if (hw_atomic_add(&g_total_personas, -1) == UINT_MAX) < panic("persona ref count underflow!\n"); < persona_mkinvalid(persona); < } < persona_unlock(entry); < break; < } < persona_unlock(entry); < } < unlock_personas(); < < return persona; < } < 940c847 < if (!persona_initialized(persona)) { --- > if (!persona_valid(persona)) { 977c884 < if (!persona_initialized(persona)) { --- > if (!persona_valid(persona)) { 1058c965 < if (!persona_initialized(persona)) { --- > if (!persona_valid(persona)) { 1094c1001 < int persona_set_groups(struct persona *persona, gid_t *groups, unsigned ngroups, uid_t gmuid) --- > int persona_set_groups(struct persona *persona, gid_t *groups, int ngroups, uid_t gmuid) 1105c1012 < if (!persona_initialized(persona)) { --- > if (!persona_valid(persona)) { 1116c1023 < new_cred = kauth_cred_setgroups(my_cred, groups, (int)ngroups, gmuid); --- > new_cred = kauth_cred_setgroups(my_cred, groups, ngroups, gmuid); 1126c1033 < int persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups, unsigned groups_sz) --- > int persona_get_groups(struct persona *persona, int *ngroups, gid_t *groups, int groups_sz) 1129c1036 < if (!persona || !persona->pna_cred || !groups || !ngroups || groups_sz > NGROUPS) --- > if (!persona || !persona->pna_cred || !groups || !ngroups) 1136,1138c1043 < int kauth_ngroups = (int)groups_sz; < kauth_cred_getgroups(persona->pna_cred, groups, &kauth_ngroups); < *ngroups = (unsigned)kauth_ngroups; --- > kauth_cred_getgroups(persona->pna_cred, groups, ngroups);
./bsd/kern/sysv_shm.c differences detected: 491d490 < VM_MAP_KERNEL_FLAGS_NONE,
./bsd/kern/kern_event.c differences detected: 58c58 < #include --- > #include 90d89 < #include 106,108d104 < #include < #include < #include 125a122,128 > /* > * JMM - this typedef needs to be unified with pthread_priority_t > * and mach_msg_priority_t. It also needs to be the same type > * everywhere. > */ > typedef int32_t qos_t; > 129a133,140 > #define KNUSE_NONE 0x0 > #define KNUSE_STEAL_DROP 0x1 > #define KNUSE_BOOST 0x2 > static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn, int flags); > static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn); > static int kqlock2knotedetach(struct kqueue *kq, struct knote *kn, int flags); > static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int flags); > 158c169 < user_addr_t eventlist, int nevents, --- > user_addr_t eventlist, int nevents, 169,175d179 < static int kevent_register_wait_prepare(struct knote *kn, struct kevent_internal_s *kev); < static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread, < struct knote_lock_ctx *knlc, thread_continue_t cont, < struct _kevent_register *cont_args) __dead2; < static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2; < static void kevent_register_wait_cleanup(struct knote *kn); < static inline void kqueue_release_last(struct proc *p, kqueue_t kqu); 178c182 < void *data); --- > void *data); 182c186,188 < struct filt_process_s *process_data, int *countp); --- > struct filt_process_s *process_data, int *countp, struct proc *p); > static struct kqtailq *kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index); > static struct kqtailq *kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index); 185,186c191 < static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn); < static void kqueue_threadreq_initiate(struct kqueue *kq, struct kqrequest *kqr, kq_index_t qos, int flags); --- > static struct kqtailq *kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index); 188,190c193,197 < static void kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn, kq_index_t qos); < static void kqworkq_unbind(proc_t p, struct kqrequest *kqr); < static thread_qos_t kqworkq_unbind_locked(struct kqworkq *kqwq, struct kqrequest *kqr, thread_t thread); --- > static void kqworkq_request_thread(struct kqworkq *kqwq, kq_index_t qos_index); > static void kqworkq_request_help(struct kqworkq *kqwq, kq_index_t qos_index); > static void kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index); > static void kqworkq_bind_thread_impl(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags); > static void kqworkq_unbind_thread(struct kqworkq *kqwq, kq_index_t qos_index, thread_t thread, unsigned int flags); 193,196c200,212 < static void kqworkloop_update_override(struct kqworkloop *kqwl, kq_index_t override_index); < static void kqworkloop_unbind(proc_t p, struct kqworkloop *kwql); < static thread_qos_t kqworkloop_unbind_locked(struct kqworkloop *kwql, thread_t thread); < static kq_index_t kqworkloop_owner_override(struct kqworkloop *kqwl); --- > enum { > KQWL_UO_NONE = 0, > KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI = 0x1, > KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI = 0x2, > KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS = 0x4, > KQWL_UO_UPDATE_OVERRIDE_LAZY = 0x8 > }; > > static void kqworkloop_update_override(struct kqworkloop *kqwl, kq_index_t qos_index, kq_index_t override_index, uint32_t flags); > static void kqworkloop_bind_thread_impl(struct kqworkloop *kqwl, thread_t 
thread, unsigned int flags); > static void kqworkloop_unbind_thread(struct kqworkloop *kqwl, thread_t thread, unsigned int flags); > static inline kq_index_t kqworkloop_combined_qos(struct kqworkloop *kqwl, boolean_t *); > static void kqworkloop_update_suppress_sync_count(struct kqrequest *kqr, uint32_t flags); 210,211d225 < KQWL_UTQ_UNBINDING, /* attempt to rebind */ < KQWL_UTQ_PARKING, 220c234 < * The QoS is the maximum QoS of an event enqueued on this workloop in --- > * The async QoS is the maximum QoS of an event enqueued on this workloop in 225c239,245 < KQWL_UTQ_SET_QOS_INDEX, --- > KQWL_UTQ_SET_ASYNC_QOS, > /* > * The sync waiters QoS is the maximum QoS of any thread blocked on an > * EVFILT_WORKLOOP knote marked with the NOTE_WL_SYNC_WAIT bit. > * If there is no such knote, this QoS is 0. > */ > KQWL_UTQ_SET_SYNC_WAITERS_QOS, 230d249 < static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags); 233c252,255 < struct filt_process_s *process_data); --- > struct filt_process_s *process_data, struct proc *p); > #if 0 > static void knote_put(struct knote *kn); > #endif 236c258 < struct knote_lock_ctx *knlc, struct proc *p); --- > struct kevent_internal_s *kev, struct proc *p, int *knoteuse_flags); 237a260 > static void kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p, kn_status_t *kn_status, uint16_t *kq_state); 239c262 < static void knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc); --- > static void knote_drop(struct knote *kn, struct proc *p); 256,260c279,283 < static bool knote_should_apply_qos_override(struct kqueue *kq, struct knote *kn, < int result, thread_qos_t *qos_out); < static void knote_apply_qos_override(struct knote *kn, kq_index_t qos_index); < static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result); < static void knote_reset_priority(struct knote *kn, pthread_priority_t pp); --- > static kq_index_t knote_get_queue_index(struct knote *kn); > static struct kqtailq *knote_get_queue(struct knote *kn); > static kq_index_t knote_get_req_index(struct knote *kn); > static kq_index_t knote_get_qos_index(struct knote *kn); > static void knote_set_qos_index(struct knote *kn, kq_index_t qos_index); 261a285,286 > static kq_index_t knote_get_sync_qos_override_index(struct knote *kn); > static void knote_set_qos_override_index(struct knote *kn, kq_index_t qos_index, boolean_t override_is_sync); 264,289c289,293 < static zone_t knote_zone; < static zone_t kqfile_zone; < static zone_t kqworkq_zone; < static zone_t kqworkloop_zone; < #if DEVELOPMENT || DEBUG < #define KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK (1U << 0) < #define KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS (1U << 1) < #define KEVENT_PANIC_BOOT_ARG_INITIALIZED (1U << 31) < < #define KEVENT_PANIC_DEFAULT_VALUE (0) < static uint32_t < kevent_debug_flags(void) < { < static uint32_t flags = KEVENT_PANIC_DEFAULT_VALUE; < < if ((flags & KEVENT_PANIC_BOOT_ARG_INITIALIZED) == 0) { < uint32_t value = 0; < if (!PE_parse_boot_argn("kevent_debug", &value, sizeof(value))) { < value = KEVENT_PANIC_DEFAULT_VALUE; < } < value |= KEVENT_PANIC_BOOT_ARG_INITIALIZED; < os_atomic_store(&flags, value, relaxed); < } < return flags; < } < #endif --- > static int filt_fileattach(struct knote *kn, struct kevent_internal_s *kev); > SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = { > .f_isfd = 1, > .f_attach = filt_fileattach, > }; 291c295,305 < #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) --- > static void filt_kqdetach(struct 
knote *kn); > static int filt_kqueue(struct knote *kn, long hint); > static int filt_kqtouch(struct knote *kn, struct kevent_internal_s *kev); > static int filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); > SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = { > .f_isfd = 1, > .f_detach = filt_kqdetach, > .f_event = filt_kqueue, > .f_touch = filt_kqtouch, > .f_process = filt_kqprocess, > }; 295d308 < static int filt_badevent(struct knote *kn, long hint); 299a313,325 > static int filt_procattach(struct knote *kn, struct kevent_internal_s *kev); > static void filt_procdetach(struct knote *kn); > static int filt_proc(struct knote *kn, long hint); > static int filt_proctouch(struct knote *kn, struct kevent_internal_s *kev); > static int filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); > SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = { > .f_attach = filt_procattach, > .f_detach = filt_procdetach, > .f_event = filt_proc, > .f_touch = filt_proctouch, > .f_process = filt_procprocess, > }; > 302a329 > 303a331 > 304a333,341 > > static zone_t knote_zone; > static zone_t kqfile_zone; > static zone_t kqworkq_zone; > static zone_t kqworkloop_zone; > > #define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) > > /* Mach portset filter */ 305a343,381 > > /* User filter */ > static int filt_userattach(struct knote *kn, struct kevent_internal_s *kev); > static void filt_userdetach(struct knote *kn); > static int filt_user(struct knote *kn, long hint); > static int filt_usertouch(struct knote *kn, struct kevent_internal_s *kev); > static int filt_userprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); > SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = { > .f_attach = filt_userattach, > .f_detach = filt_userdetach, > .f_event = filt_user, > .f_touch = filt_usertouch, > .f_process = filt_userprocess, > }; > > static lck_spin_t _filt_userlock; > static void filt_userlock(void); > static void filt_userunlock(void); > > /* Workloop filter */ > static bool filt_wlneeds_boost(struct kevent_internal_s *kev); > static int filt_wlattach(struct knote *kn, struct kevent_internal_s *kev); > static int filt_wlpost_attach(struct knote *kn, struct kevent_internal_s *kev); > static void filt_wldetach(struct knote *kn); > static int filt_wlevent(struct knote *kn, long hint); > static int filt_wltouch(struct knote *kn, struct kevent_internal_s *kev); > static int filt_wldrop_and_unlock(struct knote *kn, struct kevent_internal_s *kev); > static int filt_wlprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); > SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = { > .f_needs_boost = filt_wlneeds_boost, > .f_attach = filt_wlattach, > .f_post_attach = filt_wlpost_attach, > .f_detach = filt_wldetach, > .f_event = filt_wlevent, > .f_touch = filt_wltouch, > .f_drop_and_unlock = filt_wldrop_and_unlock, > .f_process = filt_wlprocess, > }; > 321,323d396 < const static struct filterops file_filtops; < const static struct filterops kqread_filtops; < const static struct filterops proc_filtops; 325,326d397 < const static struct filterops user_filtops; < const static struct filterops workloop_filtops; 335c406 < * - Add a filterops to the sysfilt_ops array. Public filters should be added at the end --- > * - Add a filterops to the sysfilt_ops array. 
Public filters should be added at the end 341,342c412,413 < * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of < * the Private filters section of the array. --- > * - Add a filterops to the sysfilt_ops. Private filters should be added at the end of > * the Private filters section of the array. 346,358c417,429 < [~EVFILT_READ] = &file_filtops, < [~EVFILT_WRITE] = &file_filtops, < [~EVFILT_AIO] = &bad_filtops, < [~EVFILT_VNODE] = &file_filtops, < [~EVFILT_PROC] = &proc_filtops, < [~EVFILT_SIGNAL] = &sig_filtops, < [~EVFILT_TIMER] = &timer_filtops, < [~EVFILT_MACHPORT] = &machport_filtops, < [~EVFILT_FS] = &fs_filtops, < [~EVFILT_USER] = &user_filtops, < &bad_filtops, < [~EVFILT_VM] = &bad_filtops, < [~EVFILT_SOCK] = &file_filtops, --- > [~EVFILT_READ] = &file_filtops, > [~EVFILT_WRITE] = &file_filtops, > [~EVFILT_AIO] = &bad_filtops, > [~EVFILT_VNODE] = &file_filtops, > [~EVFILT_PROC] = &proc_filtops, > [~EVFILT_SIGNAL] = &sig_filtops, > [~EVFILT_TIMER] = &timer_filtops, > [~EVFILT_MACHPORT] = &machport_filtops, > [~EVFILT_FS] = &fs_filtops, > [~EVFILT_USER] = &user_filtops, > &bad_filtops, > &bad_filtops, > [~EVFILT_SOCK] = &file_filtops, 360c431 < [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops, --- > [~EVFILT_MEMORYSTATUS] = &memorystatus_filtops, 362c433 < [~EVFILT_MEMORYSTATUS] = &bad_filtops, --- > [~EVFILT_MEMORYSTATUS] = &bad_filtops, 364c435,436 < [~EVFILT_EXCEPT] = &file_filtops, --- > [~EVFILT_EXCEPT] = &file_filtops, > 368,382c440,454 < [EVFILTID_KQREAD] = &kqread_filtops, < [EVFILTID_PIPE_R] = &pipe_rfiltops, < [EVFILTID_PIPE_W] = &pipe_wfiltops, < [EVFILTID_PTSD] = &ptsd_kqops, < [EVFILTID_SOREAD] = &soread_filtops, < [EVFILTID_SOWRITE] = &sowrite_filtops, < [EVFILTID_SCK] = &sock_filtops, < [EVFILTID_SOEXCEPT] = &soexcept_filtops, < [EVFILTID_SPEC] = &spec_filtops, < [EVFILTID_BPFREAD] = &bpfread_filtops, < [EVFILTID_NECP_FD] = &necp_fd_rfiltops, < [EVFILTID_FSEVENT] = &fsevent_filtops, < [EVFILTID_VN] = &vnode_filtops, < [EVFILTID_TTY] = &tty_filtops, < [EVFILTID_PTMX] = &ptmx_kqops, --- > [EVFILTID_KQREAD] = &kqread_filtops, > [EVFILTID_PIPE_R] = &pipe_rfiltops, > [EVFILTID_PIPE_W] = &pipe_wfiltops, > [EVFILTID_PTSD] = &ptsd_kqops, > [EVFILTID_SOREAD] = &soread_filtops, > [EVFILTID_SOWRITE] = &sowrite_filtops, > [EVFILTID_SCK] = &sock_filtops, > [EVFILTID_SOEXCEPT] = &soexcept_filtops, > [EVFILTID_SPEC] = &spec_filtops, > [EVFILTID_BPFREAD] = &bpfread_filtops, > [EVFILTID_NECP_FD] = &necp_fd_rfiltops, > [EVFILTID_FSEVENT] = &fsevent_filtops, > [EVFILTID_VN] = &vnode_filtops, > [EVFILTID_TTY] = &tty_filtops, > [EVFILTID_PTMX] = &ptmx_kqops, 388,389c460,501 < static inline struct kqworkloop * < kqr_kqworkloop(struct kqrequest *kqr) --- > #ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG > #define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 /* pthread event manager bit */ > #endif > #ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG > #define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 /* request overcommit threads */ > #endif > #ifndef _PTHREAD_PRIORITY_QOS_CLASS_MASK > #define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x003fff00 /* QoS class mask */ > #endif > #ifndef _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32 > #define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32 8 > #endif > > static inline __kdebug_only > uintptr_t > kqr_thread_id(struct kqrequest *kqr) > { > return (uintptr_t)thread_tid(kqr->kqr_thread); > } > > static inline > boolean_t is_workqueue_thread(thread_t thread) > { > return (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE); > } > > static inline > void 
knote_canonicalize_kevent_qos(struct knote *kn) > { > struct kqueue *kq = knote_get_kq(kn); > unsigned long canonical; > > if ((kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) > return; > > /* preserve manager and overcommit flags in this case */ > canonical = pthread_priority_canonicalize(kn->kn_qos, FALSE); > kn->kn_qos = (qos_t)canonical; > } > > static inline > kq_index_t qos_index_from_qos(struct knote *kn, qos_t qos, boolean_t propagation) 391,392c503,516 < if (kqr->kqr_state & KQR_WORKLOOP) { < return __container_of(kqr, struct kqworkloop, kqwl_request); --- > struct kqueue *kq = knote_get_kq(kn); > kq_index_t qos_index; > unsigned long flags = 0; > > if ((kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0) > return QOS_INDEX_KQFILE; > > qos_index = (kq_index_t)thread_qos_from_pthread_priority( > (unsigned long)qos, &flags); > > if (kq->kq_state & KQ_WORKQ) { > /* workq kqueues support requesting a manager thread (non-propagation) */ > if (!propagation && (flags & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) > return KQWQ_QOS_MANAGER; 394c518,519 < return NULL; --- > > return qos_index; 397,398c522,523 < static inline kqueue_t < kqr_kqueue(proc_t p, struct kqrequest *kqr) --- > static inline > qos_t qos_from_qos_index(kq_index_t qos_index) 400,406c525,545 < kqueue_t kqu; < if (kqr->kqr_state & KQR_WORKLOOP) { < kqu.kqwl = kqr_kqworkloop(kqr); < } else { < kqu.kqwq = (struct kqworkq *)p->p_fd->fd_wqkqueue; < assert(kqr >= kqu.kqwq->kqwq_request && < kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS); --- > /* should only happen for KQ_WORKQ */ > if (qos_index == KQWQ_QOS_MANAGER) > return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; > > if (qos_index == 0) > return THREAD_QOS_UNSPECIFIED; > > /* Should have support from pthread kext support */ > return (1 << (qos_index - 1 + > _PTHREAD_PRIORITY_QOS_CLASS_SHIFT_32)); > } > > /* kqr lock must be held */ > static inline > unsigned long pthread_priority_for_kqrequest( > struct kqrequest *kqr, > kq_index_t qos_index) > { > unsigned long priority = qos_from_qos_index(qos_index); > if (kqr->kqr_state & KQR_THOVERCOMMIT) { > priority |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; 408c547 < return kqu; --- > return priority; 411,412c550,551 < static inline boolean_t < is_workqueue_thread(thread_t thread) --- > static inline > kq_index_t qos_index_for_servicer(int qos_class, thread_t thread, int flags) 414c553,562 < return (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE); --- > #pragma unused(thread) > kq_index_t qos_index; > > if (flags & KEVENT_FLAG_WORKQ_MANAGER) > return KQWQ_QOS_MANAGER; > > qos_index = (kq_index_t)qos_class; > assert(qos_index > 0 && qos_index < KQWQ_QOS_MANAGER); > > return qos_index; 421c569 < * and the kqueue-aware status and locks of individual knotes. --- > * and the kqueue-aware status and use counts of individual knotes. 
435,455c583,585 < static lck_grp_attr_t *kq_lck_grp_attr; < static lck_grp_t *kq_lck_grp; < static lck_attr_t *kq_lck_attr; < < static inline void < kqlock(kqueue_t kqu) < { < lck_spin_lock(&kqu.kq->kq_lock); < } < < static inline void < kqlock_held(__assert_only kqueue_t kqu) < { < LCK_SPIN_ASSERT(&kqu.kq->kq_lock, LCK_ASSERT_OWNED); < } < < static inline void < kqunlock(kqueue_t kqu) < { < lck_spin_unlock(&kqu.kq->kq_lock); < } --- > lck_grp_attr_t * kq_lck_grp_attr; > lck_grp_t * kq_lck_grp; > lck_attr_t * kq_lck_attr; 458c588 < kq_req_lock(kqueue_t kqu) --- > kqlock(struct kqueue *kq) 460,461c590 < assert(kqu.kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)); < lck_spin_lock(&kqu.kq->kq_reqlock); --- > lck_spin_lock(&kq->kq_lock); 465c594 < kq_req_unlock(kqueue_t kqu) --- > kqlock_held(__assert_only struct kqueue *kq) 467,468c596 < assert(kqu.kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)); < lck_spin_unlock(&kqu.kq->kq_reqlock); --- > LCK_SPIN_ASSERT(&kq->kq_lock, LCK_ASSERT_OWNED); 472c600 < kq_req_held(__assert_only kqueue_t kqu) --- > kqunlock(struct kqueue *kq) 474,475c602 < assert(kqu.kq->kq_state & (KQ_WORKLOOP | KQ_WORKQ)); < LCK_SPIN_ASSERT(&kqu.kq->kq_reqlock, LCK_ASSERT_OWNED); --- > lck_spin_unlock(&kq->kq_lock); 490d616 < #pragma mark knote locks 493,500c619 < * Enum used by the knote_lock_* functions. < * < * KNOTE_KQ_LOCK_ALWAYS < * The function will always return with the kq lock held. < * < * KNOTE_KQ_UNLOCK_ON_SUCCESS < * The function will return with the kq lock held if it was successful < * (knote_lock() is the only function that can fail). --- > * Convert a kq lock to a knote use referece. 502,504c621,623 < * KNOTE_KQ_UNLOCK_ON_FAILURE < * The function will return with the kq lock held if it was unsuccessful < * (knote_lock() is the only function that can fail). --- > * If the knote is being dropped, or has > * vanished, we can't get a use reference. > * Just return with it still locked. 506,507c625,626 < * KNOTE_KQ_UNLOCK: < * The function returns with the kq unlocked. 
--- > * - kq locked at entry > * - unlock on exit if we get the use reference 509,538c628,629 < #define KNOTE_KQ_LOCK_ALWAYS 0x0 < #define KNOTE_KQ_LOCK_ON_SUCCESS 0x1 < #define KNOTE_KQ_LOCK_ON_FAILURE 0x2 < #define KNOTE_KQ_UNLOCK 0x3 < < #if DEBUG || DEVELOPMENT < __attribute__((noinline, not_tail_called, disable_tail_calls)) < void knote_lock_ctx_chk(struct knote_lock_ctx *knlc) < { < /* evil hackery to make sure no one forgets to unlock */ < assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED); < } < #endif < < static struct knote_lock_ctx * < knote_lock_ctx_find(struct kqueue *kq, struct knote *kn) < { < struct knote_lock_ctx *ctx; < LIST_FOREACH(ctx, &kq->kq_knlocks, knlc_le) { < if (ctx->knlc_knote == kn) return ctx; < } < panic("knote lock context not found: %p", kn); < __builtin_trap(); < } < < /* slowpath of knote_lock() */ < __attribute__((noinline)) < static bool __result_use_check < knote_lock_slow(struct kqueue *kq, struct knote *kn, < struct knote_lock_ctx *knlc, int kqlocking) --- > static int > kqlock2knoteuse(struct kqueue *kq, struct knote *kn, int flags) 540,543c631,632 < kqlock_held(kq); < < struct knote_lock_ctx *owner_lc = knote_lock_ctx_find(kq, kn); < thread_t owner_thread = owner_lc->knlc_thread; --- > if (kn->kn_status & (KN_DROPPING | KN_VANISHED)) > return (0); 545,547c634,641 < #if DEBUG || DEVELOPMENT < knlc->knlc_state = KNOTE_LOCK_CTX_WAITING; < #endif --- > assert(kn->kn_status & KN_ATTACHED); > kn->kn_inuse++; > if (flags & KNUSE_BOOST) { > set_thread_rwlock_boost(); > } > kqunlock(kq); > return (1); > } 549,551c643,654 < thread_reference(owner_thread); < TAILQ_INSERT_TAIL(&owner_lc->knlc_head, knlc, knlc_tqe); < assert_wait(&kn->kn_status, THREAD_UNINT | THREAD_WAIT_NOREPORT); --- > /* > * - kq locked at entry > * - kq unlocked at exit > */ > __disable_tail_calls > static wait_result_t > knoteusewait(struct kqueue *kq, struct knote *kn) > { > kn->kn_status |= KN_USEWAIT; > waitq_assert_wait64((struct waitq *)&kq->kq_wqs, > CAST_EVENT64_T(&kn->kn_status), > THREAD_UNINT, TIMEOUT_WAIT_FOREVER); 552a656,657 > return thread_block(THREAD_CONTINUE_NULL); > } 554,570c659,663 < if (thread_handoff_deallocate(owner_thread) == THREAD_RESTART) { < if (kqlocking == KNOTE_KQ_LOCK_ALWAYS || < kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { < kqlock(kq); < } < #if DEBUG || DEVELOPMENT < assert(knlc->knlc_state == KNOTE_LOCK_CTX_WAITING); < knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED; < #endif < return false; < } < #if DEBUG || DEVELOPMENT < assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED); < #endif < if (kqlocking == KNOTE_KQ_LOCK_ALWAYS || < kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) { < kqlock(kq); --- > static bool > knoteuse_needs_boost(struct knote *kn, struct kevent_internal_s *kev) > { > if (knote_fops(kn)->f_needs_boost) { > return knote_fops(kn)->f_needs_boost(kev); 572c665 < return true; --- > return false; 576c669 < * Attempts to take the "knote" lock. --- > * Convert from a knote use reference back to kq lock. 578c671,672 < * Called with the kqueue lock held. --- > * Drop a use reference and wake any waiters if > * this is the last one. 580c674,684 < * Returns true if the knote lock is acquired, false if it has been dropped --- > * If someone is trying to drop the knote, but the > * caller has events they must deliver, take > * responsibility for the drop later - and wake the > * other attempted dropper in a manner that informs > * him of the transfer of responsibility. 
> * > * The exit return indicates if the knote is still alive > * (or if not, the other dropper has been given the green > * light to drop it). > * > * The kqueue lock is re-taken unconditionally. 582,584c686,687 < static bool __result_use_check < knote_lock(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, < int kqlocking) --- > static int > knoteuse2kqlock(struct kqueue *kq, struct knote *kn, int flags) 586c689,690 < kqlock_held(kq); --- > int dropped = 0; > int steal_drop = (flags & KNUSE_STEAL_DROP); 588,596c692,694 < #if DEBUG || DEVELOPMENT < assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED); < #endif < knlc->knlc_knote = kn; < knlc->knlc_thread = current_thread(); < TAILQ_INIT(&knlc->knlc_head); < < if (__improbable(kn->kn_status & KN_LOCKED)) { < return knote_lock_slow(kq, kn, knlc, kqlocking); --- > kqlock(kq); > if (flags & KNUSE_BOOST) { > clear_thread_rwlock_boost(); 599,609c697 < /* < * When the knote will be dropped, the knote lock is taken before < * KN_DROPPING is set, and then the knote will be removed from any < * hash table that references it before the lock is canceled. < */ < assert((kn->kn_status & KN_DROPPING) == 0); < LIST_INSERT_HEAD(&kq->kq_knlocks, knlc, knlc_le); < kn->kn_status |= KN_LOCKED; < #if DEBUG || DEVELOPMENT < knlc->knlc_state = KNOTE_LOCK_CTX_LOCKED; < #endif --- > if (--kn->kn_inuse == 0) { 611,616c699,701 < if (kqlocking == KNOTE_KQ_UNLOCK || < kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { < kqunlock(kq); < } < return true; < } --- > if ((kn->kn_status & KN_ATTACHING) != 0) { > kn->kn_status &= ~KN_ATTACHING; > } 618,629c703,704 < /* < * Unlocks a knote successfully locked with knote_lock(). < * < * Called with the kqueue lock held. < * < * Returns with the kqueue lock held according to KNOTE_KQ_* flags < */ < static void < knote_unlock(struct kqueue *kq, struct knote *kn, < struct knote_lock_ctx *knlc, int flags) < { < kqlock_held(kq); --- > if ((kn->kn_status & KN_USEWAIT) != 0) { > wait_result_t result; 631,635c706,713 < assert(knlc->knlc_knote == kn); < assert(kn->kn_status & KN_LOCKED); < #if DEBUG || DEVELOPMENT < assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED); < #endif --- > /* If we need to, try and steal the drop */ > if (kn->kn_status & KN_DROPPING) { > if (steal_drop && !(kn->kn_status & KN_STOLENDROP)) { > kn->kn_status |= KN_STOLENDROP; > } else { > dropped = 1; > } > } 637c715,717 < struct knote_lock_ctx *next_owner_lc = TAILQ_FIRST(&knlc->knlc_head); --- > /* wakeup indicating if ANY USE stole the drop */ > result = (kn->kn_status & KN_STOLENDROP) ? 
> THREAD_RESTART : THREAD_AWAKENED; 639c719,727 < LIST_REMOVE(knlc, knlc_le); --- > kn->kn_status &= ~KN_USEWAIT; > waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, > CAST_EVENT64_T(&kn->kn_status), > result, > WAITQ_ALL_PRIORITIES); > } else { > /* should have seen use-wait if dropping with use refs */ > assert((kn->kn_status & (KN_DROPPING|KN_STOLENDROP)) == 0); > } 641,643c729,732 < if (next_owner_lc) { < assert(next_owner_lc->knlc_knote == kn); < TAILQ_REMOVE(&knlc->knlc_head, next_owner_lc, knlc_tqe); --- > } else if (kn->kn_status & KN_DROPPING) { > /* not the last ref but want to steal a drop if present */ > if (steal_drop && ((kn->kn_status & KN_STOLENDROP) == 0)) { > kn->kn_status |= KN_STOLENDROP; 645,666c734,739 < assert(TAILQ_EMPTY(&next_owner_lc->knlc_head)); < TAILQ_CONCAT(&next_owner_lc->knlc_head, &knlc->knlc_head, knlc_tqe); < LIST_INSERT_HEAD(&kq->kq_knlocks, next_owner_lc, knlc_le); < #if DEBUG || DEVELOPMENT < next_owner_lc->knlc_state = KNOTE_LOCK_CTX_LOCKED; < #endif < } else { < kn->kn_status &= ~KN_LOCKED; < } < if (kn->kn_inuse == 0) { < /* < * No f_event() in flight anymore, we can leave QoS "Merge" mode < * < * See knote_should_apply_qos_override() < */ < kn->kn_status &= ~KN_MERGE_QOS; < } < if (flags & KNOTE_KQ_UNLOCK) { < kqunlock(kq); < } < if (next_owner_lc) { < thread_wakeup_thread(&kn->kn_status, next_owner_lc->knlc_thread); --- > /* but we now have to wait to be the last ref */ > knoteusewait(kq, kn); > kqlock(kq); > } else { > dropped = 1; > } 668,670c741,742 < #if DEBUG || DEVELOPMENT < knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED; < #endif --- > > return (!dropped); 674c746,751 < * Aborts all waiters for a knote lock, and unlock the knote. --- > * Convert a kq lock to a knote use reference > * (for the purpose of detaching AND vanishing it). > * > * If the knote is being dropped, we can't get > * a detach reference, so wait for the knote to > * finish dropping before returning. 676c753,756 < * Called with the kqueue lock held. --- > * If the knote is being used for other purposes, > * we cannot detach it until those uses are done > * as well. Again, just wait for them to finish > * (caller will start over at lookup). 678c758,759 < * Returns with the kqueue lock held according to KNOTE_KQ_* flags --- > * - kq locked at entry > * - unlocked on exit 680,682c761,762 < static void < knote_unlock_cancel(struct kqueue *kq, struct knote *kn, < struct knote_lock_ctx *knlc, int kqlocking) --- > static int > kqlock2knotedetach(struct kqueue *kq, struct knote *kn, int flags) 684,695c764,767 < kqlock_held(kq); < < assert(knlc->knlc_knote == kn); < assert(kn->kn_status & KN_LOCKED); < assert(kn->kn_status & KN_DROPPING); < < LIST_REMOVE(knlc, knlc_le); < kn->kn_status &= ~KN_LOCKED; < < if (kqlocking == KNOTE_KQ_UNLOCK || < kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { < kqunlock(kq); --- > if ((kn->kn_status & KN_DROPPING) || kn->kn_inuse) { > /* have to wait for dropper or current uses to go away */ > knoteusewait(kq, kn); > return (0); 697,698c769,774 < if (!TAILQ_EMPTY(&knlc->knlc_head)) { < thread_wakeup_with_result(&kn->kn_status, THREAD_RESTART); --- > assert((kn->kn_status & KN_VANISHED) == 0); > assert(kn->kn_status & KN_ATTACHED); > kn->kn_status &= ~KN_ATTACHED; > kn->kn_status |= KN_VANISHED; > if (flags & KNUSE_BOOST) { > clear_thread_rwlock_boost(); 700,702c776,778 < #if DEBUG || DEVELOPMENT < knlc->knlc_state = KNOTE_LOCK_CTX_UNLOCKED; < #endif --- > kn->kn_inuse++; > kqunlock(kq); > return (1); 706c782,788 < * Call the f_event hook of a given filter. 
--- > * Convert a kq lock to a knote drop reference. > * > * If the knote is in use, wait for the use count > * to subside. We first mark our intention to drop > * it - keeping other users from "piling on." > * If we are too late, we have to wait for the > * other drop to complete. 708c790,793 < * Takes a use count to protect against concurrent drops. --- > * - kq locked at entry > * - always unlocked on exit. > * - caller can't hold any locks that would prevent > * the other dropper from completing. 710,711c795,796 < static void < knote_call_filter_event(struct kqueue *kq, struct knote *kn, long hint) --- > static int > kqlock2knotedrop(struct kqueue *kq, struct knote *kn) 713,731c798,799 < int result, dropping = 0; < < kqlock_held(kq); < < if (kn->kn_status & (KN_DROPPING | KN_VANISHED)) < return; < < kn->kn_inuse++; < kqunlock(kq); < result = filter_call(knote_fops(kn), f_event(kn, hint)); < kqlock(kq); < < dropping = (kn->kn_status & KN_DROPPING); < < if (!dropping && (result & FILTER_ACTIVE)) { < if (result & FILTER_ADJUST_EVENT_QOS_BIT) < knote_adjust_qos(kq, kn, result); < knote_activate(kn); < } --- > int oktodrop; > wait_result_t result; 733,746c801,809 < if (--kn->kn_inuse == 0) { < if ((kn->kn_status & KN_LOCKED) == 0) { < /* < * We're the last f_event() call and there's no other f_* call in < * flight, we can leave QoS "Merge" mode. < * < * See knote_should_apply_qos_override() < */ < kn->kn_status &= ~KN_MERGE_QOS; < } < if (dropping) { < waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, < CAST_EVENT64_T(&kn->kn_inuse), < THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); --- > oktodrop = ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) == 0); > /* if another thread is attaching, they will become the dropping thread */ > kn->kn_status |= KN_DROPPING; > knote_unsuppress(kn); > knote_dequeue(kn); > if (oktodrop) { > if (kn->kn_inuse == 0) { > kqunlock(kq); > return (oktodrop); 748a812,814 > result = knoteusewait(kq, kn); > /* THREAD_RESTART == another thread stole the knote drop */ > return (result == THREAD_AWAKENED); 750a817 > #if 0 752,755c819 < * Called by knote_drop() to wait for the last f_event() caller to be done. < * < * - kq locked at entry < * - kq unlocked at exit --- > * Release a knote use count reference. 
758c822 < knote_wait_for_filter_events(struct kqueue *kq, struct knote *kn) --- > knote_put(struct knote *kn) 760,764c824 < wait_result_t wr = THREAD_NOT_WAITING; < < kqlock_held(kq); < < assert(kn->kn_status & KN_DROPPING); --- > struct kqueue *kq = knote_get_kq(kn); 766,769c826,834 < if (kn->kn_inuse) { < wr = waitq_assert_wait64((struct waitq *)&kq->kq_wqs, < CAST_EVENT64_T(&kn->kn_inuse), < THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER); --- > kqlock(kq); > if (--kn->kn_inuse == 0) { > if ((kn->kn_status & KN_USEWAIT) != 0) { > kn->kn_status &= ~KN_USEWAIT; > waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, > CAST_EVENT64_T(&kn->kn_status), > THREAD_AWAKENED, > WAITQ_ALL_PRIORITIES); > } 772,774d836 < if (wr == THREAD_WAITING) { < thread_block(THREAD_CONTINUE_NULL); < } 776,777c838 < < #pragma mark file_filtops --- > #endif 782c843 < return fo_kqfilter(kn->kn_fp, kn, kev, vfs_context_current()); --- > return (fo_kqfilter(kn->kn_fp, kn, kev, vfs_context_current())); 785,791d845 < SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = { < .f_isfd = 1, < .f_attach = filt_fileattach, < }; < < #pragma mark kqread_filtops < 792a847,848 > #define f_msgcount f_fglob->fg_msgcount > #define f_cred f_fglob->fg_cred 793a850 > #define f_offset f_fglob->fg_offset 806a864 > /*ARGSUSED*/ 810a869 > int count; 812c871,872 < return (kq->kq_count > 0); --- > count = kq->kq_count; > return (count > 0); 823a884,885 > if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) > kn->kn_udata = kev->udata; 851,859c913 < SECURITY_READ_ONLY_EARLY(static struct filterops) kqread_filtops = { < .f_isfd = 1, < .f_detach = filt_kqdetach, < .f_event = filt_kqueue, < .f_touch = filt_kqtouch, < .f_process = filt_kqprocess, < }; < < #pragma mark proc_filtops --- > #pragma mark EVFILT_PROC 869c923,924 < knote_set_error(kn, ENOTSUP); --- > kn->kn_flags = EV_ERROR; > kn->kn_data = ENOTSUP; 875c930,931 < knote_set_error(kn, ESRCH); --- > kn->kn_flags = EV_ERROR; > kn->kn_data = ESRCH; 893c949,950 < knote_set_error(kn, EACCES); --- > kn->kn_flags = EV_ERROR; > kn->kn_data = EACCES; 968c1025 < } --- > } 986c1043 < * as is collected here, in kn_data. Any changes to how --- > * as is collected here, in kn_data. Any changes to how 1000c1057 < kn->kn_data |= NOTE_EXIT_DECRYPTFAIL; --- > kn->kn_data |= NOTE_EXIT_DECRYPTFAIL; 1048a1106,1107 > if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) > kn->kn_udata = kev->udata; 1082,1088d1140 < SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = { < .f_attach = filt_procattach, < .f_detach = filt_procdetach, < .f_event = filt_proc, < .f_touch = filt_proctouch, < .f_process = filt_procprocess, < }; 1090c1142 < #pragma mark timer_filtops --- > #pragma mark EVFILT_TIMER 1092,1097d1143 < struct filt_timer_params { < uint64_t deadline; /* deadline in abs/cont time < (or 0 if NOTE_ABSOLUTE and deadline is in past) */ < uint64_t leeway; /* leeway in abstime, or 0 if none */ < uint64_t interval; /* interval in abstime or 0 if non-repeating timer */ < }; 1107,1122c1153 < * kn->kn_hookid timer state < * < * TIMER_IDLE: < * The timer has either never been scheduled or been cancelled. < * It is safe to schedule a new one in this state. < * < * TIMER_ARMED: < * The timer has been scheduled < * < * TIMER_FIRED < * The timer has fired and an event needs to be delivered. < * When in this state, the callout may still be running. < * < * TIMER_IMMEDIATE < * The timer has fired at registration time, and the callout was never < * dispatched. 
--- > * kn->kn_data fire count 1124,1127d1154 < #define TIMER_IDLE 0x0 < #define TIMER_ARMED 0x1 < #define TIMER_FIRED 0x2 < #define TIMER_IMMEDIATE 0x3 1129,1130c1156,1161 < static void < filt_timer_set_params(struct knote *kn, struct filt_timer_params *params) --- > static lck_mtx_t _filt_timerlock; > > static void filt_timerlock(void) { lck_mtx_lock(&_filt_timerlock); } > static void filt_timerunlock(void) { lck_mtx_unlock(&_filt_timerlock); } > > static inline void filt_timer_assert_locked(void) 1132,1134c1163 < kn->kn_ext[0] = params->deadline; < kn->kn_ext[1] = params->leeway; < kn->kn_sdata = params->interval; --- > LCK_MTX_ASSERT(&_filt_timerlock, LCK_MTX_ASSERT_OWNED); 1136a1166,1169 > /* state flags stored in kn_hookid */ > #define TIMER_RUNNING 0x1 > #define TIMER_CANCELWAIT 0x2 > 1147,1148c1180,1182 < * struct filter_timer_params to apply to the filter with < * filt_timer_set_params when changes are ready to be commited. --- > * kn_sdata either interval in abstime or 0 if non-repeating timer > * ext[0] fire deadline in abs/cont time > * (or 0 if NOTE_ABSOLUTE and deadline is in past) 1152d1185 < * ERANGE Various overflows with the parameters 1157,1158c1190 < filt_timervalidate(const struct kevent_internal_s *kev, < struct filt_timer_params *params) --- > filt_timervalidate(struct knote *kn) 1161c1193 < * There are 5 knobs that need to be chosen for a timer registration: --- > * There are 4 knobs that need to be chosen for a timer registration: 1190a1223,1224 > filt_timer_assert_locked(); > 1195c1229 < switch (kev->fflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS|NOTE_MACHTIME)) { --- > switch (kn->kn_sfflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS|NOTE_MACHTIME)) { 1217c1251 < if (kev->fflags & NOTE_LEEWAY) { --- > if (kn->kn_sfflags & NOTE_LEEWAY) { 1221c1255 < leeway_abs = (uint64_t)kev->ext[1]; --- > leeway_abs = (uint64_t)kn->kn_ext[1]; 1224c1258 < if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) --- > if (os_mul_overflow((uint64_t)kn->kn_ext[1], multiplier, &leeway_ns)) 1230,1232c1264 < params->leeway = leeway_abs; < } else { < params->leeway = 0; --- > kn->kn_ext[1] = leeway_abs; 1235c1267 < if (kev->fflags & NOTE_ABSOLUTE) { --- > if (kn->kn_sfflags & NOTE_ABSOLUTE) { 1239c1271 < deadline_abs = (uint64_t)kev->data; --- > deadline_abs = (uint64_t)kn->kn_sdata; 1243c1275 < if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) --- > if (os_mul_overflow((uint64_t)kn->kn_sdata, multiplier, &calendar_deadline_ns)) 1277c1309 < if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) --- > if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) 1288,1290c1320,1322 < params->deadline = deadline_abs; < params->interval = 0; /* NOTE_ABSOLUTE is non-repeating */ < } else if (kev->data < 0) { --- > kn->kn_ext[0] = deadline_abs; > kn->kn_sdata = 0; /* NOTE_ABSOLUTE is non-repeating */ > } else if (kn->kn_sdata < 0) { 1304,1305c1336,1337 < params->deadline = 0; /* expire immediately */ < params->interval = 0; /* non-repeating */ --- > kn->kn_sdata = 0; /* non-repeating */ > kn->kn_ext[0] = 0; /* expire immediately */ 1310c1342 < interval_abs = (uint64_t)kev->data; --- > interval_abs = (uint64_t)kn->kn_sdata; 1313c1345 < if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) --- > if (os_mul_overflow((uint64_t)kn->kn_sdata, multiplier, &interval_ns)) 1321c1353 < if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) --- > if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) 1326,1327c1358,1359 < params->deadline = deadline; < params->interval = interval_abs; 
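filt_timervalidate, shown above, picks a multiplier from the NOTE_SECONDS / NOTE_USECONDS / NOTE_NSECONDS flags (milliseconds when none is set), converts the user-supplied value to nanoseconds, and rejects the registration with ERANGE when the multiplication overflows. A userspace sketch of just that step, using the compiler's __builtin_mul_overflow in place of os_mul_overflow; NOTE_MACHTIME and the final abstime conversion are left out, and the helper name is mine:

    #include <sys/event.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    /* Mirror the multiplier selection and overflow rejection only; this is a
     * sketch, not the kernel routine. */
    static int timer_value_to_ns(uint32_t fflags, int64_t data, uint64_t *ns_out)
    {
        uint64_t multiplier;

        switch (fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS)) {
        case NOTE_SECONDS:  multiplier = 1000000000ULL; break;
        case NOTE_USECONDS: multiplier = 1000ULL;       break;
        case NOTE_NSECONDS: multiplier = 1ULL;          break;
        case 0:             multiplier = 1000000ULL;    break; /* default unit: milliseconds */
        default:            return EINVAL;                     /* conflicting unit flags */
        }

        if (data < 0)
            return EINVAL;  /* the kernel instead treats this as "already expired" */

        uint64_t ns;
        if (__builtin_mul_overflow((uint64_t)data, multiplier, &ns))
            return ERANGE;

        *ns_out = ns;
        return 0;
    }

    int main(void)
    {
        uint64_t ns;
        int err = timer_value_to_ns(NOTE_USECONDS, 250000, &ns);
        printf("err=%d ns=%llu\n", err, (unsigned long long)ns);

        err = timer_value_to_ns(NOTE_SECONDS, INT64_MAX, &ns);
        printf("overflow err=%d (ERANGE=%d)\n", err, ERANGE);
        return 0;
    }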
--- > kn->kn_sdata = interval_abs; /* default to a repeating timer */ > kn->kn_ext[0] = deadline; 1332a1365,1367 > > > 1334a1370,1375 > * > * Just propagate the timer event into the knote > * filter routine (by going through the knote > * synchronization point). Pass a hint to > * indicate this is a real event, not just a > * query from above. 1338a1380 > struct klist timer_list; 1340d1381 < int v; 1342,1345c1383,1394 < if (os_atomic_cmpxchgv(&kn->kn_hookid, TIMER_ARMED, TIMER_FIRED, < &v, relaxed)) { < // our f_event always would say FILTER_ACTIVE, < // so be leaner and just do it. --- > filt_timerlock(); > > kn->kn_hookid &= ~TIMER_RUNNING; > > /* no "object" for timers, so fake a list */ > SLIST_INIT(&timer_list); > SLIST_INSERT_HEAD(&timer_list, kn, kn_selnext); > > KNOTE(&timer_list, 1); > > /* if someone is waiting for timer to pop */ > if (kn->kn_hookid & TIMER_CANCELWAIT) { 1347,1360c1396,1399 < kqlock(kq); < knote_activate(kn); < kqunlock(kq); < } else { < /* < * From TIMER_ARMED, the only allowed transition are: < * - to TIMER_FIRED through the timer callout just above < * - to TIMER_IDLE due to filt_timercancel() which will wait for the < * timer callout (and any possible invocation of filt_timerexpire) to < * have finished before the state is changed again. < */ < assert(v == TIMER_IDLE); < } < } --- > waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, > CAST_EVENT64_T(&kn->kn_hook), > THREAD_AWAKENED, > WAITQ_ALL_PRIORITIES); 1362,1367c1401 < static void < filt_timercancel(struct knote *kn) < { < if (os_atomic_xchg(&kn->kn_hookid, TIMER_IDLE, relaxed) == TIMER_ARMED) { < /* cancel the thread call and wait for any filt_timerexpire in flight */ < thread_call_cancel_wait((thread_call_t)kn->kn_hook); --- > kn->kn_hookid &= ~TIMER_CANCELWAIT; 1368a1403,1404 > > filt_timerunlock(); 1372c1408,1410 < * Does this deadline needs a timer armed for it, or has it expired? --- > * Cancel a running timer (or wait for the pop). > * Timer filter lock is held. > * May drop and retake the timer filter lock. 1374,1375c1412,1413 < static bool < filt_timer_is_ready(struct knote *kn) --- > static void > filt_timercancel(struct knote *kn) 1377c1415 < uint64_t now, deadline = kn->kn_ext[0]; --- > filt_timer_assert_locked(); 1379,1381c1417 < if (deadline == 0) { < return true; < } --- > assert((kn->kn_hookid & TIMER_CANCELWAIT) == 0); 1383,1386c1419,1428 < if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) { < now = mach_continuous_time(); < } else { < now = mach_absolute_time(); --- > /* if no timer, then we're good */ > if ((kn->kn_hookid & TIMER_RUNNING) == 0) > return; > > thread_call_t callout = (thread_call_t)kn->kn_hook; > > /* cancel the callout if we can */ > if (thread_call_cancel(callout)) { > kn->kn_hookid &= ~TIMER_RUNNING; > return; 1388c1430,1446 < return deadline <= now; --- > > /* cancel failed, we have to wait for the in-flight expire routine */ > > kn->kn_hookid |= TIMER_CANCELWAIT; > > struct kqueue *kq = knote_get_kq(kn); > > waitq_assert_wait64((struct waitq *)&kq->kq_wqs, > CAST_EVENT64_T(&kn->kn_hook), > THREAD_UNINT, TIMEOUT_WAIT_FOREVER); > > filt_timerunlock(); > thread_block(THREAD_CONTINUE_NULL); > filt_timerlock(); > > assert((kn->kn_hookid & TIMER_CANCELWAIT) == 0); > assert((kn->kn_hookid & TIMER_RUNNING) == 0); 1391,1396d1448 < /* < * Arm a timer < * < * It is the responsibility of the caller to make sure the timer call < * has completed or been cancelled properly prior to arming it. 
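filt_timer_is_ready, visible in this hunk, only has to choose which clock the knote was registered against (mach_continuous_time() when NOTE_MACH_CONTINUOUS_TIME was requested, mach_absolute_time() otherwise) and compare the stored deadline with "now", treating a zero deadline as already expired. The same check as a standalone program (macOS 10.12+ for mach_continuous_time(); a bool parameter replaces the fflags test, and the function name is mine):

    #include <mach/mach_time.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Is a deadline (in mach time units) already in the past?
     * Deadline 0 means "fire immediately", as in the hunk above. */
    static bool timer_deadline_is_ready(uint64_t deadline, bool continuous)
    {
        if (deadline == 0)
            return true;

        uint64_t now = continuous ? mach_continuous_time()  /* keeps running across sleep */
                                  : mach_absolute_time();   /* stops while the machine sleeps */
        return deadline <= now;
    }

    int main(void)
    {
        mach_timebase_info_data_t tb;
        mach_timebase_info(&tb);

        /* a deadline 100ms out, converted from nanoseconds to mach time units */
        uint64_t delta = (100ull * 1000 * 1000) * tb.denom / tb.numer;
        uint64_t deadline = mach_absolute_time() + delta;

        printf("future deadline ready? %d\n", timer_deadline_is_ready(deadline, false)); /* 0 */
        printf("zero deadline ready?   %d\n", timer_deadline_is_ready(0, false));        /* 1 */
        return 0;
    }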
< */ 1399a1452,1457 > filt_timer_assert_locked(); > > assert((kn->kn_hookid & TIMER_RUNNING) == 0); > > thread_call_t callout = (thread_call_t)kn->kn_hook; > 1406,1407d1463 < assert(os_atomic_load(&kn->kn_hookid, relaxed) == TIMER_IDLE); < 1421,1423c1477,1502 < os_atomic_store(&kn->kn_hookid, TIMER_ARMED, relaxed); < thread_call_enter_delayed_with_leeway((thread_call_t)kn->kn_hook, NULL, < deadline, leeway, timer_flags); --- > thread_call_enter_delayed_with_leeway(callout, NULL, > deadline, leeway, > timer_flags); > > kn->kn_hookid |= TIMER_RUNNING; > } > > /* > * Does this knote need a timer armed for it, or should it be ready immediately? > */ > static boolean_t > filt_timer_is_ready(struct knote *kn) > { > uint64_t now; > > if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) > now = mach_continuous_time(); > else > now = mach_absolute_time(); > > uint64_t deadline = kn->kn_ext[0]; > > if (deadline < now) > return TRUE; > else > return FALSE; 1430c1509 < filt_timerattach(struct knote *kn, struct kevent_internal_s *kev) --- > filt_timerattach(struct knote *kn, __unused struct kevent_internal_s *kev) 1433d1511 < struct filt_timer_params params; 1436,1440d1513 < if ((error = filt_timervalidate(kev, ¶ms)) != 0) { < knote_set_error(kn, error); < return 0; < } < 1446c1519,1520 < knote_set_error(kn, ENOMEM); --- > kn->kn_flags = EV_ERROR; > kn->kn_data = ENOMEM; 1450,1451c1524,1537 < filt_timer_set_params(kn, ¶ms); < kn->kn_hook = callout; --- > filt_timerlock(); > > if ((error = filt_timervalidate(kn)) != 0) { > kn->kn_flags = EV_ERROR; > kn->kn_data = error; > filt_timerunlock(); > > __assert_only boolean_t freed = thread_call_free(callout); > assert(freed); > return 0; > } > > kn->kn_hook = (void*)callout; > kn->kn_hookid = 0; 1453d1538 < os_atomic_store(&kn->kn_hookid, TIMER_IDLE, relaxed); 1459,1461c1544,1548 < if (filt_timer_is_ready(kn)) { < os_atomic_store(&kn->kn_hookid, TIMER_IMMEDIATE, relaxed); < return FILTER_ACTIVE; --- > boolean_t timer_ready = FALSE; > > if ((timer_ready = filt_timer_is_ready(kn))) { > /* cause immediate expiration */ > kn->kn_data = 1; 1464d1550 < return 0; 1465a1552,1555 > > filt_timerunlock(); > > return timer_ready; 1474c1564 < __assert_only boolean_t freed; --- > thread_call_t callout; 1476,1481c1566,1573 < /* < * Unconditionally cancel to make sure there can't be any filt_timerexpire() < * running anymore. 
< */ < thread_call_cancel_wait((thread_call_t)kn->kn_hook); < freed = thread_call_free((thread_call_t)kn->kn_hook); --- > filt_timerlock(); > > callout = (thread_call_t)kn->kn_hook; > filt_timercancel(kn); > > filt_timerunlock(); > > __assert_only boolean_t freed = thread_call_free(callout); 1485a1578,1592 > * filt_timerevent - post events to a timer knote > * > * Called in the context of filt_timerexpire with > * the filt_timerlock held > */ > static int > filt_timerevent(struct knote *kn, __unused long hint) > { > filt_timer_assert_locked(); > > kn->kn_data = 1; > return (1); > } > > /* 1493c1600,1602 < filt_timertouch(struct knote *kn, struct kevent_internal_s *kev) --- > filt_timertouch( > struct knote *kn, > struct kevent_internal_s *kev) 1495,1496d1603 < struct filt_timer_params params; < uint32_t changed_flags = (kn->kn_sfflags ^ kev->fflags); 1499,1503c1606 < if (changed_flags & NOTE_ABSOLUTE) { < kev->flags |= EV_ERROR; < kev->data = EINVAL; < return 0; < } --- > filt_timerlock(); 1505,1509c1608,1615 < if ((error = filt_timervalidate(kev, ¶ms)) != 0) { < kev->flags |= EV_ERROR; < kev->data = error; < return 0; < } --- > /* > * cancel current call - drops and retakes lock > * TODO: not safe against concurrent touches? > */ > filt_timercancel(kn); > > /* clear if the timer had previously fired, the user no longer wants to see it */ > kn->kn_data = 0; 1512,1513c1618 < filt_timercancel(kn); < filt_timer_set_params(kn, ¶ms); --- > kn->kn_sdata = kev->data; 1514a1620,1624 > kn->kn_ext[0] = kev->ext[0]; > kn->kn_ext[1] = kev->ext[1]; > > if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) > kn->kn_udata = kev->udata; 1516,1518c1626,1640 < if (filt_timer_is_ready(kn)) { < os_atomic_store(&kn->kn_hookid, TIMER_IMMEDIATE, relaxed); < return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS; --- > /* recalculate deadline */ > error = filt_timervalidate(kn); > if (error) { > /* no way to report error, so mark it in the knote */ > kn->kn_flags |= EV_ERROR; > kn->kn_data = error; > filt_timerunlock(); > return 1; > } > > boolean_t timer_ready = FALSE; > > if ((timer_ready = filt_timer_is_ready(kn))) { > /* cause immediate expiration */ > kn->kn_data = 1; 1521d1642 < return FILTER_UPDATE_REQ_QOS; 1522a1644,1647 > > filt_timerunlock(); > > return timer_ready; 1538,1549c1663,1665 < /* < * filt_timerprocess is serialized with any filter routine except for < * filt_timerexpire which atomically does a TIMER_ARMED -> TIMER_FIRED < * transition, and on success, activates the knote. < * < * Hence, we don't need atomic modifications of the state, only to peek at < * whether we see any of the "FIRED" state, and if we do, it is safe to < * do simple state machine transitions. < */ < switch (os_atomic_load(&kn->kn_hookid, relaxed)) { < case TIMER_IDLE: < case TIMER_ARMED: --- > filt_timerlock(); > > if (kn->kn_data == 0 || (kn->kn_hookid & TIMER_CANCELWAIT)) { 1550a1667,1672 > * kn_data = 0: > * The timer hasn't yet fired, so there's nothing to deliver > * TIMER_CANCELWAIT: > * touch is in the middle of canceling the timer, > * so don't deliver or re-arm anything > * 1553a1676 > filt_timerunlock(); 1557,1574c1680 < os_atomic_store(&kn->kn_hookid, TIMER_IDLE, relaxed); < < /* < * Copy out the interesting kevent state, < * but don't leak out the raw time calculations. 
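For context, the observable effect of the filt_timerprocess logic in this hunk is what kevent(2) reports for an EVFILT_TIMER knote: expirations of a repeating timer that happened since the last delivery are coalesced, and the count comes back in the event's data field. A small macOS/BSD userspace example of that public behavior (error handling trimmed):

    #include <sys/event.h>
    #include <unistd.h>
    #include <stdio.h>
    #include <err.h>

    int main(void)
    {
        int kq = kqueue();
        if (kq < 0)
            err(1, "kqueue");

        /* Repeating timer firing every 50ms (the default timer unit is milliseconds). */
        struct kevent kev;
        EV_SET(&kev, 1 /* ident */, EVFILT_TIMER, EV_ADD, 0, 50, NULL);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
            err(1, "kevent register");

        /* Sleep past several periods, then collect: data holds how many
         * expirations were folded into this single delivery. */
        usleep(220 * 1000);

        struct kevent out;
        int n = kevent(kq, NULL, 0, &out, 1, NULL);
        if (n == 1)
            printf("timer fired %lld time(s) since last delivery\n",
                   (long long)out.data);

        close(kq);
        return 0;
    }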
< * < * TODO: potential enhancements - tell the user about: < * - deadline to which this timer thought it was expiring < * - return kn_sfflags in the fflags field so the client can know < * under what flags the timer fired < */ < *kev = kn->kn_kevent; < kev->ext[0] = 0; < /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */ < < if (kn->kn_sdata == 0) { < kev->data = 1; < } else { --- > if (kn->kn_sdata != 0 && ((kn->kn_flags & EV_ERROR) == 0)) { 1583a1690,1692 > /* The timer better have had expired... */ > assert((kn->kn_hookid & TIMER_RUNNING) == 0); > 1606a1716,1722 > * > * An unsuccessful touch would: > * disarm the timer > * clear kn_data > * clear kn_sdata > * set EV_ERROR > * all of which will prevent this code from running. 1611c1727 < kev->data = (int64_t)num_fired; --- > kn->kn_data = (int64_t) num_fired; 1622,1625d1737 < /* < * This can't shortcut setting up the thread call, because < * knote_process deactivates EV_CLEAR knotes unconditionnally. < */ 1630c1742,1759 < return FILTER_ACTIVE; --- > /* > * Copy out the interesting kevent state, > * but don't leak out the raw time calculations. > * > * TODO: potential enhancements - tell the user about: > * - deadline to which this timer thought it was expiring > * - return kn_sfflags in the fflags field so the client can know > * under what flags the timer fired > */ > *kev = kn->kn_kevent; > kev->ext[0] = 0; > /* kev->ext[1] = 0; JMM - shouldn't we hide this too? */ > > /* we have delivered the event, reset the timer pop count */ > kn->kn_data = 0; > > filt_timerunlock(); > return 1; 1634d1762 < .f_extended_codes = true, 1637c1765 < .f_event = filt_badevent, --- > .f_event = filt_timerevent, 1642c1770,1784 < #pragma mark user_filtops --- > > #pragma mark EVFILT_USER > > > static void > filt_userlock(void) > { > lck_spin_lock(&_filt_userlock); > } > > static void > filt_userunlock(void) > { > lck_spin_unlock(&_filt_userlock); > } 1646a1789,1791 > /* EVFILT_USER knotes are not attached to anything in the kernel */ > /* Cant discover this knote until after attach - so no lock needed */ > kn->kn_hook = NULL; 1648c1793 < kn->kn_hookid = FILTER_ACTIVE; --- > kn->kn_hookid = 1; 1662c1807,1818 < filt_usertouch(struct knote *kn, struct kevent_internal_s *kev) --- > filt_user( > __unused struct knote *kn, > __unused long hint) > { > panic("filt_user"); > return 0; > } > > static int > filt_usertouch( > struct knote *kn, > struct kevent_internal_s *kev) 1665a1822,1824 > int active; > > filt_userlock(); 1683a1843,1845 > if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) > kn->kn_udata = kev->udata; > 1685c1847 < kn->kn_hookid = FILTER_ACTIVE; --- > kn->kn_hookid = 1; 1687c1849,1853 < return (int)kn->kn_hookid; --- > active = kn->kn_hookid; > > filt_userunlock(); > > return (active); 1696c1862 < int result = (int)kn->kn_hookid; --- > filt_userlock(); 1698,1706c1864,1875 < if (result) { < *kev = kn->kn_kevent; < kev->fflags = kn->kn_sfflags; < kev->data = kn->kn_sdata; < if (kn->kn_flags & EV_CLEAR) { < kn->kn_hookid = 0; < kn->kn_data = 0; < kn->kn_fflags = 0; < } --- > if (kn->kn_hookid == 0) { > filt_userunlock(); > return 0; > } > > *kev = kn->kn_kevent; > kev->fflags = (volatile UInt32)kn->kn_sfflags; > kev->data = kn->kn_sdata; > if (kn->kn_flags & EV_CLEAR) { > kn->kn_hookid = 0; > kn->kn_data = 0; > kn->kn_fflags = 0; 1707a1877 > filt_userunlock(); 1709c1879 < return result; --- > return 1; 1712,1719c1882 < SECURITY_READ_ONLY_EARLY(static struct filterops) user_filtops = { < .f_extended_codes = true, < .f_attach = filt_userattach, < 
.f_detach = filt_userdetach, < .f_event = filt_badevent, < .f_touch = filt_usertouch, < .f_process = filt_userprocess, < }; --- > #pragma mark EVFILT_WORKLOOP 1721c1884,1889 < #pragma mark workloop_filtops --- > #if DEBUG || DEVELOPMENT > /* > * see src/queue_internal.h in libdispatch > */ > #define DISPATCH_QUEUE_ENQUEUED 0x1ull > #endif 1735,1745c1903,1910 < /* < * Returns true when the interlock for the turnstile is the workqueue lock < * < * When this is the case, all turnstiles operations are delegated < * to the workqueue subsystem. < * < * This is required because kqueue_threadreq_bind_prepost only holds the < * workqueue lock but needs to move the inheritor from the workloop turnstile < * away from the creator thread, so that this now fulfilled request cannot be < * picked anymore by other threads. < */ --- > static inline void > filt_wlheld(__assert_only struct kqworkloop *kqwl) > { > LCK_MTX_ASSERT(&kqwl->kqwl_statelock, LCK_MTX_ASSERT_OWNED); > } > > #define WL_OWNER_SUSPENDED ((thread_t)(~0ull)) /* special owner when suspended */ > 1747c1912 < filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl) --- > filt_wlowner_is_valid(thread_t owner) 1749,1751c1914 < struct kqrequest *kqr = &kqwl->kqwl_request; < return (kqr->kqr_state & KQR_THREQUESTED) && < (kqr->kqr_thread == THREAD_NULL); --- > return owner != THREAD_NULL && owner != WL_OWNER_SUSPENDED; 1754,1756c1917,1919 < static void < filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts, < turnstile_update_flags_t flags) --- > static inline bool > filt_wlshould_end_ownership(struct kqworkloop *kqwl, > struct kevent_internal_s *kev, int error) 1758,1759c1921,1925 < turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL; < struct kqrequest *kqr = &kqwl->kqwl_request; --- > thread_t owner = kqwl->kqwl_owner; > return (error == 0 || error == ESTALE) && > (kev->fflags & NOTE_WL_END_OWNERSHIP) && > (owner == current_thread() || owner == WL_OWNER_SUSPENDED); > } 1761,1765c1927,1932 < /* < * binding to the workq should always happen through < * workq_kern_threadreq_update_inheritor() < */ < assert(!filt_wlturnstile_interlock_is_workq(kqwl)); --- > static inline bool > filt_wlshould_update_ownership(struct kevent_internal_s *kev, int error) > { > return error == 0 && (kev->fflags & NOTE_WL_DISCOVER_OWNER) && > kev->ext[EV_EXTIDX_WL_ADDR]; > } 1767,1770c1934,1939 < if ((inheritor = kqwl->kqwl_owner)) { < flags |= TURNSTILE_INHERITOR_THREAD; < } else if ((inheritor = kqr->kqr_thread)) { < flags |= TURNSTILE_INHERITOR_THREAD; --- > static inline bool > filt_wlshould_set_async_qos(struct kevent_internal_s *kev, int error, > kq_index_t async_qos) > { > if (error != 0) { > return false; 1772,1773c1941,1948 < < turnstile_update_inheritor(ts, inheritor, flags); --- > if (async_qos != THREAD_QOS_UNSPECIFIED) { > return true; > } > if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) { > /* see filt_wlprocess() */ > return true; > } > return false; 1776,1779d1950 < #define FILT_WLATTACH 0 < #define FILT_WLTOUCH 1 < #define FILT_WLDROP 2 < 1782,1783c1953,1954 < filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn, < struct kevent_internal_s *kev, kq_index_t qos_index, int op) --- > filt_wlupdateowner(struct kqworkloop *kqwl, struct kevent_internal_s *kev, > int error, kq_index_t async_qos) 1785d1955 < user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]); 1788,1793c1958,1962 < kq_index_t cur_owner_override = THREAD_QOS_UNSPECIFIED; < int action = KQWL_UTQ_NONE, error = 0; < bool needs_wake 
= false, needs_wllock = false; < uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE]; < uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK]; < uint64_t udata = 0; --- > kq_index_t cur_override = THREAD_QOS_UNSPECIFIED; > kq_index_t old_owner_override = THREAD_QOS_UNSPECIFIED; > boolean_t ipc_override_is_sync = false; > boolean_t old_owner_override_is_sync = false; > int action = KQWL_UTQ_NONE; 1795c1964,1973 < if (kev->fflags & (NOTE_WL_END_OWNERSHIP | NOTE_WL_DISCOVER_OWNER)) { --- > filt_wlheld(kqwl); > > /* > * The owner is only changed under both the filt_wllock and the > * kqwl_req_lock. Looking at it with either one held is fine. > */ > cur_owner = kqwl->kqwl_owner; > if (filt_wlshould_end_ownership(kqwl, kev, error)) { > new_owner = THREAD_NULL; > } else if (filt_wlshould_update_ownership(kev, error)) { 1797,1798c1975,1976 < * If we're maybe going to change the kqwl_owner, < * then we need to hold the filt_wllock(). --- > * Decipher the owner port name, and translate accordingly. > * The low 2 bits were borrowed for other flags, so mask them off. 1800,1846c1978,1991 < needs_wllock = true; < } else if (kqr->kqr_thread == current_thread()) { < /* < * Servicer updates need to be serialized with < * any ownership change too, as the kqr_thread value influences the < * outcome of handling NOTE_WL_DISCOVER_OWNER. < */ < needs_wllock = true; < } < < if (needs_wllock) { < filt_wllock(kqwl); < /* < * The kqwl owner is set under both the req and filter lock, < * meaning it's fine to look at it under any. < */ < new_owner = cur_owner = kqwl->kqwl_owner; < } else { < new_owner = cur_owner = THREAD_NULL; < } < < /* < * Phase 1: < * < * If asked, load the uint64 value at the user provided address and compare < * it against the passed in mask and expected value. < * < * If NOTE_WL_DISCOVER_OWNER is specified, translate the loaded name as < * a thread reference. < * < * If NOTE_WL_END_OWNERSHIP is specified and the currently known owner is < * the current thread, then end ownership. < * < * Lastly decide whether we need to perform a QoS update. < */ < if (uaddr) { < error = copyin_word(uaddr, &udata, sizeof(udata)); < if (error) { < goto out; < } < < /* Update state as copied in. */ < kev->ext[EV_EXTIDX_WL_VALUE] = udata; < < if ((udata & mask) != (kdata & mask)) { < error = ESTALE; < } else if (kev->fflags & NOTE_WL_DISCOVER_OWNER) { --- > uint64_t udata = kev->ext[EV_EXTIDX_WL_VALUE]; > mach_port_name_t new_owner_name = (mach_port_name_t)udata & ~0x3; > if (new_owner_name != MACH_PORT_NULL) { > new_owner_name = ipc_entry_name_mask(new_owner_name); > } > > if (MACH_PORT_VALID(new_owner_name)) { > new_owner = port_name_to_thread(new_owner_name); > if (new_owner == THREAD_NULL) > return EOWNERDEAD; > extra_thread_ref = new_owner; > } else if (new_owner_name == MACH_PORT_DEAD) { > new_owner = WL_OWNER_SUSPENDED; > } else { 1848,1851c1993,1994 < * Decipher the owner port name, and translate accordingly. < * The low 2 bits were borrowed for other flags, so mask them off. < * < * Then attempt translation to a thread reference or fail. --- > * We never want to learn a new owner that is NULL. > * Ownership should be ended with END_OWNERSHIP. 
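The NOTE_WL_DISCOVER_OWNER path above treats the 64-bit word copied in from userspace as a packed value: the low two bits were borrowed for flags, and the remaining bits name the owning thread's port, so the code masks them off before translating the name. A trivial illustration of that unpacking; the packed layout belongs to libdispatch, and the helper here is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t port_name_t;   /* stand-in for mach_port_name_t */

    /* Extract the owner port name from a packed 64-bit state word: the low
     * 2 bits are flag bits borrowed by userspace, the rest carries the name. */
    static port_name_t owner_name_from_state(uint64_t udata)
    {
        return (port_name_t)(udata & ~0x3ull);
    }

    int main(void)
    {
        uint64_t state = 0x0000000000001c07ull;   /* name 0x1c04 plus two flag bits */
        printf("owner port name: 0x%x\n", owner_name_from_state(state));
        return 0;
    }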
1853,1862c1996 < mach_port_name_t name = (mach_port_name_t)udata & ~0x3; < if (name != MACH_PORT_NULL) { < name = ipc_entry_name_mask(name); < extra_thread_ref = port_name_to_thread(name); < if (extra_thread_ref == THREAD_NULL) { < error = EOWNERDEAD; < goto out; < } < new_owner = extra_thread_ref; < } --- > new_owner = cur_owner; 1863a1998,1999 > } else { > new_owner = cur_owner; 1866,1899c2002,2003 < if ((kev->fflags & NOTE_WL_END_OWNERSHIP) && new_owner == current_thread()) { < new_owner = THREAD_NULL; < } < < if (error == 0) { < if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && (kev->flags & EV_DELETE)) { < action = KQWL_UTQ_SET_QOS_INDEX; < } else if (qos_index && kqr->kqr_qos_index != qos_index) { < action = KQWL_UTQ_SET_QOS_INDEX; < } < < if (op == FILT_WLTOUCH) { < /* < * Save off any additional fflags/data we just accepted < * But only keep the last round of "update" bits we acted on which helps < * debugging a lot. < */ < kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK; < kn->kn_sfflags |= kev->fflags; < kn->kn_sdata = kev->data; < if (kev->fflags & NOTE_WL_SYNC_WAKE) { < needs_wake = (kn->kn_hook != THREAD_NULL); < } < } else if (op == FILT_WLDROP) { < if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) == < NOTE_WL_SYNC_WAIT) { < /* < * When deleting a SYNC_WAIT knote that hasn't been woken up < * explicitly, issue a wake up. < */ < kn->kn_sfflags |= NOTE_WL_SYNC_WAKE; < needs_wake = (kn->kn_hook != THREAD_NULL); < } < } --- > if (filt_wlshould_set_async_qos(kev, error, async_qos)) { > action = KQWL_UTQ_SET_ASYNC_QOS; 1901,1908c2005 < < /* < * Phase 2: < * < * Commit ownership and QoS changes if any, possibly wake up waiters < */ < < if (cur_owner == new_owner && action == KQWL_UTQ_NONE && !needs_wake) { --- > if (cur_owner == new_owner && action == KQWL_UTQ_NONE) { 1912c2009 < kq_req_lock(kqwl); --- > kqwl_req_lock(kqwl); 1915,1916c2012,2013 < if (new_owner == kqr->kqr_thread) { < new_owner = THREAD_NULL; --- > if ((kqr->kqr_state & KQR_BOUND) && new_owner == kqr->kqr_thread) { > kqwl->kqwl_owner = new_owner = THREAD_NULL; 1925,1929c2022,2024 < cur_owner_override = kqworkloop_owner_override(kqwl); < < if (cur_owner) { < thread_ends_owning_workloop(cur_owner); < } --- > cur_override = kqworkloop_combined_qos(kqwl, &ipc_override_is_sync); > old_owner_override = kqr->kqr_dsync_owner_qos; > old_owner_override_is_sync = kqr->kqr_owner_override_is_sync; 1931c2026 < if (new_owner) { --- > if (filt_wlowner_is_valid(new_owner)) { 1933,1934c2028,2029 < if (cur_owner_override != THREAD_QOS_UNSPECIFIED) { < thread_add_ipc_override(new_owner, cur_owner_override); --- > if (cur_override != THREAD_QOS_UNSPECIFIED) { > thread_add_ipc_override(new_owner, cur_override); 1935a2031,2036 > if (ipc_override_is_sync) { > thread_add_sync_ipc_override(new_owner); > } > /* Update the kqr to indicate that owner has sync ipc override */ > kqr->kqr_dsync_owner_qos = cur_override; > kqr->kqr_owner_override_is_sync = ipc_override_is_sync; 1937c2038 < if ((kqr->kqr_state & KQR_THREQUESTED) && !kqr->kqr_thread) { --- > if ((kqr->kqr_state & (KQR_THREQUESTED | KQR_BOUND)) == KQR_THREQUESTED) { 1942c2043,2045 < } else { --- > } else if (new_owner == THREAD_NULL) { > kqr->kqr_dsync_owner_qos = THREAD_QOS_UNSPECIFIED; > kqr->kqr_owner_override_is_sync = false; 1951,1953d2053 < struct turnstile *ts = kqwl->kqwl_turnstile; < bool wl_inheritor_updated = false; < 1955c2055 < kqworkloop_update_threads_qos(kqwl, action, qos_index); --- > kqworkloop_update_threads_qos(kqwl, action, async_qos); 1958,1983c2058 < if 
(cur_owner != new_owner && ts) { < if (action == KQWL_UTQ_REDRIVE_EVENTS) { < /* < * Note that when action is KQWL_UTQ_REDRIVE_EVENTS, < * the code went through workq_kern_threadreq_initiate() < * and the workqueue has set the inheritor already < */ < assert(filt_wlturnstile_interlock_is_workq(kqwl)); < } else if (filt_wlturnstile_interlock_is_workq(kqwl)) { < workq_kern_threadreq_lock(kqwl->kqwl_p); < workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner, < ts, TURNSTILE_IMMEDIATE_UPDATE); < workq_kern_threadreq_unlock(kqwl->kqwl_p); < if (!filt_wlturnstile_interlock_is_workq(kqwl)) { < /* < * If the workq is no longer the interlock, then < * workq_kern_threadreq_update_inheritor() has finished a bind < * and we need to fallback to the regular path. < */ < filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE); < } < wl_inheritor_updated = true; < } else { < filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE); < wl_inheritor_updated = true; < } --- > kqwl_req_unlock(kqwl); 1985,1990c2060,2066 < /* < * We need a turnstile reference because we are dropping the interlock < * and the caller has not called turnstile_prepare. < */ < if (wl_inheritor_updated) { < turnstile_reference(ts); --- > /* Now that we are unlocked, drop the override and ref on old owner */ > if (new_owner != cur_owner && filt_wlowner_is_valid(cur_owner)) { > if (old_owner_override != THREAD_QOS_UNSPECIFIED) { > thread_drop_ipc_override(cur_owner); > } > if (old_owner_override_is_sync) { > thread_drop_sync_ipc_override(cur_owner); 1991a2068,2069 > thread_ends_owning_workloop(cur_owner); > thread_deallocate(cur_owner); 1994,1996c2072,2074 < if (needs_wake && ts) { < waitq_wakeup64_thread(&ts->ts_waitq, CAST_EVENT64_T((event_t)kn), < (thread_t)kn->kn_hook, THREAD_AWAKENED); --- > out: > if (extra_thread_ref) { > thread_deallocate(extra_thread_ref); 1997a2076,2077 > return error; > } 1999c2079,2087 < kq_req_unlock(kqwl); --- > static int > filt_wldebounce( > struct kqworkloop *kqwl, > struct kevent_internal_s *kev, > int default_result) > { > user_addr_t addr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]); > uint64_t udata; > int error; 2001,2004c2089,2090 < if (wl_inheritor_updated) { < turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD); < turnstile_deallocate(ts); < } --- > /* we must have the workloop state mutex held */ > filt_wlheld(kqwl); 2006,2014c2092,2095 < out: < /* < * Phase 3: < * < * Unlock and cleanup various lingering references and things. < */ < if (needs_wllock) { < filt_wlunlock(kqwl); < } --- > /* Do we have a debounce address to work with? 
*/ > if (addr) { > uint64_t kdata = kev->ext[EV_EXTIDX_WL_VALUE]; > uint64_t mask = kev->ext[EV_EXTIDX_WL_MASK]; 2016,2032c2097,2100 < #if CONFIG_WORKLOOP_DEBUG < KQWL_HISTORY_WRITE_ENTRY(kqwl, { < .updater = current_thread(), < .servicer = kqr->kqr_thread, /* Note: racy */ < .old_owner = cur_owner, < .new_owner = new_owner, < < .kev_ident = kev->ident, < .error = (int16_t)error, < .kev_flags = kev->flags, < .kev_fflags = kev->fflags, < < .kev_mask = mask, < .kev_value = kdata, < .in_value = udata, < }); < #endif // CONFIG_WORKLOOP_DEBUG --- > error = copyin_word(addr, &udata, sizeof(udata)); > if (error) { > return error; > } 2034,2036c2102,2107 < if (cur_owner && new_owner != cur_owner) { < if (cur_owner_override != THREAD_QOS_UNSPECIFIED) { < thread_drop_ipc_override(cur_owner); --- > /* update state as copied in */ > kev->ext[EV_EXTIDX_WL_VALUE] = udata; > > /* If the masked bits don't match, reject it as stale */ > if ((udata & mask) != (kdata & mask)) { > return ESTALE; 2038,2039d2108 < thread_deallocate(cur_owner); < } 2041,2042c2110,2118 < if (extra_thread_ref) { < thread_deallocate(extra_thread_ref); --- > #if DEBUG || DEVELOPMENT > if ((kev->fflags & NOTE_WL_THREAD_REQUEST) && !(kev->flags & EV_DELETE)) { > if ((udata & DISPATCH_QUEUE_ENQUEUED) == 0 && > (udata >> 48) != 0 && (udata >> 48) != 0xffff) { > panic("kevent: workloop %#016llx is not enqueued " > "(kev:%p dq_state:%#016llx)", kev->udata, kev, udata); > } > } > #endif 2044c2120,2121 < return error; --- > > return default_result; 2055,2056c2132,2136 < filt_wlremember_last_update(struct knote *kn, struct kevent_internal_s *kev, < int error) --- > filt_wlremember_last_update( > __assert_only struct kqworkloop *kqwl, > struct knote *kn, > struct kevent_internal_s *kev, > int error) 2057a2138 > filt_wlheld(kqwl); 2062a2144,2184 > /* > * Return which operations on EVFILT_WORKLOOP need to be protected against > * knoteusewait() causing priority inversions. > */ > static bool > filt_wlneeds_boost(struct kevent_internal_s *kev) > { > if (kev == NULL) { > /* > * this is an f_process() usecount, and it can cause a drop to wait > */ > return true; > } > if (kev->fflags & NOTE_WL_THREAD_REQUEST) { > /* > * All operations on thread requests may starve drops or re-attach of > * the same knote, all of them need boosts. None of what we do under > * thread-request usecount holds blocks anyway. > */ > return true; > } > if (kev->fflags & NOTE_WL_SYNC_WAIT) { > /* > * this may call filt_wlwait() and we don't want to hold any boost when > * woken up, this would cause background threads contending on > * dispatch_sync() to wake up at 64 and be preempted immediately when > * this drops. > */ > return false; > } > > /* > * SYNC_WAIT knotes when deleted don't need to be rushed, there's no > * detach/reattach race with these ever. In addition to this, when the > * SYNC_WAIT knote is dropped, the caller is no longer receiving the > * workloop overrides if any, and we'd rather schedule other threads than > * him, he's not possibly stalling anything anymore. 
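filt_wldebounce above loads a 64-bit word from the address userspace supplied in ext[EV_EXTIDX_WL_ADDR], reports what it saw back through ext[EV_EXTIDX_WL_VALUE], and fails the request with ESTALE when the masked bits no longer match the expected value, keeping kernel and userland in agreement. A userspace model of that check, with a plain memory read standing in for copyin_word (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    /* Compare the current contents of a state word against the value the
     * caller believes it has, under a mask.  Mismatched masked bits mean the
     * caller's view is stale and the operation should be rejected. */
    static int debounce_state_word(const uint64_t *addr, uint64_t expected,
                                   uint64_t mask, uint64_t *observed)
    {
        uint64_t cur = *addr;     /* the kernel would copyin_word() here */

        *observed = cur;          /* reported back, like ext[EV_EXTIDX_WL_VALUE] */

        if ((cur & mask) != (expected & mask))
            return ESTALE;
        return 0;
    }

    int main(void)
    {
        uint64_t state = 0x0001000000000001ull;   /* e.g. an "enqueued" bit set */
        uint64_t seen;

        /* caller's snapshot still matches under the mask -> accepted */
        printf("match: %d\n", debounce_state_word(&state, 0x1, 0x1, &seen));

        state &= ~0x1ull;                          /* state moved on underneath */
        printf("stale: %d (ESTALE=%d)\n",
               debounce_state_word(&state, 0x1, 0x1, &seen), ESTALE);
        return 0;
    }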
> */ > return (kev->flags & EV_DELETE) == 0; > } > 2080c2202 < kq_req_lock(kqwl); --- > kqwl_req_lock(kqwl); 2088c2210,2214 < kev->ext[0] = thread_tid(kqwl->kqwl_owner); --- > if (kqwl->kqwl_owner == WL_OWNER_SUSPENDED) { > kev->ext[0] = ~0ull; > } else { > kev->ext[0] = thread_tid(kqwl->kqwl_owner); > } 2092c2218 < kq_req_unlock(kqwl); --- > kqwl_req_unlock(kqwl); 2097a2224 > /* Some simple validation */ 2105,2106c2232,2234 < qos_index = _pthread_priority_thread_qos(kn->kn_qos); < if (qos_index == THREAD_QOS_UNSPECIFIED) { --- > qos_index = qos_index_from_qos(kn, kn->kn_qos, FALSE); > if (qos_index < THREAD_QOS_MAINTENANCE || > qos_index > THREAD_QOS_USER_INTERACTIVE) { 2110,2117d2237 < if (kqwl->kqwl_request.kqr_qos_index) { < /* < * There already is a thread request, and well, you're only allowed < * one per workloop, so fail the attach. < */ < error = EALREADY; < goto out; < } 2120a2241,2244 > if (kq->kq_state & KQ_NO_WQ_THREAD) { > error = ENOTSUP; > goto out; > } 2139c2263,2278 < error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLATTACH); --- > filt_wllock(kqwl); > kn->kn_hook = NULL; > > if (command == NOTE_WL_THREAD_REQUEST && kqwl->kqwl_request.kqr_qos_index) { > /* > * There already is a thread request, and well, you're only allowed > * one per workloop, so fail the attach. > * > * Note: kqr_qos_index is always set with the wllock held, so we > * don't need to take the kqr lock. > */ > error = EALREADY; > } else { > /* Make sure user and kernel are in agreement on important state */ > error = filt_wldebounce(kqwl, kev, 0); > } 2140a2280,2281 > error = filt_wlupdateowner(kqwl, kev, error, qos_index); > filt_wlunlock(kqwl); 2142a2284 > kn->kn_flags |= EV_ERROR; 2147c2289 < knote_set_error(kn, error); --- > kn->kn_data = error; 2150,2152c2292 < if (command == NOTE_WL_SYNC_WAIT) { < return kevent_register_wait_prepare(kn, kev); < } --- > 2154,2162c2294 < if (command == NOTE_WL_THREAD_REQUEST) { < /* < * Thread Request knotes need an explicit touch to be active again, < * so delivering an event needs to also consume it. < */ < kn->kn_flags |= EV_CLEAR; < return FILTER_ACTIVE; < } < return 0; --- > return command == NOTE_WL_THREAD_REQUEST; 2165,2166c2297,2301 < static void __dead2 < filt_wlwait_continue(void *parameter, wait_result_t wr) --- > __attribute__((noinline,not_tail_called)) > static int > filt_wlwait(struct kqworkloop *kqwl, > struct knote *kn, > struct kevent_internal_s *kev) 2168,2170c2303,2304 < struct _kevent_register *cont_args = parameter; < struct kqworkloop *kqwl = (struct kqworkloop *)cont_args->kq; < struct kqrequest *kqr = &kqwl->kqwl_request; --- > filt_wlheld(kqwl); > assert((kn->kn_sfflags & NOTE_WL_SYNC_WAKE) == 0); 2172,2181c2306,2310 < kq_req_lock(kqwl); < kqr->kqr_dsync_waiters--; < if (filt_wlturnstile_interlock_is_workq(kqwl)) { < workq_kern_threadreq_lock(kqwl->kqwl_p); < turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL); < workq_kern_threadreq_unlock(kqwl->kqwl_p); < } else { < turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, NULL); < } < kq_req_unlock(kqwl); --- > /* > * Hint to the wakeup side that this thread is waiting. Also used by > * stackshot for waitinfo. 
> */ > kn->kn_hook = current_thread(); 2183c2312 < turnstile_cleanup(); --- > thread_set_pending_block_hint(current_thread(), kThreadWaitWorkloopSyncWait); 2185,2190c2314 < if (wr == THREAD_INTERRUPTED) { < cont_args->kev.flags |= EV_ERROR; < cont_args->kev.data = EINTR; < } else if (wr != THREAD_AWAKENED) { < panic("Unexpected wait result: %d", wr); < } --- > wait_result_t wr = assert_wait(kn, THREAD_ABORTSAFE); 2192,2193c2316,2318 < kevent_register_wait_return(cont_args); < } --- > if (wr == THREAD_WAITING) { > kq_index_t qos_index = qos_index_from_qos(kn, kev->qos, TRUE); > struct kqrequest *kqr = &kqwl->kqwl_request; 2195,2206c2320 < /* < * Called with the workloop mutex held, most of the time never returns as it < * calls filt_wlwait_continue through a continuation. < */ < static void __dead2 < filt_wlpost_register_wait(struct uthread *uth, struct knote_lock_ctx *knlc, < struct _kevent_register *cont_args) < { < struct kqworkloop *kqwl = (struct kqworkloop *)cont_args->kq; < struct kqrequest *kqr = &kqwl->kqwl_request; < struct turnstile *ts; < bool workq_locked = false; --- > thread_t thread_to_handoff = THREAD_NULL; /* holds +1 thread ref */ 2208c2322,2326 < kq_req_lock(kqwl); --- > thread_t kqwl_owner = kqwl->kqwl_owner; > if (filt_wlowner_is_valid(kqwl_owner)) { > thread_reference(kqwl_owner); > thread_to_handoff = kqwl_owner; > } 2210c2328 < kqr->kqr_dsync_waiters++; --- > kqwl_req_lock(kqwl); 2212,2215c2330,2337 < if (filt_wlturnstile_interlock_is_workq(kqwl)) { < workq_kern_threadreq_lock(kqwl->kqwl_p); < workq_locked = true; < } --- > if (qos_index) { > assert(kqr->kqr_dsync_waiters < UINT16_MAX); > kqr->kqr_dsync_waiters++; > if (qos_index > kqr->kqr_dsync_waiters_qos) { > kqworkloop_update_threads_qos(kqwl, > KQWL_UTQ_SET_SYNC_WAITERS_QOS, qos_index); > } > } 2217,2218c2339,2341 < ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile, < TURNSTILE_NULL, TURNSTILE_WORKLOOPS); --- > if ((kqr->kqr_state & KQR_BOUND) && thread_to_handoff == THREAD_NULL) { > assert(kqr->kqr_thread != THREAD_NULL); > thread_t servicer = kqr->kqr_thread; 2220,2230c2343,2344 < if (workq_locked) { < workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, < &kqwl->kqwl_request, kqwl->kqwl_owner, ts, < TURNSTILE_DELAYED_UPDATE); < if (!filt_wlturnstile_interlock_is_workq(kqwl)) { < /* < * if the interlock is no longer the workqueue lock, < * then we don't need to hold it anymore. < */ < workq_kern_threadreq_unlock(kqwl->kqwl_p); < workq_locked = false; --- > thread_reference(servicer); > thread_to_handoff = servicer; 2232,2239d2345 < } < if (!workq_locked) { < /* < * If the interlock is the workloop's, then it's our responsibility to < * call update_inheritor, so just do it. 
< */ < filt_wlupdate_inheritor(kqwl, ts, TURNSTILE_DELAYED_UPDATE); < } 2241,2243c2347 < thread_set_pending_block_hint(uth->uu_thread, kThreadWaitWorkloopSyncWait); < waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(cont_args->knote), < THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER); --- > kqwl_req_unlock(kqwl); 2245,2247c2349,2360 < if (workq_locked) { < workq_kern_threadreq_unlock(kqwl->kqwl_p); < } --- > filt_wlunlock(kqwl); > > /* TODO: use continuation based blocking */ > > /* consume a refcount on thread_to_handoff, then thread_block() */ > wr = thread_handoff(thread_to_handoff); > thread_to_handoff = THREAD_NULL; > > filt_wllock(kqwl); > > /* clear waiting state (only one waiting thread - so no race) */ > assert(kn->kn_hook == current_thread()); 2249,2251c2362,2371 < thread_t thread = kqwl->kqwl_owner ?: kqr->kqr_thread; < if (thread) { < thread_reference(thread); --- > if (qos_index) { > kqwl_req_lock(kqwl); > assert(kqr->kqr_dsync_waiters > 0); > if (--kqr->kqr_dsync_waiters == 0) { > assert(kqr->kqr_dsync_waiters_qos); > kqworkloop_update_threads_qos(kqwl, > KQWL_UTQ_SET_SYNC_WAITERS_QOS, 0); > } > kqwl_req_unlock(kqwl); > } 2253d2372 < kq_req_unlock(kqwl); 2255c2374,2386 < kevent_register_wait_block(ts, thread, knlc, filt_wlwait_continue, cont_args); --- > kn->kn_hook = NULL; > > switch (wr) { > case THREAD_AWAKENED: > return 0; > case THREAD_INTERRUPTED: > return EINTR; > case THREAD_RESTART: > return ECANCELED; > default: > panic("filt_wlattach: unexpected wait result %d", wr); > return EINVAL; > } 2261c2392,2393 < event64_t event, thread_waitinfo_t *waitinfo) --- > event64_t event, > thread_waitinfo_t *waitinfo) 2263c2395 < struct knote *kn = (struct knote *)event; --- > struct knote *kn = (struct knote*) event; 2278c2410,2412 < if (kqwl_owner != THREAD_NULL) { --- > if (kqwl_owner == WL_OWNER_SUSPENDED) { > waitinfo->owner = STACKSHOT_WAITOWNER_SUSPENDED; > } else if (kqwl_owner != THREAD_NULL) { 2292a2427,2451 > > return; > } > > /* > * Takes kqueue locked, returns locked, may drop in the middle and/or block for a while > */ > static int > filt_wlpost_attach(struct knote *kn, struct kevent_internal_s *kev) > { > struct kqueue *kq = knote_get_kq(kn); > struct kqworkloop *kqwl = (struct kqworkloop *)kq; > int error = 0; > > if (kev->fflags & NOTE_WL_SYNC_WAIT) { > if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) { > filt_wllock(kqwl); > /* if the wake has already preposted, don't wait */ > if ((kn->kn_sfflags & NOTE_WL_SYNC_WAKE) == 0) > error = filt_wlwait(kqwl, kn, kev); > filt_wlunlock(kqwl); > knoteuse2kqlock(kq, kn, KNUSE_NONE); > } > } > return error; 2299,2301c2458,2474 < if (kn->kn_hook) { < kevent_register_wait_cleanup(kn); < } --- > > /* > * Thread requests have nothing to detach. > * Sync waiters should have been aborted out > * and drop their refs before we could drop/ > * detach their knotes. 
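The NOTE_WL_SYNC_WAIT / NOTE_WL_SYNC_WAKE pairing in this hunk records the waiter (kn_hook = current_thread()) before blocking, and skips the block entirely when the wake has already preposted. A pthread sketch of that prepost-tolerant handshake; it models only the ordering, not the thread-handoff or turnstile/QoS behavior, and none of the names are the kernel's:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct sync_knote {
        pthread_mutex_t lock;
        pthread_cond_t  cv;
        bool            woken;       /* NOTE_WL_SYNC_WAKE analogue */
        pthread_t       waiter;      /* kn_hook analogue (stackshot/debug info) */
        bool            has_waiter;
    };

    /* SYNC_WAIT: if the wake already preposted, return immediately;
     * otherwise record ourselves as the waiter and block until woken. */
    static void sync_wait(struct sync_knote *kn)
    {
        pthread_mutex_lock(&kn->lock);
        if (!kn->woken) {
            kn->waiter = pthread_self();
            kn->has_waiter = true;
            while (!kn->woken)
                pthread_cond_wait(&kn->cv, &kn->lock);
            kn->has_waiter = false;
        }
        pthread_mutex_unlock(&kn->lock);
    }

    /* SYNC_WAKE: mark the knote woken and signal the recorded waiter, if any
     * (only one waiter exists in this model). */
    static void sync_wake(struct sync_knote *kn)
    {
        pthread_mutex_lock(&kn->lock);
        kn->woken = true;
        if (kn->has_waiter)
            pthread_cond_signal(&kn->cv);
        pthread_mutex_unlock(&kn->lock);
    }

    int main(void)
    {
        struct sync_knote kn = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false, 0, false
        };
        sync_wake(&kn);   /* prepost the wake first... */
        sync_wait(&kn);   /* ...so the wait returns without blocking */
        printf("did not block\n");
        return 0;
    }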
> */ > assert(kn->kn_hook == NULL); > } > > static int > filt_wlevent( > __unused struct knote *kn, > __unused long hint) > { > panic("filt_wlevent"); > return 0; 2305,2306c2478 < filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_internal_s *kev, < thread_qos_t *qos_index) --- > filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_internal_s *kev) 2310,2324c2482 < < if ((kev->fflags & NOTE_WL_DISCOVER_OWNER) && (kev->flags & EV_DELETE)) { < return EINVAL; < } < if (kev->fflags & NOTE_WL_UPDATE_QOS) { < if (kev->flags & EV_DELETE) { < return EINVAL; < } < if (sav_commands != NOTE_WL_THREAD_REQUEST) { < return EINVAL; < } < if (!(*qos_index = _pthread_priority_thread_qos(kev->qos))) { < return ERANGE; < } < } --- > int error = 0; 2329,2330c2487,2488 < if (sav_commands != NOTE_WL_THREAD_REQUEST) < return EINVAL; --- > if (sav_commands != new_commands) > error = EINVAL; 2335,2337c2493,2494 < return EINVAL; < goto sync_checks; < --- > error = EINVAL; > /* FALLTHROUGH */ 2339c2496 < sync_checks: --- > /* waits and wakes can update themselves or their counterparts */ 2341c2498,2500 < return EINVAL; --- > error = EINVAL; > if (kev->fflags & NOTE_WL_UPDATE_QOS) > error = EINVAL; 2343c2502,2511 < return EINVAL; --- > error = EINVAL; > if (kev->flags & EV_DELETE) { > /* > * Really this is not supported: there is absolutely no reason > * whatsoever to want to fail the drop of a NOTE_WL_SYNC_WAIT knote. > */ > if (kev->ext[EV_EXTIDX_WL_ADDR] && kev->ext[EV_EXTIDX_WL_MASK]) { > error = EINVAL; > } > } 2347c2515 < return EINVAL; --- > error = EINVAL; 2349c2517,2520 < return 0; --- > if ((kev->flags & EV_DELETE) && (kev->fflags & NOTE_WL_DISCOVER_OWNER)) { > error = EINVAL; > } > return error; 2353c2524,2526 < filt_wltouch(struct knote *kn, struct kevent_internal_s *kev) --- > filt_wltouch( > struct knote *kn, > struct kevent_internal_s *kev) 2355,2356c2528,2533 < struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn); < thread_qos_t qos_index = THREAD_QOS_UNSPECIFIED; --- > struct kqueue *kq = knote_get_kq(kn); > int error = 0; > struct kqworkloop *kqwl; > > assert(kq->kq_state & KQ_WORKLOOP); > kqwl = (struct kqworkloop *)kq; 2358c2535 < int error = filt_wlvalidate_kev_flags(kn, kev, &qos_index); --- > error = filt_wlvalidate_kev_flags(kn, kev); 2363,2364c2540,2543 < error = filt_wlupdate(kqwl, kn, kev, qos_index, FILT_WLTOUCH); < filt_wlremember_last_update(kn, kev, error); --- > filt_wllock(kqwl); > > /* Make sure user and kernel are in agreement on important state */ > error = filt_wldebounce(kqwl, kev, 0); 2366c2545,2546 < goto out; --- > error = filt_wlupdateowner(kqwl, kev, error, 0); > goto out_unlock; 2368a2549,2625 > int new_command = kev->fflags & NOTE_WL_COMMANDS_MASK; > switch (new_command) { > case NOTE_WL_THREAD_REQUEST: > assert(kqwl->kqwl_request.kqr_qos_index != THREAD_QOS_UNSPECIFIED); > break; > > case NOTE_WL_SYNC_WAIT: > /* > * we need to allow waiting several times on the same knote because > * of EINTR. If it's already woken though, it won't block. > */ > break; > > case NOTE_WL_SYNC_WAKE: > if (kn->kn_sfflags & NOTE_WL_SYNC_WAKE) { > /* disallow waking the same knote twice */ > error = EALREADY; > goto out_unlock; > } > if (kn->kn_hook) { > thread_wakeup_thread((event_t)kn, (thread_t)kn->kn_hook); > } > break; > > default: > error = EINVAL; > goto out_unlock; > } > > /* > * Save off any additional fflags/data we just accepted > * But only keep the last round of "update" bits we acted on which helps > * debugging a lot. 
> */ > kn->kn_sfflags &= ~NOTE_WL_UPDATES_MASK; > kn->kn_sfflags |= kev->fflags; > kn->kn_sdata = kev->data; > > kq_index_t qos_index = THREAD_QOS_UNSPECIFIED; > > if (kev->fflags & NOTE_WL_UPDATE_QOS) { > qos_t qos = pthread_priority_canonicalize(kev->qos, FALSE); > > if (kn->kn_qos != qos) { > qos_index = qos_index_from_qos(kn, qos, FALSE); > if (qos_index == THREAD_QOS_UNSPECIFIED) { > error = ERANGE; > goto out_unlock; > } > kqlock(kq); > if (kn->kn_status & KN_QUEUED) { > knote_dequeue(kn); > knote_set_qos_index(kn, qos_index); > knote_enqueue(kn); > knote_wakeup(kn); > } else { > knote_set_qos_index(kn, qos_index); > } > kn->kn_qos = qos; > kqunlock(kq); > } > } > > error = filt_wlupdateowner(kqwl, kev, 0, qos_index); > if (error) { > goto out_unlock; > } > > if (new_command == NOTE_WL_SYNC_WAIT) { > /* if the wake has already preposted, don't wait */ > if ((kn->kn_sfflags & NOTE_WL_SYNC_WAKE) == 0) > error = filt_wlwait(kqwl, kn, kev); > } > > out_unlock: > filt_wlremember_last_update(kqwl, kn, kev, error); > filt_wlunlock(kqwl); 2379,2382d2635 < int command = kev->fflags & NOTE_WL_COMMANDS_MASK; < if (command == NOTE_WL_SYNC_WAIT && !(kn->kn_sfflags & NOTE_WL_SYNC_WAKE)) { < return kevent_register_wait_prepare(kn, kev); < } 2384,2390c2637 < if (command == NOTE_WL_THREAD_REQUEST) { < if (kev->fflags & NOTE_WL_UPDATE_QOS) { < return FILTER_ACTIVE | FILTER_UPDATE_REQ_QOS; < } < return FILTER_ACTIVE; < } < return 0; --- > return new_command == NOTE_WL_THREAD_REQUEST; 2393,2394c2640,2643 < static bool < filt_wlallow_drop(struct knote *kn, struct kevent_internal_s *kev) --- > static int > filt_wldrop_and_unlock( > struct knote *kn, > struct kevent_internal_s *kev) 2396c2645,2652 < struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn); --- > struct kqueue *kq = knote_get_kq(kn); > struct kqworkloop *kqwl = (struct kqworkloop *)kq; > int error = 0, knoteuse_flags = KNUSE_NONE; > > kqlock_held(kq); > > assert(kev->flags & EV_DELETE); > assert(kq->kq_state & KQ_WORKLOOP); 2398c2654 < int error = filt_wlvalidate_kev_flags(kn, kev, NULL); --- > error = filt_wlvalidate_kev_flags(kn, kev); 2403,2405c2659,2666 < error = filt_wlupdate(kqwl, kn, kev, 0, FILT_WLDROP); < filt_wlremember_last_update(kn, kev, error); < if (error) { --- > if (kn->kn_sfflags & NOTE_WL_THREAD_REQUEST) { > knoteuse_flags |= KNUSE_BOOST; > } > > /* take a usecount to allow taking the filt_wllock */ > if (!kqlock2knoteuse(kq, kn, knoteuse_flags)) { > /* knote is being dropped already */ > error = EINPROGRESS; 2409c2670,2726 < out: --- > filt_wllock(kqwl); > > /* > * Make sure user and kernel are in agreement on important state > * > * Userland will modify bits to cause this to fail for the touch / drop > * race case (when a drop for a thread request quiescing comes in late after > * the workloop has been woken up again). > */ > error = filt_wldebounce(kqwl, kev, 0); > > if (!knoteuse2kqlock(kq, kn, knoteuse_flags)) { > /* knote is no longer alive */ > error = EINPROGRESS; > goto out_unlock; > } > > if (!error && (kn->kn_sfflags & NOTE_WL_THREAD_REQUEST) && kn->kn_inuse) { > /* > * There is a concurrent drop or touch happening, we can't resolve this, > * userland has to redrive. 
> * > * The race we're worried about here is the following: > * > * f_touch | f_drop_and_unlock > * ------------------------+-------------------------------------------- > * | kqlock() > * | kqlock2knoteuse() > * | filt_wllock() > * | debounces successfully > * kqlock() | > * kqlock2knoteuse | > * filt_wllock() | > * | knoteuse2kqlock() > * | filt_wlunlock() > * | kqlock2knotedrop() > * debounces successfully | > * filt_wlunlock() | > * caller WAKES f_drop | > * | performs drop, but f_touch should have won > * > * So if the usecount is not 0 here, we need to wait for it to drop and > * redrive the whole logic (including looking up the knote again). > */ > filt_wlunlock(kqwl); > knoteusewait(kq, kn); > return ERESTART; > } > > /* > * If error is 0 this will set kqr_qos_index to THREAD_QOS_UNSPECIFIED > * > * If error is 0 or ESTALE this may drop ownership and cause a thread > * request redrive, however the kqlock is held which prevents f_process() to > * run until we did the drop for real. > */ > error = filt_wlupdateowner(kqwl, kev, error, 0); 2411,2412c2728,2739 < if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) { < return false; --- > goto out_unlock; > } > > if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) == > NOTE_WL_SYNC_WAIT) { > /* > * When deleting a SYNC_WAIT knote that hasn't been woken up > * explicitly, issue a wake up. > */ > kn->kn_sfflags |= NOTE_WL_SYNC_WAKE; > if (kn->kn_hook) { > thread_wakeup_thread((event_t)kn, (thread_t)kn->kn_hook); 2414,2416d2740 < kev->flags |= EV_ERROR; < kev->data = error; < return false; 2418c2742,2773 < return true; --- > > out_unlock: > filt_wlremember_last_update(kqwl, kn, kev, error); > filt_wlunlock(kqwl); > > out: > if (error == 0) { > /* If nothing failed, do the regular knote drop. */ > if (kqlock2knotedrop(kq, kn)) { > knote_drop(kn, current_proc()); > } else { > error = EINPROGRESS; > } > } else { > kqunlock(kq); > } > if (error == ESTALE && (kev->fflags & NOTE_WL_IGNORE_ESTALE)) { > error = 0; > } > if (error == EINPROGRESS) { > /* > * filt_wlprocess() makes sure that no event can be delivered for > * NOTE_WL_THREAD_REQUEST knotes once a drop is happening, and > * NOTE_WL_SYNC_* knotes are never fired. > * > * It means that EINPROGRESS is about a state that userland cannot > * observe for this filter (an event being delivered concurrently from > * a drop), so silence the error. > */ > error = 0; > } > return error; 2427c2782,2784 < struct kqworkloop *kqwl = (struct kqworkloop *)knote_get_kq(kn); --- > struct kqueue *kq = knote_get_kq(kn); > struct kqworkloop *kqwl = (struct kqworkloop *)kq; > struct kqrequest *kqr = &kqwl->kqwl_request; 2430,2432c2787 < assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST); < < filt_wllock(kqwl); --- > assert(kq->kq_state & KQ_WORKLOOP); 2434,2448c2789,2794 < if (kqwl->kqwl_owner) { < /* < * userspace sometimes due to events being < * delivered but not triggering a drain session can cause a process < * of the thread request knote. < * < * When that happens, the automatic deactivation due to process < * would swallow the event, so we have to activate the knote again. 
< */ < kqlock(kqwl); < knote_activate(kn); < kqunlock(kqwl); < } else { < #if DEBUG || DEVELOPMENT < if (kevent_debug_flags() & KEVENT_PANIC_ON_NON_ENQUEUED_PROCESS) { --- > /* only thread requests should get here */ > assert(kn->kn_sfflags & NOTE_WL_THREAD_REQUEST); > if (kn->kn_sfflags & NOTE_WL_THREAD_REQUEST) { > filt_wllock(kqwl); > assert(kqr->kqr_qos_index != THREAD_QOS_UNSPECIFIED); > if (kqwl->kqwl_owner) { 2450c2796,2801 < * see src/queue_internal.h in libdispatch --- > * userspace sometimes due to events being > * delivered but not triggering a drain session can cause a process > * of the thread request knote. > * > * When that happens, the automatic deactivation due to process > * would swallow the event, so we have to activate the knote again. 2452c2803,2807 < #define DISPATCH_QUEUE_ENQUEUED 0x1ull --- > kqlock(kq); > knote_activate(kn); > kqunlock(kq); > } else if (kqr->kqr_qos_index) { > #if DEBUG || DEVELOPMENT 2459c2814 < (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) { --- > (val >> 48) != 0 && (val >> 48) != 0xffff) { 2462c2817,2818 < kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]); --- > kn->kn_udata, kn, val, > kn->kn_ext[EV_EXTIDX_WL_VALUE]); 2464d2819 < } 2466,2476c2821,2827 < *kev = kn->kn_kevent; < kev->fflags = kn->kn_sfflags; < kev->data = kn->kn_sdata; < kev->qos = kn->kn_qos; < rc |= FILTER_ACTIVE; < } < < filt_wlunlock(kqwl); < < if (rc & FILTER_ACTIVE) { < workq_thread_set_max_qos(kqwl->kqwl_p, &kqwl->kqwl_request); --- > *kev = kn->kn_kevent; > kev->fflags = kn->kn_sfflags; > kev->data = kn->kn_sdata; > kev->qos = kn->kn_qos; > rc = 1; > } > filt_wlunlock(kqwl); 2481,2491d2831 < SECURITY_READ_ONLY_EARLY(static struct filterops) workloop_filtops = { < .f_extended_codes = true, < .f_attach = filt_wlattach, < .f_detach = filt_wldetach, < .f_event = filt_badevent, < .f_touch = filt_wltouch, < .f_process = filt_wlprocess, < .f_allow_drop = filt_wlallow_drop, < .f_post_register_wait = filt_wlpost_register_wait, < }; < 2498,2504d2837 < filt_badevent(struct knote *kn, long hint) < { < panic("%s[%d](%p, %ld)", __func__, kn->kn_filter, kn, hint); < return 0; < } < < static int 2507c2840,2841 < knote_set_error(kn, ENOTSUP); --- > kn->kn_flags |= EV_ERROR; > kn->kn_data = ENOTSUP; 2517a2852 > uint64_t kq_addr_offset; 2533c2868 < TAILQ_INIT(&kqwq->kqwq_queue[i]); --- > TAILQ_INIT(&kq->kq_queue[i]); 2535,2549c2870 < for (i = 0; i < KQWQ_NBUCKETS; i++) { < if (i != KQWQ_QOS_MANAGER) { < /* < * Because of how the bucketized system works, we mix overcommit < * sources with not overcommit: each time we move a knote from < * one bucket to the next due to overrides, we'd had to track < * overcommitness, and it's really not worth it in the workloop < * enabled world that track this faithfully. 
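Earlier in this hunk, DEBUG/DEVELOPMENT builds cross-check the libdispatch queue state word before delivering a thread-request event: it is treated as a bug for the "enqueued" bit to be clear while the top 16 bits still hold a real value (one side of the diff additionally whitelists 0xdead). A small standalone mirror of that bit test; the interpretation of the layout is libdispatch's, per the src/queue_internal.h comment quoted in the diff:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Constant quoted from the diff (libdispatch's src/queue_internal.h). */
    #define DISPATCH_QUEUE_ENQUEUED 0x1ull

    /* True when the state would trip the assertion: not marked enqueued,
     * yet the top 16 bits are neither 0 nor 0xffff. */
    static bool dq_state_violates_assertion(uint64_t dq_state)
    {
        uint16_t top = (uint16_t)(dq_state >> 48);
        return (dq_state & DISPATCH_QUEUE_ENQUEUED) == 0 &&
               top != 0 && top != 0xffff;
    }

    int main(void)
    {
        printf("%d\n", dq_state_violates_assertion(0x0001000000000001ull)); /* 0: enqueued */
        printf("%d\n", dq_state_violates_assertion(0x0001000000000000ull)); /* 1: would panic */
        printf("%d\n", dq_state_violates_assertion(0xffff000000000000ull)); /* 0: tolerated */
        return 0;
    }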
< * < * Incidentally, this behaves like the original manager-based < * kqwq where event delivery always happened (hence is < * "overcommit") < */ < kqwq->kqwq_request[i].kqr_state |= KQR_THOVERCOMMIT; < } --- > for (i = 0; i < KQWQ_NQOS; i++) { 2551d2871 < TAILQ_INIT(&kqwq->kqwq_request[i].kqr_suppressed); 2553a2874 > lck_spin_init(&kqwq->kqwq_reqlock, kq_lck_grp, kq_lck_attr); 2555a2877 > 2568d2889 < kqwl->kqwl_request.kqr_state = KQR_WORKLOOP; 2572c2893 < TAILQ_INIT(&kqwl->kqwl_queue[i]); --- > TAILQ_INIT(&kq->kq_queue[i]); 2575a2897 > lck_spin_init(&kqwl->kqwl_reqlock, kq_lck_grp, kq_lck_attr); 2579c2901,2907 < hook = (void *)kqwl; --- > if (flags & KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD) { > policy |= SYNC_POLICY_PREPOST; > kq->kq_state |= KQ_NO_WQ_THREAD; > } else { > hook = (void *)kqwl; > } > 2582c2910 < --- > 2589c2917 < TAILQ_INIT(&kqf->kqf_queue); --- > TAILQ_INIT(&kq->kq_queue[0]); 2591c2919 < --- > 2597d2924 < lck_spin_init(&kq->kq_reqlock, kq_lck_grp, kq_lck_attr); 2606a2934,2936 > kq_addr_offset = ((uintptr_t)kq - (uintptr_t)VM_MIN_KERNEL_AND_KEXT_ADDRESS); > /* Assert that the address can be pointer compacted for use with knote */ > assert(kq_addr_offset < (uint64_t)(1ull << KNOTE_KQ_BITSIZE)); 2613,2616c2943,2946 < * Called with proc_fdlock held. < * Returns with it locked. < * May drop it temporarily. < * Process is in such a state that it will not try to allocate --- > * Called with proc_fdlock held. > * Returns with it locked. > * May drop it temporarily. > * Process is in such a state that it will not try to allocate 2635c2965,2968 < knote_drop(kq, kn, NULL); --- > /* drop it ourselves or wait */ > if (kqlock2knotedrop(kq, kn)) { > knote_drop(kn, p); > } 2655c2988,2991 < knote_drop(kq, kn, NULL); --- > /* drop it ourselves or wait */ > if (kqlock2knotedrop(kq, kn)) { > knote_drop(kn, p); > } 2673,2704d3008 < /* < * kqworkloop_invalidate < * < * Invalidate ownership of a workloop. < * < * This is meant to be used so that any remnant of overrides and ownership < * information is dropped before a kqworkloop can no longer be found in the < * global hash table and have ghost workloop ownership left over. < * < * Possibly returns a thread to deallocate in a safe context. < */ < static thread_t < kqworkloop_invalidate(struct kqworkloop *kqwl) < { < thread_t cur_owner = kqwl->kqwl_owner; < < assert(TAILQ_EMPTY(&kqwl->kqwl_request.kqr_suppressed)); < if (cur_owner) { < /* < * If the kqueue had an owner that prevented the thread request to < * go through, then no unbind happened, and we may have lingering < * overrides to drop. < */ < if (kqworkloop_owner_override(kqwl) != THREAD_QOS_UNSPECIFIED) { < thread_drop_ipc_override(cur_owner); < } < thread_ends_owning_workloop(cur_owner); < kqwl->kqwl_owner = THREAD_NULL; < } < < return cur_owner; < } 2709c3013 < * We walk each list looking for knotes referencing this --- > * We walk each list looking for knotes referencing this 2738,2743c3042,3063 < /* < * Workloops are refcounted by their knotes, so there's no point < * spending a lot of time under these locks just to deallocate one. 
< */ < if ((kq->kq_state & KQ_WORKLOOP) == 0) { < KNOTE_LOCK_CTX(knlc); --- > proc_fdlock(p); > for (i = 0; i < fdp->fd_knlistsize; i++) { > kn = SLIST_FIRST(&fdp->fd_knlist[i]); > while (kn != NULL) { > if (kq == knote_get_kq(kn)) { > assert((kq->kq_state & KQ_WORKLOOP) == 0); > kqlock(kq); > proc_fdunlock(p); > /* drop it ourselves or wait */ > if (kqlock2knotedrop(kq, kn)) { > knote_drop(kn, p); > } > proc_fdlock(p); > /* start over at beginning of list */ > kn = SLIST_FIRST(&fdp->fd_knlist[i]); > continue; > } > kn = SLIST_NEXT(kn, kn_link); > } > } > knhash_lock(p); > proc_fdunlock(p); 2745,2747c3065,3067 < proc_fdlock(p); < for (i = 0; i < fdp->fd_knlistsize; i++) { < kn = SLIST_FIRST(&fdp->fd_knlist[i]); --- > if (fdp->fd_knhashmask != 0) { > for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) { > kn = SLIST_FIRST(&fdp->fd_knhash[i]); 2749a3070 > assert((kq->kq_state & KQ_WORKLOOP) == 0); 2751,2753c3072,3075 < proc_fdunlock(p); < if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) { < knote_drop(kq, kn, &knlc); --- > knhash_unlock(p); > /* drop it ourselves or wait */ > if (kqlock2knotedrop(kq, kn)) { > knote_drop(kn, p); 2755c3077 < proc_fdlock(p); --- > knhash_lock(p); 2757c3079 < kn = SLIST_FIRST(&fdp->fd_knlist[i]); --- > kn = SLIST_FIRST(&fdp->fd_knhash[i]); 2763,2786d3084 < < knhash_lock(p); < proc_fdunlock(p); < < if (fdp->fd_knhashmask != 0) { < for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) { < kn = SLIST_FIRST(&fdp->fd_knhash[i]); < while (kn != NULL) { < if (kq == knote_get_kq(kn)) { < kqlock(kq); < knhash_unlock(p); < if (knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) { < knote_drop(kq, kn, &knlc); < } < knhash_lock(p); < /* start over at beginning of list */ < kn = SLIST_FIRST(&fdp->fd_knhash[i]); < continue; < } < kn = SLIST_NEXT(kn, kn_link); < } < } < } < knhash_unlock(p); 2787a3086 > knhash_unlock(p); 2791c3090,3091 < thread_t cur_owner = kqworkloop_invalidate(kqwl); --- > struct kqrequest *kqr = &kqwl->kqwl_request; > thread_t cur_owner = kqwl->kqwl_owner; 2793c3093,3103 < if (cur_owner) thread_deallocate(cur_owner); --- > assert(TAILQ_EMPTY(&kqwl->kqwl_request.kqr_suppressed)); > if (filt_wlowner_is_valid(cur_owner)) { > /* > * If the kqueue had an owner that prevented the thread request to > * go through, then no unbind happened, and we may have lingering > * overrides to drop. 
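knotes_dealloc above walks fd_knlist and fd_knhash by repeatedly taking the first matching knote, dropping the fd or hash lock to perform the actual drop, and then restarting from the head of the list, since the list may have changed while the lock was released. The same pattern reduced to a standalone <sys/queue.h> example; the payload and the matching predicate are made up:

    #include <sys/queue.h>
    #include <pthread.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct node {
        SLIST_ENTRY(node) link;
        int owner;                 /* stands in for "knote belongs to this kq" */
    };

    static SLIST_HEAD(, node) head = SLIST_HEAD_INITIALIZER(head);
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Free every node matching 'owner'.  The expensive teardown happens with
     * the list lock dropped, so after re-acquiring we restart from the head
     * rather than trusting a saved iterator. */
    static void drop_matching(int owner)
    {
        pthread_mutex_lock(&list_lock);
        struct node *n = SLIST_FIRST(&head);
        while (n != NULL) {
            if (n->owner == owner) {
                SLIST_REMOVE(&head, n, node, link);
                pthread_mutex_unlock(&list_lock);
                free(n);                        /* knote_drop() analogue */
                pthread_mutex_lock(&list_lock);
                n = SLIST_FIRST(&head);         /* start over at the head */
                continue;
            }
            n = SLIST_NEXT(n, link);
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        for (int i = 0; i < 6; i++) {
            struct node *n = malloc(sizeof(*n));
            n->owner = i % 2;
            SLIST_INSERT_HEAD(&head, n, link);
        }
        drop_matching(1);

        int remaining = 0;
        struct node *n;
        SLIST_FOREACH(n, &head, link)
            remaining++;
        printf("remaining nodes: %d\n", remaining);   /* 3 */
        return 0;
    }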
> */ > if (kqr->kqr_dsync_owner_qos != THREAD_QOS_UNSPECIFIED) { > thread_drop_ipc_override(cur_owner); > kqr->kqr_dsync_owner_qos = THREAD_QOS_UNSPECIFIED; > } 2795,2801c3105,3111 < if (kqwl->kqwl_request.kqr_state & KQR_ALLOCATED_TURNSTILE) { < struct turnstile *ts; < turnstile_complete((uintptr_t)kqwl, &kqwl->kqwl_turnstile, &ts); < turnstile_cleanup(); < turnstile_deallocate(ts); < } else { < assert(kqwl->kqwl_turnstile == NULL); --- > if (kqr->kqr_owner_override_is_sync) { > thread_drop_sync_ipc_override(cur_owner); > kqr->kqr_owner_override_is_sync = 0; > } > thread_ends_owning_workloop(cur_owner); > thread_deallocate(cur_owner); > kqwl->kqwl_owner = THREAD_NULL; 2811d3120 < lck_spin_destroy(&kq->kq_reqlock, kq_lck_grp); 2814c3123,3126 < zfree(kqworkq_zone, (struct kqworkq *)kq); --- > struct kqworkq *kqwq = (struct kqworkq *)kq; > > lck_spin_destroy(&kqwq->kqwq_reqlock, kq_lck_grp); > zfree(kqworkq_zone, kqwq); 2818a3131 > lck_spin_destroy(&kqwl->kqwl_reqlock, kq_lck_grp); 2822c3135,3137 < zfree(kqfile_zone, (struct kqfile *)kq); --- > struct kqfile *kqf = (struct kqfile *)kq; > > zfree(kqfile_zone, kqf); 2847c3162 < kqueue_release(kqueue_t kqu, __assert_only int possibly_last) --- > kqueue_release(struct kqueue *kq, __assert_only int possibly_last) 2849c3164,3166 < if ((kqu.kq->kq_state & KQ_DYNAMIC) == 0) { --- > struct kqworkloop *kqwl = (struct kqworkloop *)kq; > > if ((kq->kq_state & KQ_DYNAMIC) == 0) { 2853,2854c3170,3171 < assert(kqu.kq->kq_state & KQ_WORKLOOP); /* for now */ < uint32_t refs = OSDecrementAtomic(&kqu.kqwl->kqwl_retains); --- > assert(kq->kq_state & KQ_WORKLOOP); /* for now */ > uint32_t refs = OSDecrementAtomic(&kqwl->kqwl_retains); 2856c3173 < panic("kq(%p) over-release", kqu.kq); --- > panic("kq(%p) over-release", kq); 2905c3222 < unsigned int flags) --- > unsigned int flags) 2957c3274 < --- > 2987c3304 < unsigned int flags) --- > unsigned int flags) 2993c3310 < /* --- > /* 3007c3324 < --- > 3053c3370 < --- > 3083,3087c3400,3403 < kevent_get_data_size( < struct proc *p, < uint64_t data_available, < unsigned int flags, < user_size_t *residp) --- > kevent_get_data_size(struct proc *p, > uint64_t data_available, > unsigned int flags, > user_size_t *residp) 3114,3118c3430,3433 < kevent_put_data_size( < struct proc *p, < uint64_t data_available, < unsigned int flags, < user_size_t resid) --- > kevent_put_data_size(struct proc *p, > uint64_t data_available, > unsigned int flags, > user_size_t resid) 3140a3456 > 3240,3241c3556,3557 < int < kevent_qos_internal(struct proc *p, int fd, --- > int > kevent_qos_internal(struct proc *p, int fd, 3245,3246c3561,3562 < unsigned int flags, < int32_t *retval) --- > unsigned int flags, > int32_t *retval) 3281,3282c3597,3598 < unsigned int flags, < int32_t *retval) --- > unsigned int flags, > int32_t *retval) 3294c3610 < --- > 3492c3808 < kqueue_release_last(struct proc *p, kqueue_t kqu) --- > kqueue_release_last(struct proc *p, struct kqueue *kq) 3494d3809 < struct kqueue *kq = kqu.kq; 3498d3812 < thread_t cur_owner = kqworkloop_invalidate(kqu.kqwl); 3501d3814 < if (cur_owner) thread_deallocate(cur_owner); 3509,3519c3822,3824 < /* < * kqworkloops_dealloc - rebalance retains on kqworkloops created with < * scheduling parameters < * < * Called with proc_fdlock held. < * Returns with it locked. < * Process is in such a state that it will not try to allocate < * any more knotes during this process (stopped for exit or exec). 
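Note: kqueue_release in both versions of the hunk above follows the same contract: dynamic kqueues carry an atomic retain count, the caller states whether it might be dropping the last reference, and a decrement of an already-zero count is treated as an over-release bug. Below is a stand-alone C11 sketch of that contract with made-up names; it is not the kernel code.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct refobj { atomic_uint retains; };

    /* Returns true when the caller dropped the last reference and now owns
     * deallocation; aborts on over-release (prior count already zero). */
    static bool refobj_release(struct refobj *o, bool possibly_last)
    {
        unsigned old = atomic_fetch_sub(&o->retains, 1);  /* returns prior value */
        if (old == 0) {
            fprintf(stderr, "refobj %p over-release\n", (void *)o);
            abort();
        }
        if (old == 1) {
            assert(possibly_last);  /* caller must have allowed for being last */
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct refobj o = { 2 };
        bool last = refobj_release(&o, false);  /* still referenced elsewhere */
        last = refobj_release(&o, true);        /* this drop owns the free */
        return last ? 0 : 1;
    }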
< */ < void < kqworkloops_dealloc(proc_t p) --- > static struct kqueue * > kevent_get_bound_kq(__assert_only struct proc *p, thread_t thread, > unsigned int kev_flags, unsigned int kq_flags) 3521,3534c3826,3827 < struct filedesc *fdp = p->p_fd; < struct kqlist *list; < struct kqworkloop *kqwl, *kqwln; < struct kqlist tofree; < int i; < < if (!(fdp->fd_flags & FD_WORKLOOP)) { < return; < } < < SLIST_INIT(&tofree); < < kqhash_lock(p); < assert(fdp->fd_kqhashmask != 0); --- > struct kqueue *kq; > struct uthread *ut = get_bsdthread_info(thread); 3536,3548c3829 < for (i = 0; i <= (int)fdp->fd_kqhashmask; i++) { < list = &fdp->fd_kqhash[i]; < SLIST_FOREACH_SAFE(kqwl, list, kqwl_hashlink, kqwln) { < /* < * kqworkloops that have scheduling parameters have an < * implicit retain from kqueue_workloop_ctl that needs < * to be balanced on process exit. < */ < assert(kqwl->kqwl_params); < SLIST_REMOVE(list, kqwl, kqworkloop, kqwl_hashlink); < SLIST_INSERT_HEAD(&tofree, kqwl, kqwl_hashlink); < } < } --- > assert(p == get_bsdthreadtask_info(thread)); 3550c3831,3832 < kqhash_unlock(p); --- > if (!(ut->uu_kqueue_flags & kev_flags)) > return NULL; 3552,3559c3834,3836 < SLIST_FOREACH_SAFE(kqwl, &tofree, kqwl_hashlink, kqwln) { < struct kqueue *kq = (struct kqueue *)kqwl; < __assert_only bool released; < released = kqueue_release(kq, KQUEUE_MIGHT_BE_LAST_REF); < assert(released); < kqueue_dealloc(kq); < } < } --- > kq = ut->uu_kqueue_bound; > if (!kq) > return NULL; 3561,3565c3838,3839 < static struct kqueue * < kevent_get_bound_kqworkloop(thread_t thread) < { < struct uthread *ut = get_bsdthread_info(thread); < struct kqrequest *kqr = ut->uu_kqr_bound; --- > if (!(kq->kq_state & kq_flags)) > return NULL; 3567c3841 < return kqr ? (struct kqueue *)kqr_kqworkloop(kqr) : NULL; --- > return kq; 3571,3573c3845 < kevent_get_kq(struct proc *p, kqueue_id_t id, workq_threadreq_param_t *trp, < unsigned int flags, struct fileproc **fpp, int *fdp, < struct kqueue **kqp) --- > kevent_get_kq(struct proc *p, kqueue_id_t id, unsigned int flags, struct fileproc **fpp, int *fdp, struct kqueue **kqp) 3577c3849 < struct kqueue *kq = NULL; --- > struct kqueue *kq; 3580,3582d3851 < thread_t th = current_thread(); < < assert(!trp || (flags & KEVENT_FLAG_WORKLOOP)); 3587,3594d3855 < assert(!trp || (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)); < kq = kevent_get_bound_kqworkloop(th); < < /* < * when kevent_id_internal is called from within the < * kernel, and the passed 'id' value is '-1' then we < * look for the currently bound workloop kq. < */ 3599,3601c3860 < if (!is_workqueue_thread(th) || !kq) { < return EINVAL; < } --- > assert(is_workqueue_thread(current_thread())); 3603,3605c3862,3876 < kqueue_retain(kq); < goto out; < } --- > /* > * when kevent_id_internal is called from within the > * kernel, and the passed 'id' value is '-1' then we > * look for the currently bound workloop kq. > * > * Until pthread kext avoids calling in to kevent_id_internal > * for threads whose fulfill is canceled, calling in unbound > * can't be fatal. 
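Note: in the kevent_get_kq hunks above, both versions resolve a dynamic workloop id with the same optimistic pattern: allocate the new kqworkloop without holding the hash lock, redo the lookup once the lock is taken, and have the loser of the race retain the existing object and release the one it just allocated (or report EEXIST when the caller asked that the id not already exist). The pthreads sketch below shows that find-or-create shape with toy types and names of my own, not xnu's.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Toy refcounted object standing in for a kqworkloop. */
    struct obj { uint64_t id; int refs; struct obj *next; };

    static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj *hash_head;          /* a single bucket keeps the sketch short */

    static struct obj *lookup_locked(uint64_t id)
    {
        for (struct obj *o = hash_head; o; o = o->next)
            if (o->id == id) return o;
        return NULL;
    }

    /* Find-or-create: allocate before taking the lock, then re-check under it. */
    static struct obj *get_or_create(uint64_t id)
    {
        struct obj *fresh = calloc(1, sizeof(*fresh));  /* may lose the race */
        if (!fresh) return NULL;
        fresh->id = id;
        fresh->refs = 1;

        pthread_mutex_lock(&hash_lock);
        struct obj *found = lookup_locked(id);
        if (found) {
            found->refs++;                 /* retain the winner */
            pthread_mutex_unlock(&hash_lock);
            free(fresh);                   /* drop the loser */
            return found;
        }
        fresh->next = hash_head;           /* insert our new one */
        hash_head = fresh;
        pthread_mutex_unlock(&hash_lock);
        return fresh;
    }

    int main(void)
    {
        return get_or_create(42) ? 0 : 1;
    }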
> */ > kq = kevent_get_bound_kq(p, current_thread(), > KEVENT_FLAG_WORKLOOP, KQ_WORKLOOP); > if (kq) { > kqueue_retain(kq); > } else { > struct uthread *ut = get_bsdthread_info(current_thread()); 3607,3608c3878,3892 < if (id == 0 || id == (kqueue_id_t)-1) { < return EINVAL; --- > /* If thread is unbound due to cancel, just return an error */ > if (ut->uu_kqueue_flags == KEVENT_FLAG_WORKLOOP_CANCELED) { > ut->uu_kqueue_flags = 0; > error = ECANCELED; > } else { > panic("Unbound thread called kevent_internal with id=-1" > " uu_kqueue_flags:0x%x, uu_kqueue_bound:%p", > ut->uu_kqueue_flags, ut->uu_kqueue_bound); > } > } > > *fpp = NULL; > *fdp = 0; > *kqp = kq; > return error; 3611a3896 > kq = kevent_get_bound_kq(p, current_thread(), KEVENT_FLAG_WORKLOOP, KQ_WORKLOOP); 3615c3900,3902 < return EEXIST; --- > error = EEXIST; > kq = NULL; > goto out; 3620a3908 > error = 0; 3631c3919,3920 < return ENOENT; --- > error = ENOENT; > goto out; 3636,3648c3925,3939 < if (!alloc_kq) { < return ENOMEM; < } < < kqhash_lock(p); < kqueue_hash_init_if_needed(p); < kq = kqueue_hash_lookup(p, id); < if (kq == NULL) { < /* insert our new one */ < kq = alloc_kq; < if (trp) { < struct kqworkloop *kqwl = (struct kqworkloop *)kq; < kqwl->kqwl_params = trp->trp_value; --- > if (alloc_kq) { > kqhash_lock(p); > kqueue_hash_init_if_needed(p); > kq = kqueue_hash_lookup(p, id); > if (kq == NULL) { > /* insert our new one */ > kq = alloc_kq; > kqueue_hash_insert(p, id, kq); > kqhash_unlock(p); > } else { > /* lost race, retain existing workloop */ > kqueue_retain(kq); > kqhash_unlock(p); > kqueue_release(alloc_kq, KQUEUE_MIGHT_BE_LAST_REF); > kqueue_dealloc(alloc_kq); 3650,3657d3940 < kqueue_hash_insert(p, id, kq); < kqhash_unlock(p); < } else if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) { < /* lost race and caller wants an error */ < kqhash_unlock(p); < kqueue_release(alloc_kq, KQUEUE_MIGHT_BE_LAST_REF); < kqueue_dealloc(alloc_kq); < return EEXIST; 3659,3663c3942,3943 < /* lost race, retain existing workloop */ < kqueue_retain(kq); < kqhash_unlock(p); < kqueue_release(alloc_kq, KQUEUE_MIGHT_BE_LAST_REF); < kqueue_dealloc(alloc_kq); --- > error = ENOMEM; > goto out; 3669c3949,3951 < return EEXIST; --- > kq = NULL; > error = EEXIST; > goto out; 3677c3959 < --- > 3693c3975 < if (alloc_kq == NULL) { --- > if (alloc_kq == NULL) 3695d3976 < } 3718c3999 < } --- > } 3724c4005 < --- > 3770c4051 < os_reason_t reason = OS_REASON_NULL; --- > os_reason_t reason; 3788a4070 > assert(workloop_id); 3803,3804c4085 < if (workloop_id) { < struct kcdata_descriptor *kcd = &reason->osr_kcd_descriptor; --- > struct kcdata_descriptor *kcd = &reason->osr_kcd_descriptor; 3806,3809c4087,4090 < if (kcdata_get_memory_addr(kcd, EXIT_REASON_WORKLOOP_ID, < sizeof(workloop_id), &addr) == KERN_SUCCESS) { < kcdata_memcpy(kcd, addr, &workloop_id, sizeof(workloop_id)); < } --- > if (kcdata_get_memory_addr(kcd, EXIT_REASON_WORKLOOP_ID, > sizeof(workloop_id), &addr) == KERN_SUCCESS) { > kcdata_memcpy(kcd, addr, &workloop_id, sizeof(workloop_id)); > } 3811,3815c4092,4095 < uint64_t serial_no = kevent_workloop_serial_no_copyin(p, workloop_id); < if (serial_no && kcdata_get_memory_addr(kcd, EXIT_REASON_DISPATCH_QUEUE_NO, < sizeof(serial_no), &addr) == KERN_SUCCESS) { < kcdata_memcpy(kcd, addr, &serial_no, sizeof(serial_no)); < } --- > uint64_t serial_no = kevent_workloop_serial_no_copyin(p, workloop_id); > if (serial_no && kcdata_get_memory_addr(kcd, EXIT_REASON_DISPATCH_QUEUE_NO, > sizeof(serial_no), &addr) == KERN_SUCCESS) { > kcdata_memcpy(kcd, addr, 
&serial_no, sizeof(serial_no)); 3816a4097 > 3819,3822d4099 < if (kevent_debug_flags() & KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK) { < panic("thread %p in task %p is leaked workloop 0x%016llx ownership", < thread, p->task, workloop_id); < } 3831,3832c4108,4240 < static inline boolean_t < kevent_args_requesting_events(unsigned int flags, int nevents) --- > > static int > kevent_servicer_detach_preflight(thread_t thread, unsigned int flags, struct kqueue *kq) > { > int error = 0; > struct kqworkloop *kqwl; > struct uthread *ut; > struct kqrequest *kqr; > > if (!(flags & KEVENT_FLAG_WORKLOOP) || !(kq->kq_state & KQ_WORKLOOP)) > return EINVAL; > > /* only kq created with KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD from userspace can have attached threads */ > if (!(kq->kq_state & KQ_NO_WQ_THREAD)) > return EINVAL; > > /* allow detach only on not wq threads */ > if (is_workqueue_thread(thread)) > return EINVAL; > > /* check that the current thread is bound to the requested wq */ > ut = get_bsdthread_info(thread); > if (ut->uu_kqueue_bound != kq) > return EINVAL; > > kqwl = (struct kqworkloop *)kq; > kqwl_req_lock(kqwl); > kqr = &kqwl->kqwl_request; > > /* check that the wq is bound to the thread */ > if ((kqr->kqr_state & KQR_BOUND) == 0 || (kqr->kqr_thread != thread)) > error = EINVAL; > > kqwl_req_unlock(kqwl); > > return error; > } > > static void > kevent_servicer_detach_thread(struct proc *p, kqueue_id_t id, thread_t thread, > unsigned int flags, struct kqueue *kq) > { > struct kqworkloop *kqwl; > struct uthread *ut; > > assert((flags & KEVENT_FLAG_WORKLOOP) && (kq->kq_state & KQ_WORKLOOP)); > > /* allow detach only on not wqthreads threads */ > assert(!is_workqueue_thread(thread)); > > /* only kq created with KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD from userspace can have attached threads */ > assert(kq->kq_state & KQ_NO_WQ_THREAD); > > /* check that the current thread is bound to the requested kq */ > ut = get_bsdthread_info(thread); > assert(ut->uu_kqueue_bound == kq); > > kqwl = (struct kqworkloop *)kq; > > kqlock(kq); > > /* unbind the thread. > * unbind itself checks if still processing and ends it. 
> */ > kqworkloop_unbind_thread(kqwl, thread, flags); > > kqunlock(kq); > > kevent_put_kq(p, id, NULL, kq); > > return; > } > > static int > kevent_servicer_attach_thread(thread_t thread, unsigned int flags, struct kqueue *kq) > { > int error = 0; > struct kqworkloop *kqwl; > struct uthread *ut; > struct kqrequest *kqr; > > if (!(flags & KEVENT_FLAG_WORKLOOP) || !(kq->kq_state & KQ_WORKLOOP)) > return EINVAL; > > /* only kq created with KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD from userspace can have attached threads*/ > if (!(kq->kq_state & KQ_NO_WQ_THREAD)) > return EINVAL; > > /* allow attach only on not wqthreads */ > if (is_workqueue_thread(thread)) > return EINVAL; > > /* check that the thread is not already bound */ > ut = get_bsdthread_info(thread); > if (ut->uu_kqueue_bound != NULL) > return EINVAL; > > assert(ut->uu_kqueue_flags == 0); > > kqlock(kq); > kqwl = (struct kqworkloop *)kq; > kqwl_req_lock(kqwl); > kqr = &kqwl->kqwl_request; > > /* check that the kqueue is not already bound */ > if (kqr->kqr_state & (KQR_BOUND | KQR_THREQUESTED | KQR_DRAIN)) { > error = EINVAL; > goto out; > } > > assert(kqr->kqr_thread == NULL); > assert((kqr->kqr_state & KQR_PROCESSING) == 0); > > kqr->kqr_state |= KQR_THREQUESTED; > kqr->kqr_qos_index = THREAD_QOS_UNSPECIFIED; > kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED; > kqr->kqr_dsync_owner_qos = THREAD_QOS_UNSPECIFIED; > kqr->kqr_owner_override_is_sync = 0; > > kqworkloop_bind_thread_impl(kqwl, thread, KEVENT_FLAG_WORKLOOP); > > /* get a ref on the wlkq on behalf of the attached thread */ > kqueue_retain(kq); > > out: > kqwl_req_unlock(kqwl); > kqunlock(kq); > > return error; > } > > static inline > boolean_t kevent_args_requesting_events(unsigned int flags, int nevents) 3843c4251 < unsigned int flags, --- > unsigned int flags, 3847a4256 > struct _kevent *cont_args; 3853,3854c4262 < int error, noutputs, register_rc; < bool needs_end_processing = false; --- > int error, noutputs; 3859d4266 < KNOTE_LOCK_CTX(knlc); 3866,3870d4272 < if (flags & KEVENT_FLAG_PARKING) { < if (!kevent_args_requesting_events(flags, nevents) || id != (kqueue_id_t)-1) < return EINVAL; < } < 3875,3878c4277,4278 < if ((flags & (KEVENT_FLAG_WORKLOOP)) && (flags & (KEVENT_FLAG_WORKQ))) < return EINVAL; < < if (flags & (KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) { --- > if (flags & (KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH | KEVENT_FLAG_WORKLOOP_SERVICER_DETACH | > KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST | KEVENT_FLAG_WORKLOOP_NO_WQ_THREAD)) { 3882a4283,4291 > > /* cannot attach and detach simultaneously*/ > if ((flags & KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH) && (flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH)) > return EINVAL; > > /* cannot ask for events and detach */ > if ((flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH) && kevent_args_requesting_events(flags, nevents)) > return EINVAL; > 3887c4296 < int scale = ((flags & KEVENT_FLAG_LEGACY32) ? --- > int scale = ((flags & KEVENT_FLAG_LEGACY32) ? 3899c4308 < --- > 3906,3917c4315 < error = kevent_get_kq(p, id, NULL, flags, &fp, &fd, &kq); < #if CONFIG_WORKLOOP_DEBUG < ut = (uthread_t)get_bsdthread_info(thread); < UU_KEVENT_HISTORY_WRITE_ENTRY(ut, { < .uu_kqid = id, < .uu_kq = error ? 
NULL : kq, < .uu_error = error, < .uu_nchanges = nchanges, < .uu_nevents = nevents, < .uu_flags = flags, < }); < #endif // CONFIG_WORKLOOP_DEBUG --- > error = kevent_get_kq(p, id, flags, &fp, &fd, &kq); 3922,3924c4320,4325 < if (flags & KEVENT_FLAG_WORKLOOP) { < struct kqworkloop *kqwl = (struct kqworkloop *)kq; < struct kqrequest *kqr = &kqwl->kqwl_request; --- > if ((flags & KEVENT_FLAG_WORKLOOP) && kevent_args_requesting_events(flags, nevents)) { > ut = (uthread_t)get_bsdthread_info(thread); > if (ut->uu_kqueue_bound != kq) { > error = EXDEV; > goto out; > } 3926c4327 < assert(kq->kq_state & KQ_WORKLOOP); --- > } 3928,3930c4329,4339 < if (kevent_args_requesting_events(flags, nevents)) { < if (kq != kevent_get_bound_kqworkloop(thread)) { < error = EXDEV; --- > /* attach the current thread if necessary */ > if (flags & KEVENT_FLAG_WORKLOOP_SERVICER_ATTACH) { > error = kevent_servicer_attach_thread(thread, flags, kq); > if (error) > goto out; > } > else { > /* before processing events and committing to the system call, return an error if the thread cannot be detached when requested */ > if (flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH) { > error = kevent_servicer_detach_preflight(thread, flags, kq); > if (error) 3932,3946d4340 < } < < kq_req_lock(kqwl); < /* < * Disable the R2K notification while doing a register, if the < * caller wants events too, we don't want the AST to be set if we < * will process these events soon. < */ < kqr->kqr_state &= ~KQR_R2K_NOTIF_ARMED; < needs_end_processing = true; < kq_req_unlock(kq); < } < < if (id_out) { < *id_out = kqwl->kqwl_dynamicid; 3947a4342 > } 3948a4344,4348 > if (id_out && kq && (flags & KEVENT_FLAG_WORKLOOP)) { > assert(kq->kq_state & KQ_WORKLOOP); > struct kqworkloop *kqwl; > kqwl = (struct kqworkloop *)kq; > *id_out = kqwl->kqwl_dynamicid; 3961,3997c4361,4365 < register_rc = kevent_register(kq, &kev, &knlc); < if (register_rc & FILTER_REGISTER_WAIT) { < kqlock_held(kq); < < // f_post_register_wait is meant to call a continuation and not to < // return, which is why we don't support FILTER_REGISTER_WAIT if < // KEVENT_FLAG_ERROR_EVENTS is not passed, or if the event that < // waits isn't the last. < // < // It is implementable, but not used by any userspace code at the < // moment, so for now return ENOTSUP if someone tries to do it. 
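Note: the copy-out branches above (the nevents > 0 checks on EV_ERROR and EV_RECEIPT) back the documented user-space behaviour: when a change carries EV_RECEIPT, its registration status comes back as an output event with EV_ERROR set and the error code (0 on success) in data, rather than failing the whole call. A small user-space illustration of consuming such a receipt:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int kq = kqueue();
        struct kevent change, receipt;

        /* EV_RECEIPT: the kernel reports this registration as an output event
         * (EV_ERROR set, error code or 0 in 'data') immediately. */
        EV_SET(&change, STDIN_FILENO, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);

        int n = kevent(kq, &change, 1, &receipt, 1, NULL);
        if (n == 1 && (receipt.flags & EV_ERROR) && receipt.data != 0)
            fprintf(stderr, "registration failed: error %lld\n",
                    (long long)receipt.data);

        close(kq);
        return 0;
    }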
< if (nchanges == 1 && nevents >= 1 && (flags & KEVENT_FLAG_ERROR_EVENTS)) { < struct _kevent_register *cont_args; < /* store the continuation/completion data in the uthread */ < ut = (uthread_t)get_bsdthread_info(thread); < cont_args = &ut->uu_save.uus_kevent_register; < cont_args->kev = kev; < cont_args->kq = kq; < cont_args->fp = fp; < cont_args->fd = fd; < cont_args->ueventlist = ueventlist; < cont_args->flags = flags; < cont_args->retval = retval; < cont_args->eventcount = nevents; < cont_args->eventout = noutputs; < knote_fops(cont_args->knote)->f_post_register_wait(ut, &knlc, cont_args); < panic("f_post_register_wait returned (kev: %p)", &kev); < } < < kev.flags |= EV_ERROR; < kev.data = ENOTSUP; < knote_unlock(kq, knlc.knlc_knote, &knlc, KNOTE_KQ_UNLOCK); < } < < // keep in sync with kevent_register_wait_return() < if (nevents > 0 && (kev.flags & (EV_ERROR|EV_RECEIPT))) { < if ((kev.flags & EV_ERROR) == 0) { --- > kevent_register(kq, &kev, p); > > if (nevents > 0 && > ((kev.flags & EV_ERROR) || (kev.flags & EV_RECEIPT))) { > if (kev.flags & EV_RECEIPT) { 4018d4385 < struct _kevent *cont_args; 4021c4388 < cont_args = &ut->uu_save.uus_kevent; --- > cont_args = &ut->uu_kevent.ss_kevent; 4035,4039d4401 < /* < * kqworkloop_end_processing() will happen at the end of kqueue_scan() < */ < needs_end_processing = false; < 4056,4064c4418,4421 < out: < if (__improbable(needs_end_processing)) { < /* < * If we didn't through kqworkloop_end_processing(), < * we need to do it here. < */ < kqlock(kq); < kqworkloop_end_processing((struct kqworkloop *)kq, 0, 0); < kqunlock(kq); --- > /* detach the current thread if necessary */ > if (flags & KEVENT_FLAG_WORKLOOP_SERVICER_DETACH) { > assert(fp == NULL); > kevent_servicer_detach_thread(p, id, thread, flags, kq); 4065a4423,4424 > > out: 4087c4446 < void *data) --- > void *data) 4134,4249d4492 < static int < kevent_register_validate_priority(struct kqueue *kq, struct knote *kn, < struct kevent_internal_s *kev) < { < /* We don't care about the priority of a disabled or deleted knote */ < if (kev->flags & (EV_DISABLE | EV_DELETE)) { < return 0; < } < < if (kq->kq_state & KQ_WORKLOOP) { < /* < * Workloops need valid priorities with a QOS (excluding manager) for < * any enabled knote. < * < * When it is pre-existing, just make sure it has a valid QoS as < * kevent_register() will not use the incoming priority (filters who do < * have the responsibility to validate it again, see filt_wltouch). < * < * If the knote is being made, validate the incoming priority. < */ < if (!_pthread_priority_thread_qos(kn ? kn->kn_qos : kev->qos)) { < return ERANGE; < } < } < < return 0; < } < < /* < * Prepare a filter for waiting after register. < * < * The f_post_register_wait hook will be called later by kevent_register() < * and should call kevent_register_wait_block() < */ < static int < kevent_register_wait_prepare(struct knote *kn, struct kevent_internal_s *kev) < { < thread_t thread = current_thread(); < struct uthread *uth = get_bsdthread_info(thread); < < assert(knote_fops(kn)->f_extended_codes); < < if (kn->kn_hook == NULL) { < thread_reference(thread); < kn->kn_hook = thread; < } else if (kn->kn_hook != thread) { < /* < * kn_hook may be set from a previous aborted wait < * However, it has to be from the same thread. 
< */ < kev->flags |= EV_ERROR; < kev->data = EXDEV; < return 0; < } < < uth->uu_save.uus_kevent_register.knote = kn; < return FILTER_REGISTER_WAIT; < } < < /* < * Cleanup a kevent_register_wait_prepare() effect for threads that have been < * aborted instead of properly woken up with thread_wakeup_thread(). < */ < static void < kevent_register_wait_cleanup(struct knote *kn) < { < thread_t thread = kn->kn_hook; < kn->kn_hook = NULL; < thread_deallocate(thread); < } < < /* < * Must be called at the end of a f_post_register_wait call from a filter. < */ < static void < kevent_register_wait_block(struct turnstile *ts, thread_t thread, < struct knote_lock_ctx *knlc, thread_continue_t cont, < struct _kevent_register *cont_args) < { < knote_unlock(cont_args->kq, cont_args->knote, knlc, KNOTE_KQ_UNLOCK); < turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD); < cont_args->handoff_thread = thread; < thread_handoff_parameter(thread, cont, cont_args); < } < < /* < * Called by Filters using a f_post_register_wait to return from their wait. < */ < static void < kevent_register_wait_return(struct _kevent_register *cont_args) < { < struct kqueue *kq = cont_args->kq; < proc_t p = kq->kq_p; < struct kevent_internal_s *kev = &cont_args->kev; < int error = 0; < < if (cont_args->handoff_thread) { < thread_deallocate(cont_args->handoff_thread); < } < < if (kev->flags & (EV_ERROR|EV_RECEIPT)) { < if ((kev->flags & EV_ERROR) == 0) { < kev->flags |= EV_ERROR; < kev->data = 0; < } < error = kevent_copyout(kev, &cont_args->ueventlist, p, cont_args->flags); < if (error == 0) cont_args->eventout++; < } < < kevent_put_kq(p, cont_args->fd, cont_args->fp, kq); < if (error == 0) { < *cont_args->retval = cont_args->eventout; < } < unix_syscall_return(error); < } < 4264c4507 < int --- > void 4266c4509 < struct knote_lock_ctx *knlc) --- > __unused struct proc *ctxp) 4271c4514,4515 < int result = 0, error = 0; --- > int result = 0; > int error = 0; 4272a4517 > int knoteuse_flags = KNUSE_NONE; 4287c4532 < (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) { --- > (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) { 4311a4557 > 4314,4318d4559 < error = kevent_register_validate_priority(kq, kn, kev); < result = 0; < if (error) { < goto out; < } 4320,4323c4561,4597 < if (kn == NULL && (kev->flags & EV_ADD) == 0) { < /* < * No knote found, EV_ADD wasn't specified < */ --- > if (kn == NULL) { > if (kev->flags & EV_ADD) { > struct fileproc *knote_fp = NULL; > > /* grab a file reference for the new knote */ > if (fops->f_isfd) { > if ((error = fp_lookup(p, kev->ident, &knote_fp, 0)) != 0) { > goto out; > } > } > > kn = knote_alloc(); > if (kn == NULL) { > error = ENOMEM; > if (knote_fp != NULL) > fp_drop(p, kev->ident, knote_fp, 0); > goto out; > } > > kn->kn_fp = knote_fp; > knote_set_kq(kn, kq); > kqueue_retain(kq); /* retain a kq ref */ > kn->kn_filtid = ~kev->filter; > kn->kn_inuse = 1; /* for f_attach() */ > kn->kn_status = KN_ATTACHING | KN_ATTACHED; > > /* was vanish support requested */ > if (kev->flags & EV_VANISHED) { > kev->flags &= ~EV_VANISHED; > kn->kn_status |= KN_REQVANISH; > } > > /* snapshot matching/dispatching protcol flags into knote */ > if (kev->flags & EV_DISPATCH) > kn->kn_status |= KN_DISPATCH; > if (kev->flags & EV_UDATA_SPECIFIC) > kn->kn_status |= KN_UDATA_SPECIFIC; 4325,4326d4598 < if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) && < (kq->kq_state & KQ_WORKLOOP)) { 4328,4330c4600,4603 < * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete < 
* that doesn't care about ENOENT, so just pretend the deletion < * happened. --- > * copy the kevent state into knote > * protocol is that fflags and data > * are saved off, and cleared before > * calling the attach routine. 4332,4335c4605,4609 < } else { < error = ENOENT; < } < goto out; --- > kn->kn_kevent = *kev; > kn->kn_sfflags = kev->fflags; > kn->kn_sdata = kev->data; > kn->kn_fflags = 0; > kn->kn_data = 0; 4337,4340c4611,4613 < } else if (kn == NULL) { < /* < * No knote found, need to attach a new one (attach) < */ --- > /* invoke pthread kext to convert kevent qos to thread qos */ > knote_canonicalize_kevent_qos(kn); > knote_set_qos_index(kn, qos_index_from_qos(kn, kn->kn_qos, FALSE)); 4342c4615,4623 < struct fileproc *knote_fp = NULL; --- > /* before anyone can find it */ > if (kev->flags & EV_DISABLE) { > /* > * do this before anyone can find it, > * this can't call knote_disable() because it expects having > * the kqlock held > */ > kn->kn_status |= KN_DISABLED; > } 4344,4346c4625,4636 < /* grab a file reference for the new knote */ < if (fops->f_isfd) { < if ((error = fp_lookup(p, kev->ident, &knote_fp, 0)) != 0) { --- > /* Add the knote for lookup thru the fd table */ > error = kq_add_knote(kq, kn, kev, p, &knoteuse_flags); > if (error) { > (void)kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF); > knote_free(kn); > if (knote_fp != NULL) > fp_drop(p, kev->ident, knote_fp, 0); > > if (error == ERESTART) { > error = 0; > goto restart; > } 4349d4638 < } 4351,4357c4640,4641 < kn = knote_alloc(); < if (kn == NULL) { < error = ENOMEM; < if (knote_fp != NULL) < fp_drop(p, kev->ident, knote_fp, 0); < goto out; < } --- > /* fp reference count now applies to knote */ > /* rwlock boost is now held */ 4359,4377c4643,4644 < kn->kn_fp = knote_fp; < kn->kn_kq_packed = (intptr_t)(struct kqueue *)kq; < kqueue_retain(kq); /* retain a kq ref */ < kn->kn_filtid = ~kev->filter; < kn->kn_status = KN_ATTACHING | KN_ATTACHED; < < /* was vanish support requested */ < if (kev->flags & EV_VANISHED) { < kev->flags &= ~EV_VANISHED; < kn->kn_status |= KN_REQVANISH; < } < < /* snapshot matching/dispatching protcol flags into knote */ < if (kev->flags & EV_DISPATCH) < kn->kn_status |= KN_DISPATCH; < if (kev->flags & EV_UDATA_SPECIFIC) < kn->kn_status |= KN_UDATA_SPECIFIC; < if (kev->flags & EV_DISABLE) < kn->kn_status |= KN_DISABLED; --- > /* call filter attach routine */ > result = fops->f_attach(kn, kev); 4379,4390c4646,4651 < /* < * copy the kevent state into knote < * protocol is that fflags and data < * are saved off, and cleared before < * calling the attach routine. < */ < kn->kn_kevent = *kev; < kn->kn_sfflags = kev->fflags; < kn->kn_sdata = kev->data; < kn->kn_fflags = 0; < kn->kn_data = 0; < knote_reset_priority(kn, kev->qos); --- > /* > * Trade knote use count for kq lock. > * Cannot be dropped because we held > * KN_ATTACHING throughout. > */ > knoteuse2kqlock(kq, kn, KNUSE_STEAL_DROP | knoteuse_flags); 4392,4398c4653,4666 < /* Add the knote for lookup thru the fd table */ < error = kq_add_knote(kq, kn, knlc, p); < if (error) { < (void)kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF); < knote_free(kn); < if (knote_fp != NULL) < fp_drop(p, kev->ident, knote_fp, 0); --- > if (kn->kn_flags & EV_ERROR) { > /* > * Failed to attach correctly, so drop. > * All other possible users/droppers > * have deferred to us. Save the error > * to return to our caller. 
> */ > kn->kn_status &= ~KN_ATTACHED; > kn->kn_status |= KN_DROPPING; > error = kn->kn_data; > kqunlock(kq); > knote_drop(kn, p); > goto out; > } 4400,4401c4668,4709 < if (error == ERESTART) { < goto restart; --- > /* end "attaching" phase - now just attached */ > kn->kn_status &= ~KN_ATTACHING; > > if (kn->kn_status & KN_DROPPING) { > /* > * Attach succeeded, but someone else > * deferred their drop - now we have > * to do it for them. > */ > kqunlock(kq); > knote_drop(kn, p); > goto out; > } > > /* Mark the thread request overcommit - if appropos */ > knote_set_qos_overcommit(kn); > > /* > * If the attach routine indicated that an > * event is already fired, activate the knote. > */ > if (result) > knote_activate(kn); > > if (knote_fops(kn)->f_post_attach) { > error = knote_fops(kn)->f_post_attach(kn, kev); > if (error) { > kqunlock(kq); > goto out; > } > } > > } else { > if ((kev_flags & (EV_ADD | EV_DELETE)) == (EV_ADD | EV_DELETE) && > (kq->kq_state & KQ_WORKLOOP)) { > /* > * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete > * that doesn't care about ENOENT, so just pretend the deletion > * happened. > */ > } else { > error = ENOENT; 4406c4714,4715 < /* fp reference count now applies to knote */ --- > } else { > /* existing knote: kqueue lock already taken by kq_find_knote_and_kq_lock */ 4408,4415c4717,4723 < /* < * we can't use filter_call() because f_attach can change the filter ops < * for a filter that supports f_extended_codes, so we need to reload < * knote_fops() and not use `fops`. < */ < result = fops->f_attach(kn, kev); < if (result && !knote_fops(kn)->f_extended_codes) { < result = FILTER_ACTIVE; --- > if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) { > /* > * The knote is not in a stable state, wait for that > * transition to complete and then redrive the lookup. > */ > knoteusewait(kq, kn); > goto restart; 4418c4726 < kqlock(kq); --- > if (kev->flags & EV_DELETE) { 4420d4727 < if (kn->kn_flags & EV_ERROR) { 4422c4729,4731 < * Failed to attach correctly, so drop. --- > * If attempting to delete a disabled dispatch2 knote, > * we must wait for the knote to be re-enabled (unless > * it is being re-enabled atomically here). 4424,4427c4733,4761 < kn->kn_status &= ~(KN_ATTACHED | KN_ATTACHING); < error = kn->kn_data; < knote_drop(kq, kn, knlc); < result = 0; --- > if ((kev->flags & EV_ENABLE) == 0 && > (kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) == > (KN_DISPATCH2 | KN_DISABLED)) { > kn->kn_status |= KN_DEFERDELETE; > kqunlock(kq); > error = EINPROGRESS; > } else if (knote_fops(kn)->f_drop_and_unlock) { > /* > * The filter has requested to handle EV_DELETE events > * > * ERESTART means the kevent has to be re-evaluated > */ > error = knote_fops(kn)->f_drop_and_unlock(kn, kev); > if (error == ERESTART) { > error = 0; > goto restart; > } > } else if (kqlock2knotedrop(kq, kn)) { > /* standard/default EV_DELETE path */ > knote_drop(kn, p); > } else { > /* > * The kqueue is unlocked, it's not being > * dropped, and kqlock2knotedrop returned 0: > * this means that someone stole the drop of > * the knote from us. > */ > error = EINPROGRESS; > } 4432,4444c4766,4773 < * end "attaching" phase - now just attached < * < * Mark the thread request overcommit, if appropos < * < * If the attach routine indicated that an < * event is already fired, activate the knote. 
< */ < kn->kn_status &= ~KN_ATTACHING; < knote_set_qos_overcommit(kn); < < if (result & FILTER_ACTIVE) { < if (result & FILTER_ADJUST_EVENT_QOS_BIT) < knote_adjust_qos(kq, kn, result); --- > * If we are re-enabling a deferred-delete knote, > * just enable it now and avoid calling the > * filter touch routine (it has delivered its > * last event already). > */ > if ((kev->flags & EV_ENABLE) && > (kn->kn_status & KN_DEFERDELETE)) { > assert(kn->kn_status & KN_DISABLED); 4445a4775,4777 > knote_enable(kn); > kqunlock(kq); > goto out; 4448,4449d4779 < } else if (!knote_lock(kq, kn, knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) { < 4451,4452c4781,4784 < * The knote was dropped while we were waiting for the lock, < * we need to re-evaluate entirely --- > * If we are disabling, do it before unlocking and > * calling the touch routine (so no processing can > * see the new kevent state before the disable is > * applied). 4453a4786,4787 > if (kev->flags & EV_DISABLE) > knote_disable(kn); 4455,4457d4788 < goto restart; < < } else if (kev->flags & EV_DELETE) { 4459,4465c4790,4791 < * Deletion of a knote (drop) < * < * If the filter wants to filter drop events, let it do so. < * < * defer-delete: when trying to delete a disabled EV_DISPATCH2 knote, < * we must wait for the knote to be re-enabled (unless it is being < * re-enabled atomically here). --- > * Convert the kqlock to a use reference on the > * knote so we can call the filter touch routine. 4467,4475c4793,4794 < < if (knote_fops(kn)->f_allow_drop) { < bool drop; < < kqunlock(kq); < drop = knote_fops(kn)->f_allow_drop(kn, kev); < kqlock(kq); < < if (!drop) goto out_unlock; --- > if (knoteuse_needs_boost(kn, kev)) { > knoteuse_flags |= KNUSE_BOOST; 4476a4796,4802 > if (kqlock2knoteuse(kq, kn, knoteuse_flags)) { > /* > * Call touch routine to notify filter of changes > * in filter values (and to re-determine if any > * events are fired). > */ > result = knote_fops(kn)->f_touch(kn, kev); 4478,4501c4804,4808 < if ((kev->flags & EV_ENABLE) == 0 && < (kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) == < (KN_DISPATCH2 | KN_DISABLED)) { < kn->kn_status |= KN_DEFERDELETE; < error = EINPROGRESS; < goto out_unlock; < } < < knote_drop(kq, kn, knlc); < goto out; < < } else { < /* < * Regular update of a knote (touch) < * < * Call touch routine to notify filter of changes in filter values < * (and to re-determine if any events are fired). < * < * If the knote is in defer-delete, avoid calling the filter touch < * routine (it has delivered its last event already). < * < * If the touch routine had no failure, < * apply the requested side effects to the knote. < */ --- > /* Get the kq lock back (don't defer droppers). 
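Note: much of the churn above (KN_DEFERDELETE, the wait on a disabled dispatch2 knote, the EINPROGRESS return, and the shortcut that enables a deferred-delete knote without calling the filter's touch routine) supports the documented EV_DISPATCH semantics: a knote is disabled as soon as it delivers an event and must be explicitly re-armed with EV_ENABLE. A user-space illustration of that re-arm cycle:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int kq = kqueue();
        struct kevent kev, out;
        char buf[256];

        /* EV_DISPATCH: the knote is disabled right after delivering an event. */
        EV_SET(&kev, STDIN_FILENO, EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
        kevent(kq, &kev, 1, NULL, 0, NULL);      /* register only, does not block */

        for (int i = 0; i < 2; i++) {
            if (kevent(kq, NULL, 0, &out, 1, NULL) == 1) {   /* wait for stdin */
                ssize_t r = read((int)out.ident, buf, sizeof(buf));
                printf("read %zd bytes; re-arming\n", r);
                /* re-enable the (now disabled) knote to get the next event */
                EV_SET(&kev, out.ident, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
                kevent(kq, &kev, 1, NULL, 0, NULL);
            }
        }
        close(kq);
        return 0;
    }

Deleting such a knote while it sits disabled, possibly mid-consumption, is exactly the situation the defer-delete paths above coordinate.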
*/ > if (!knoteuse2kqlock(kq, kn, knoteuse_flags)) { > kqunlock(kq); > goto out; > } 4503,4505c4810,4814 < if (kn->kn_status & (KN_DEFERDELETE | KN_VANISHED)) { < if (kev->flags & EV_ENABLE) { < result = FILTER_ACTIVE; --- > /* Handle errors during touch routine */ > if (kev->flags & EV_ERROR) { > error = kev->data; > kqunlock(kq); > goto out; 4507,4511d4815 < } else { < kqunlock(kq); < result = filter_call(knote_fops(kn), f_touch(kn, kev)); < kqlock(kq); < } 4513,4533c4817,4818 < if (kev->flags & EV_ERROR) { < result = 0; < } else { < /* accept new kevent state */ < if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) < kn->kn_udata = kev->udata; < if (kev->flags & EV_DISABLE) < knote_disable(kn); < if (result & (FILTER_UPDATE_REQ_QOS | FILTER_ADJUST_EVENT_QOS_BIT)) < knote_dequeue(kn); < if ((result & FILTER_UPDATE_REQ_QOS) && < kev->qos && kev->qos != kn->kn_qos) { < knote_reset_priority(kn, kev->qos); < } < if (result & FILTER_ACTIVE) { < thread_qos_t qos; < if (result & FILTER_ADJUST_EVENT_QOS_BIT) { < if (knote_should_apply_qos_override(kq, kn, result, &qos)) { < knote_apply_qos_override(kn, qos); < } < } --- > /* Activate it if the touch routine said to */ > if (result) 4535,4542d4819 < } < if (result & (FILTER_UPDATE_REQ_QOS | FILTER_ADJUST_EVENT_QOS_BIT)) { < if (knote_enqueue(kn) && (kn->kn_status & KN_ACTIVE)) { < knote_wakeup(kn); < } < } < if (kev->flags & EV_ENABLE) < knote_enable(kn); 4544d4820 < } 4546,4553c4822,4825 < out_unlock: < if ((result & FILTER_REGISTER_WAIT) == 0) { < /* < * When the filter asked for a post-register wait, < * we leave the knote and kqueue locked for kevent_register() < * to call the filter's f_post_register_wait hook. < */ < knote_unlock(kq, kn, knlc, KNOTE_KQ_UNLOCK); --- > /* Enable the knote if called for */ > if (kev->flags & EV_ENABLE) > knote_enable(kn); > 4555a4828,4830 > /* still have kqlock held and knote is valid */ > kqunlock(kq); > 4562d4836 < return result; 4564a4839 > 4586c4861 < knote_process(struct knote *kn, --- > knote_process(struct knote *kn, 4589c4864,4865 < struct filt_process_s *process_data) --- > struct filt_process_s *process_data, > struct proc *p) 4593,4594c4869 < KNOTE_LOCK_CTX(knlc); < int result = FILTER_ACTIVE; --- > int result = 0; 4596d4870 < bool drop = false; 4623,4631d4896 < if ((kn->kn_status & KN_DROPPING) || < !knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) { < /* < * When the knote is dropping or has dropped, < * then there's nothing we want to process. < */ < return EJUSTRETURN; < } < 4637,4639d4901 < * < * suppress knotes to avoid returning the same event multiple times in < * a single call. 4641,4642d4902 < knote_suppress(kn); < 4647c4907,4909 < kev.flags = (kn->kn_status & KN_DEFERDELETE) ? EV_DELETE : EV_VANISHED; --- > kev.qos = kn->kn_qos; > kev.flags = (kn->kn_status & KN_DEFERDELETE) ? > EV_DELETE : EV_VANISHED; 4649a4912,4914 > result = 1; > > knote_suppress(kn); 4650a4916 > int flags = KNUSE_NONE; 4654,4657c4920,4921 < kqunlock(kq); < result = filter_call(knote_fops(kn), f_process(kn, process_data, &kev)); < kqlock(kq); < } --- > /* suppress knotes to avoid returning the same event multiple times in a single call. */ > knote_suppress(kn); 4659,4686c4923,4941 < /* < * Determine how to dispatch the knote for future event handling. < * not-fired: just return (do not callout, leave deactivated). < * One-shot: If dispatch2, enter deferred-delete mode (unless this is < * is the deferred delete event delivery itself). Otherwise, < * drop it. < * Dispatch: don't clear state, just mark it disabled. 
< * Cleared: just leave it deactivated. < * Others: re-activate as there may be more events to handle. < * This will not wake up more handlers right now, but < * at the completion of handling events it may trigger < * more handler threads (TODO: optimize based on more than < * just this one event being detected by the filter). < */ < if ((result & FILTER_ACTIVE) == 0) { < if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0) { < /* < * Stay active knotes should not be unsuppressed or we'd create an < * infinite loop. < * < * Some knotes (like EVFILT_WORKLOOP) can be reactivated from < * within f_process() but that doesn't necessarily make them < * ready to process, so we should leave them be. < * < * For other knotes, since we will not return an event, < * there's no point keeping the knote suppressed. < */ < knote_unsuppress(kn); --- > if (knoteuse_needs_boost(kn, NULL)) { > flags |= KNUSE_BOOST; > } > /* convert lock to a knote use reference */ > if (!kqlock2knoteuse(kq, kn, flags)) > panic("dropping knote found on queue\n"); > > /* call out to the filter to process with just a ref */ > result = knote_fops(kn)->f_process(kn, process_data, &kev); > if (result) flags |= KNUSE_STEAL_DROP; > > /* > * convert our reference back to a lock. accept drop > * responsibility from others if we've committed to > * delivering event data. > */ > if (!knoteuse2kqlock(kq, kn, flags)) { > /* knote dropped */ > kn = NULL; 4688,4689d4942 < knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS); < return EJUSTRETURN; 4692,4699c4945,4995 < if (result & FILTER_ADJUST_EVENT_QOS_BIT) < knote_adjust_qos(kq, kn, result); < kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override); < < if (kev.flags & EV_ONESHOT) { < if ((kn->kn_status & (KN_DISPATCH2 | KN_DEFERDELETE)) == KN_DISPATCH2) { < /* defer dropping non-delete oneshot dispatch2 events */ < kn->kn_status |= KN_DEFERDELETE; --- > if (kn != NULL) { > /* > * Determine how to dispatch the knote for future event handling. > * not-fired: just return (do not callout, leave deactivated). > * One-shot: If dispatch2, enter deferred-delete mode (unless this is > * is the deferred delete event delivery itself). Otherwise, > * drop it. > * stolendrop:We took responsibility for someone else's drop attempt. > * treat this just like one-shot and prepare to turn it back > * into a deferred delete if required. > * Dispatch: don't clear state, just mark it disabled. > * Cleared: just leave it deactivated. > * Others: re-activate as there may be more events to handle. > * This will not wake up more handlers right now, but > * at the completion of handling events it may trigger > * more handler threads (TODO: optimize based on more than > * just this one event being detected by the filter). > */ > > if (result == 0) > return (EJUSTRETURN); > > if ((kev.flags & EV_ONESHOT) || (kn->kn_status & KN_STOLENDROP)) { > if ((kn->kn_status & (KN_DISPATCH2 | KN_DEFERDELETE)) == KN_DISPATCH2) { > /* defer dropping non-delete oneshot dispatch2 events */ > kn->kn_status |= KN_DEFERDELETE; > knote_disable(kn); > > /* if we took over another's drop clear those flags here */ > if (kn->kn_status & KN_STOLENDROP) { > assert(kn->kn_status & KN_DROPPING); > /* > * the knote will be dropped when the > * deferred deletion occurs > */ > kn->kn_status &= ~(KN_DROPPING|KN_STOLENDROP); > } > } else if (kn->kn_status & KN_STOLENDROP) { > /* We now own the drop of the knote. 
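Note: the dispatch table spelled out in the comment above (not-fired, one-shot, dispatch, cleared, others) is the kernel side of flags user code already knows: EV_ONESHOT drops the knote after a single delivery, EV_CLEAR resets the reported state so only new activity fires again, and plain level-triggered knotes are simply re-activated. A minimal user-space illustration of the one-shot case, using EVFILT_TIMER:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int kq = kqueue();
        struct kevent kev, out;

        /* EV_ONESHOT: delivered at most once, then the kernel drops the knote. */
        EV_SET(&kev, 1 /* arbitrary timer id */, EVFILT_TIMER,
               EV_ADD | EV_ONESHOT, 0, 100 /* default unit: milliseconds */, NULL);
        kevent(kq, &kev, 1, NULL, 0, NULL);

        if (kevent(kq, NULL, 0, &out, 1, NULL) == 1)
            printf("timer %lu fired; the knote no longer exists\n",
                   (unsigned long)out.ident);

        close(kq);
        return 0;
    }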
*/ > assert(kn->kn_status & KN_DROPPING); > knote_unsuppress(kn); > kqunlock(kq); > knote_drop(kn, p); > kqlock(kq); > } else if (kqlock2knotedrop(kq, kn)) { > /* just EV_ONESHOT, _not_ DISPATCH2 */ > knote_drop(kn, p); > kqlock(kq); > } > } else if (kn->kn_status & KN_DISPATCH) { > /* disable all dispatch knotes */ 4701,4702c4997,4999 < } else { < drop = true; --- > } else if ((kev.flags & EV_CLEAR) == 0) { > /* re-activate in case there are more events */ > knote_activate(kn); 4704,4709d5000 < } else if (kn->kn_status & KN_DISPATCH) { < /* disable all dispatch knotes */ < knote_disable(kn); < } else if ((kev.flags & EV_CLEAR) == 0) { < /* re-activate in case there are more events */ < knote_activate(kn); 4717,4726c5008,5011 < if (drop) { < knote_drop(kq, kn, &knlc); < } else { < knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK); < } < < if (kev.flags & EV_VANISHED) { < KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED), < kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32), < kn->kn_filtid); --- > if (result) { > kqunlock(kq); > error = (callback)(kq, &kev, callback_data); > kqlock(kq); 4728,4731c5013 < < error = (callback)(kq, &kev, callback_data); < kqlock(kq); < return error; --- > return (error); 4733a5016 > 4735c5018,5022 < * Returns -1 if the kqueue was unbound and processing should not happen --- > * Return 0 to indicate that processing should proceed, > * -1 if there is nothing to process. > * > * Called with kqueue locked and returns the same way, > * but may drop lock temporarily. 4737,4739d5023 < #define KQWQAE_BEGIN_PROCESSING 1 < #define KQWQAE_END_PROCESSING 2 < #define KQWQAE_UNBIND 3 4741,4742c5025 < kqworkq_acknowledge_events(struct kqworkq *kqwq, struct kqrequest *kqr, < int kevent_flags, int kqwqae_op) --- > kqworkq_begin_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags) 4744,4748c5027,5029 < thread_qos_t old_override = THREAD_QOS_UNSPECIFIED; < thread_t thread = kqr->kqr_thread; < struct knote *kn; < int rc = 0; < bool seen_stayactive = false, unbind; --- > struct kqrequest *kqr; > thread_t self = current_thread(); > __assert_only struct uthread *ut = get_bsdthread_info(self); 4750c5031,5032 < kqlock_held(&kqwq->kqwq_kqueue); --- > assert(kqwq->kqwq_state & KQ_WORKQ); > assert(qos_index < KQWQ_NQOS); 4752,4767c5034,5035 < if (!TAILQ_EMPTY(&kqr->kqr_suppressed)) { < /* < * Return suppressed knotes to their original state. < * For workq kqueues, suppressed ones that are still < * truly active (not just forced into the queue) will < * set flags we check below to see if anything got < * woken up. 
< */ < while ((kn = TAILQ_FIRST(&kqr->kqr_suppressed)) != NULL) { < assert(kn->kn_status & KN_SUPPRESSED); < knote_unsuppress(kn); < if (kn->kn_status & KN_STAYACTIVE) { < seen_stayactive = true; < } < } < } --- > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START, > flags, qos_index); 4769c5037 < kq_req_lock(kqwq); --- > kqwq_req_lock(kqwq); 4771,4773c5039 < #if DEBUG || DEVELOPMENT < thread_t self = current_thread(); < struct uthread *ut = get_bsdthread_info(self); --- > kqr = kqworkq_get_request(kqwq, qos_index); 4775,4778c5041,5042 < assert(kqr->kqr_state & KQR_THREQUESTED); < assert(kqr->kqr_thread == self); < assert(ut->uu_kqr_bound == kqr); < #endif // DEBUG || DEVELOPMENT --- > /* manager skips buckets that haven't asked for its help */ > if (flags & KEVENT_FLAG_WORKQ_MANAGER) { 4780,4803c5044,5049 < if (kqwqae_op == KQWQAE_UNBIND) { < unbind = true; < } else if ((kevent_flags & KEVENT_FLAG_PARKING) == 0) { < unbind = false; < } else if (kqwqae_op == KQWQAE_BEGIN_PROCESSING && seen_stayactive) { < /* < * When we unsuppress stayactive knotes, for the kind that are hooked < * through select, we need to process once before we can assert there's < * no event pending. Hence we can't unbind during BEGIN PROCESSING. < */ < unbind = false; < } else { < unbind = ((kqr->kqr_state & KQR_WAKEUP) == 0); < } < if (unbind) { < old_override = kqworkq_unbind_locked(kqwq, kqr, thread); < rc = -1; < /* < * request a new thread if we didn't process the whole queue or real events < * have happened (not just putting stay-active events back). < */ < if (kqr->kqr_state & KQR_WAKEUP) { < kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, < kqr->kqr_qos_index, 0); --- > /* If nothing for manager to do, just return */ > if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) { > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END, > 0, kqr->kqr_state); > kqwq_req_unlock(kqwq); > return -1; 4805,4814c5051,5052 < } < < if (rc == 0) { < /* < * Reset wakeup bit to notice events firing while we are processing, < * as we cannot rely on the bucket queue emptiness because of stay < * active knotes. < */ < kqr->kqr_state &= ~KQR_WAKEUP; < } --- > /* bind manager thread from this time on */ > kqworkq_bind_thread_impl(kqwq, qos_index, self, flags); 4816,4819c5054,5060 < kq_req_unlock(kqwq); < < if (old_override) { < thread_drop_ipc_override(thread); --- > } else { > /* We should already be bound to this kqueue */ > assert(kqr->kqr_state & KQR_BOUND); > assert(kqr->kqr_thread == self); > assert(ut->uu_kqueue_bound == (struct kqueue *)kqwq); > assert(ut->uu_kqueue_qos_index == qos_index); > assert((ut->uu_kqueue_flags & flags) == ut->uu_kqueue_flags); 4822,4836c5063,5069 < return rc; < } < < /* < * Return 0 to indicate that processing should proceed, < * -1 if there is nothing to process. < * < * Called with kqueue locked and returns the same way, < * but may drop lock temporarily. 
< */ < static int < kqworkq_begin_processing(struct kqworkq *kqwq, struct kqrequest *kqr, < int kevent_flags) < { < int rc = 0; --- > /* > * we should have been requested to be here > * and nobody else should still be processing > */ > assert(kqr->kqr_state & KQR_WAKEUP); > assert(kqr->kqr_state & KQR_THREQUESTED); > assert((kqr->kqr_state & KQR_PROCESSING) == 0); 4838,4839c5071,5072 < KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START, < 0, kqr->kqr_qos_index); --- > /* reset wakeup trigger to catch new events after we start processing */ > kqr->kqr_state &= ~KQR_WAKEUP; 4841,4842c5074,5075 < rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags, < KQWQAE_BEGIN_PROCESSING); --- > /* convert to processing mode */ > kqr->kqr_state |= KQR_PROCESSING; 4845c5078 < thread_tid(kqr->kqr_thread), kqr->kqr_state); --- > kqr_thread_id(kqr), kqr->kqr_state); 4847c5080,5081 < return rc; --- > kqwq_req_unlock(kqwq); > return 0; 4868,4869c5102,5103 < static thread_qos_t < kqworkloop_acknowledge_events(struct kqworkloop *kqwl) --- > static void > kqworkloop_acknowledge_events(struct kqworkloop *kqwl, boolean_t clear_ipc_override) 4872d5105 < kq_index_t qos = THREAD_QOS_UNSPECIFIED; 4886c5119,5125 < qos = MAX(qos, knote_get_qos_override_index(kn)); --- > /* > * When called from unbind, clear the sync ipc override on the knote > * for events which are delivered. > */ > if (clear_ipc_override) { > knote_adjust_sync_qos(kn, THREAD_QOS_UNSPECIFIED, FALSE); > } 4891,4892d5129 < < return qos; 4896c5133,5134 < kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags) --- > kqworkloop_begin_processing(struct kqworkloop *kqwl, > __assert_only unsigned int flags) 4900,4902d5137 < thread_qos_t old_override = THREAD_QOS_UNSPECIFIED, qos_override; < thread_t thread = kqr->kqr_thread; < int rc = 0, op = KQWL_UTQ_NONE; 4907c5142,5144 < kqwl->kqwl_dynamicid, 0, 0); --- > kqwl->kqwl_dynamicid, flags, 0); > > kqwl_req_lock(kqwl); 4909a5147 > assert((kqr->kqr_state & KQR_PROCESSING) == 0); 4911a5150 > kqr->kqr_state |= KQR_PROCESSING | KQR_R2K_NOTIF_ARMED; 4914,4979c5153 < if (!TAILQ_EMPTY(&kqr->kqr_suppressed)) { < op = KQWL_UTQ_RESET_WAKEUP_OVERRIDE; < } < < if (kevent_flags & KEVENT_FLAG_PARKING) { < /* < * When "parking" we want to process events and if no events are found < * unbind. < * < * However, non overcommit threads sometimes park even when they have < * more work so that the pool can narrow. For these, we need to unbind < * early, so that calling kqworkloop_update_threads_qos() can ask the < * workqueue subsystem whether the thread should park despite having < * pending events. < */ < if (kqr->kqr_state & KQR_THOVERCOMMIT) { < op = KQWL_UTQ_PARKING; < } else { < op = KQWL_UTQ_UNBINDING; < } < } < if (op == KQWL_UTQ_NONE) { < goto done; < } < < qos_override = kqworkloop_acknowledge_events(kqwl); < < kq_req_lock(kqwl); < < if (op == KQWL_UTQ_UNBINDING) { < old_override = kqworkloop_unbind_locked(kqwl, thread); < (void)kqueue_release(kqwl, KQUEUE_CANT_BE_LAST_REF); < } < kqworkloop_update_threads_qos(kqwl, op, qos_override); < if (op == KQWL_UTQ_PARKING) { < if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) { < /* < * We cannot trust KQR_WAKEUP when looking at stay active knotes. < * We need to process once, and kqworkloop_end_processing will < * handle the unbind. 
< */ < } else if ((kqr->kqr_state & KQR_WAKEUP) == 0 || kqwl->kqwl_owner) { < old_override = kqworkloop_unbind_locked(kqwl, thread); < (void)kqueue_release(kqwl, KQUEUE_CANT_BE_LAST_REF); < rc = -1; < } < } else if (op == KQWL_UTQ_UNBINDING) { < if (kqr->kqr_thread == thread) { < /* < * The thread request fired again, passed the admission check and < * got bound to the current thread again. < */ < } else { < rc = -1; < } < } < < if (rc == 0) { < /* < * Reset wakeup bit to notice stay active events firing while we are < * processing, as we cannot rely on the stayactive bucket emptiness. < */ < kqr->kqr_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT; < } else { < kq->kq_state &= ~KQ_PROCESSING; < } --- > kqwl_req_unlock(kqwl); 4981,4985c5155 < kq_req_unlock(kqwl); < < if (old_override) { < thread_drop_ipc_override(thread); < } --- > kqworkloop_acknowledge_events(kqwl, FALSE); 4987d5156 < done: 4989c5158 < kqwl->kqwl_dynamicid, 0, 0); --- > kqwl->kqwl_dynamicid, flags, 0); 4991c5160 < return rc; --- > return 0; 5003c5172 < kqfile_begin_processing(struct kqueue *kq) --- > kqueue_begin_processing(struct kqueue *kq, kq_index_t qos_index, unsigned int flags) 5009c5178,5183 < assert((kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0); --- > if (kq->kq_state & KQ_WORKQ) { > return kqworkq_begin_processing((struct kqworkq *)kq, qos_index, flags); > } else if (kq->kq_state & KQ_WORKLOOP) { > return kqworkloop_begin_processing((struct kqworkloop*)kq, flags); > } > 5011c5185,5187 < VM_KERNEL_UNSLIDE_OR_PERM(kq), 0); --- > VM_KERNEL_UNSLIDE_OR_PERM(kq), flags); > > assert(qos_index == QOS_INDEX_KQFILE); 5026c5202 < suppressq = kqueue_get_suppressed_queue(kq, NULL); --- > suppressq = kqueue_get_suppressed_queue(kq, qos_index); 5028,5030c5204,5206 < CAST_EVENT64_T(suppressq), THREAD_UNINT | THREAD_WAIT_NOREPORT, < TIMEOUT_WAIT_FOREVER); < --- > CAST_EVENT64_T(suppressq), > THREAD_UNINT, TIMEOUT_WAIT_FOREVER); > 5043c5219 < if (kqueue_queue_empty(kq, QOS_INDEX_KQFILE)) { --- > if (kqueue_queue_empty(kq, qos_index)) { 5059,5060c5235,5241 < * Try to end the processing, only called when a workq thread is attempting to < * park (KEVENT_FLAG_PARKING is set). --- > * kqworkq_end_processing - Complete the processing of a workq kqueue > * > * We may have to request new threads. > * This can happen there are no waiting processing threads and: > * - there were active events we never got to (count > 0) > * - we pended waitq hook callouts during processing > * - we pended wakeups while processing (or unsuppressing) 5062,5063c5243 < * When returning -1, the kqworkq is setup again so that it is ready to be < * processed. --- > * Called with kqueue lock held. 5065,5067c5245,5246 < static int < kqworkq_end_processing(struct kqworkq *kqwq, struct kqrequest *kqr, < int kevent_flags) --- > static void > kqworkq_end_processing(struct kqworkq *kqwq, kq_index_t qos_index, int flags) 5069,5073c5248,5265 < if (!kqueue_queue_empty(&kqwq->kqwq_kqueue, kqr->kqr_qos_index)) { < /* remember we didn't process everything */ < kq_req_lock(kqwq); < kqr->kqr_state |= KQR_WAKEUP; < kq_req_unlock(kqwq); --- > #pragma unused(flags) > > struct kqueue *kq = &kqwq->kqwq_kqueue; > struct kqtailq *suppressq = kqueue_get_suppressed_queue(kq, qos_index); > > thread_t self = current_thread(); > struct uthread *ut = get_bsdthread_info(self); > struct knote *kn; > struct kqrequest *kqr; > thread_t thread; > > assert(kqwq->kqwq_state & KQ_WORKQ); > assert(qos_index < KQWQ_NQOS); > > /* Are we really bound to this kqueue? 
*/ > if (ut->uu_kqueue_bound != kq) { > assert(ut->uu_kqueue_bound == kq); > return; 5076,5084c5268,5296 < if (kevent_flags & KEVENT_FLAG_PARKING) { < /* < * if acknowledge events "succeeds" it means there are events, < * which is a failure condition for end_processing. < */ < int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags, < KQWQAE_END_PROCESSING); < if (rc == 0) { < return -1; --- > kqr = kqworkq_get_request(kqwq, qos_index); > > kqwq_req_lock(kqwq); > > /* Do we claim to be manager? */ > if (flags & KEVENT_FLAG_WORKQ_MANAGER) { > > /* bail if not bound that way */ > if (ut->uu_kqueue_qos_index != KQWQ_QOS_MANAGER || > (ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER) == 0) { > assert(ut->uu_kqueue_qos_index == KQWQ_QOS_MANAGER); > assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER); > kqwq_req_unlock(kqwq); > return; > } > > /* bail if this request wasn't already getting manager help */ > if ((kqr->kqr_state & KQWQ_THMANAGER) == 0 || > (kqr->kqr_state & KQR_PROCESSING) == 0) { > kqwq_req_unlock(kqwq); > return; > } > } else { > if (ut->uu_kqueue_qos_index != qos_index || > (ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER)) { > assert(ut->uu_kqueue_qos_index == qos_index); > assert((ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER) == 0); > kqwq_req_unlock(kqwq); > return; 5088c5300,5347 < return 0; --- > assert(kqr->kqr_state & KQR_BOUND); > thread = kqr->kqr_thread; > assert(thread == self); > > assert(kqr->kqr_state & KQR_PROCESSING); > > /* If we didn't drain the whole queue, re-mark a wakeup being needed */ > if (!kqueue_queue_empty(kq, qos_index)) > kqr->kqr_state |= KQR_WAKEUP; > > kqwq_req_unlock(kqwq); > > /* > * Return suppressed knotes to their original state. > * For workq kqueues, suppressed ones that are still > * truly active (not just forced into the queue) will > * set flags we check below to see if anything got > * woken up. > */ > while ((kn = TAILQ_FIRST(suppressq)) != NULL) { > assert(kn->kn_status & KN_SUPPRESSED); > knote_unsuppress(kn); > } > > kqwq_req_lock(kqwq); > > /* Indicate that we are done processing this request */ > kqr->kqr_state &= ~KQR_PROCESSING; > > /* > * Drop our association with this one request and its > * override on us. > */ > kqworkq_unbind_thread(kqwq, qos_index, thread, flags); > > /* > * request a new thread if we didn't process the whole > * queue or real events have happened (not just putting > * stay-active events back). > */ > if (kqr->kqr_state & KQR_WAKEUP) { > if (kqueue_queue_empty(kq, qos_index)) { > kqr->kqr_state &= ~KQR_WAKEUP; > } else { > kqworkq_request_thread(kqwq, qos_index); > } > } > kqwq_req_unlock(kqwq); 5091,5102c5350,5352 < /* < * Try to end the processing, only called when a workq thread is attempting to < * park (KEVENT_FLAG_PARKING is set). < * < * When returning -1, the kqworkq is setup again so that it is ready to be < * processed (as if kqworkloop_begin_processing had just been called). < * < * If successful and KEVENT_FLAG_PARKING was set in the kevent_flags, < * the kqworkloop is unbound from its servicer as a side effect. 
< */ < static int < kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags) --- > static void > kqworkloop_end_processing(struct kqworkloop *kqwl, int nevents, > unsigned int flags) 5104d5353 < struct kqueue *kq = &kqwl->kqwl_kqueue; 5106,5108c5355 < thread_qos_t old_override = THREAD_QOS_UNSPECIFIED, qos_override; < thread_t thread = kqr->kqr_thread; < int rc = 0; --- > struct kqueue *kq = &kqwl->kqwl_kqueue; 5113,5128c5360 < kqwl->kqwl_dynamicid, 0, 0); < < if (flags & KQ_PROCESSING) { < assert(kq->kq_state & KQ_PROCESSING); < < /* < * If we still have queued stayactive knotes, remember we didn't finish < * processing all of them. This should be extremely rare and would < * require to have a lot of them registered and fired. < */ < if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) { < kq_req_lock(kqwl); < kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, < KQWL_BUCKET_STAYACTIVE); < kq_req_unlock(kqwl); < } --- > kqwl->kqwl_dynamicid, flags, 0); 5129a5362,5363 > if ((kq->kq_state & KQ_NO_WQ_THREAD) && nevents == 0 && > (flags & KEVENT_FLAG_IMMEDIATE) == 0) { 5131,5132c5365,5366 < * When KEVENT_FLAG_PARKING is set, we need to attempt an unbind while < * still under the lock. --- > * We may soon block, but have returned no > * kevents that need to be kept supressed for overriding purposes. 5134,5140c5368,5369 < * So we do everything kqworkloop_unbind() would do, but because we're < * inside kqueue_process(), if the workloop actually received events < * while our locks were dropped, we have the opportunity to fail the end < * processing and loop again. < * < * This avoids going through the process-wide workqueue lock hence < * scales better. --- > * It is hence safe to acknowledge events and unsuppress everything, so > * that if we block we can observe all events firing. 5142,5144c5371 < if (kevent_flags & KEVENT_FLAG_PARKING) { < qos_override = kqworkloop_acknowledge_events(kqwl); < } --- > kqworkloop_acknowledge_events(kqwl, TRUE); 5147c5374 < kq_req_lock(kqwl); --- > kqwl_req_lock(kqwl); 5149,5167c5376,5377 < if (kevent_flags & KEVENT_FLAG_PARKING) { < kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_PARKING, qos_override); < if ((kqr->kqr_state & KQR_WAKEUP) && !kqwl->kqwl_owner) { < /* < * Reset wakeup bit to notice stay active events firing while we are < * processing, as we cannot rely on the stayactive bucket emptiness. 
< */ < kqr->kqr_wakeup_indexes &= ~KQWL_STAYACTIVE_FIRED_BIT; < rc = -1; < } else { < old_override = kqworkloop_unbind_locked(kqwl, thread); < (void)kqueue_release(kqwl, KQUEUE_CANT_BE_LAST_REF); < kq->kq_state &= ~flags; < } < } else { < kq->kq_state &= ~flags; < kqr->kqr_state |= KQR_R2K_NOTIF_ARMED; < kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0); < } --- > assert(kqr->kqr_state & KQR_PROCESSING); > assert(kq->kq_state & KQ_PROCESSING); 5169c5379,5381 < kq_req_unlock(kqwl); --- > kq->kq_state &= ~KQ_PROCESSING; > kqr->kqr_state &= ~KQR_PROCESSING; > kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0); 5171,5173c5383 < if (old_override) { < thread_drop_ipc_override(thread); < } --- > kqwl_req_unlock(kqwl); 5176,5178c5386 < kqwl->kqwl_dynamicid, 0, 0); < < return rc; --- > kqwl->kqwl_dynamicid, flags, 0); 5185c5393,5394 < kqfile_end_processing(struct kqueue *kq) --- > kqueue_end_processing(struct kqueue *kq, kq_index_t qos_index, > int nevents, unsigned int flags) 5193c5402,5406 < assert((kq->kq_state & (KQ_WORKQ|KQ_WORKLOOP)) == 0); --- > assert((kq->kq_state & KQ_WORKQ) == 0); > > if (kq->kq_state & KQ_WORKLOOP) { > return kqworkloop_end_processing((struct kqworkloop *)kq, nevents, flags); > } 5196c5409,5411 < VM_KERNEL_UNSLIDE_OR_PERM(kq), 0); --- > VM_KERNEL_UNSLIDE_OR_PERM(kq), flags); > > assert(qos_index == QOS_INDEX_KQFILE); 5201c5416 < suppressq = kqueue_get_suppressed_queue(kq, NULL); --- > suppressq = kqueue_get_suppressed_queue(kq, qos_index); 5219,5221c5434,5698 < static int < kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options, < struct kqueue_workloop_params *params, int *retval) --- > /* > * kqwq_internal_bind - bind thread to processing workq kqueue > * > * Determines if the provided thread will be responsible for > * servicing the particular QoS class index specified in the > * parameters. Once the binding is done, any overrides that may > * be associated with the cooresponding events can be applied. > * > * This should be called as soon as the thread identity is known, > * preferably while still at high priority during creation. > * > * - caller holds a reference on the process (and workq kq) > * - the thread MUST call kevent_qos_internal after being bound > * or the bucket of events may never be delivered. > * - Nothing locked > * (unless this is a synchronous bind, then the request is locked) > */ > static int > kqworkq_internal_bind( > struct proc *p, > kq_index_t qos_index, > thread_t thread, > unsigned int flags) > { > struct kqueue *kq; > struct kqworkq *kqwq; > struct kqrequest *kqr; > struct uthread *ut = get_bsdthread_info(thread); > > /* If no process workq, can't be our thread. */ > kq = p->p_fd->fd_wqkqueue; > > if (kq == NULL) > return 0; > > assert(kq->kq_state & KQ_WORKQ); > kqwq = (struct kqworkq *)kq; > > /* > * No need to bind the manager thread to any specific > * bucket, but still claim the thread. > */ > if (qos_index == KQWQ_QOS_MANAGER) { > assert(ut->uu_kqueue_bound == NULL); > assert(flags & KEVENT_FLAG_WORKQ_MANAGER); > ut->uu_kqueue_bound = kq; > ut->uu_kqueue_qos_index = qos_index; > ut->uu_kqueue_flags = flags; > > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), > thread_tid(thread), flags, qos_index); > > return 1; > } > > /* > * If this is a synchronous bind callback, the request > * lock is already held, so just do the bind. 
> */ > if (flags & KEVENT_FLAG_SYNCHRONOUS_BIND) { > kqwq_req_held(kqwq); > /* strip out synchronout bind flag */ > flags &= ~KEVENT_FLAG_SYNCHRONOUS_BIND; > kqworkq_bind_thread_impl(kqwq, qos_index, thread, flags); > return 1; > } > > /* > * check the request that corresponds to our qos_index > * to see if there is an outstanding request. > */ > kqr = kqworkq_get_request(kqwq, qos_index); > assert(kqr->kqr_qos_index == qos_index); > kqwq_req_lock(kqwq); > > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), > thread_tid(thread), flags, qos_index, kqr->kqr_state); > > if ((kqr->kqr_state & KQR_THREQUESTED) && > (kqr->kqr_state & KQR_PROCESSING) == 0) { > > if ((kqr->kqr_state & KQR_BOUND) && > thread == kqr->kqr_thread) { > /* duplicate bind - claim the thread */ > assert(ut->uu_kqueue_bound == kq); > assert(ut->uu_kqueue_qos_index == qos_index); > kqwq_req_unlock(kqwq); > return 1; > } > if ((kqr->kqr_state & (KQR_BOUND | KQWQ_THMANAGER)) == 0) { > /* ours to bind to */ > kqworkq_bind_thread_impl(kqwq, qos_index, thread, flags); > kqwq_req_unlock(kqwq); > return 1; > } > } > kqwq_req_unlock(kqwq); > return 0; > } > > static void > kqworkloop_bind_thread_impl(struct kqworkloop *kqwl, > thread_t thread, > __assert_only unsigned int flags) > { > assert(flags & KEVENT_FLAG_WORKLOOP); > > /* the request object must be locked */ > kqwl_req_held(kqwl); > > struct kqrequest *kqr = &kqwl->kqwl_request; > struct uthread *ut = get_bsdthread_info(thread); > boolean_t ipc_override_is_sync; > kq_index_t qos_index = kqworkloop_combined_qos(kqwl, &ipc_override_is_sync); > > /* nobody else bound so finally bind (as a workloop) */ > assert(kqr->kqr_state & KQR_THREQUESTED); > assert((kqr->kqr_state & (KQR_BOUND | KQR_PROCESSING)) == 0); > assert(thread != kqwl->kqwl_owner); > > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_BIND), > kqwl->kqwl_dynamicid, (uintptr_t)thread_tid(thread), > qos_index, > (uintptr_t)(((uintptr_t)kqr->kqr_override_index << 16) | > (((uintptr_t)kqr->kqr_state) << 8) | > ((uintptr_t)ipc_override_is_sync))); > > kqr->kqr_state |= KQR_BOUND | KQR_R2K_NOTIF_ARMED; > kqr->kqr_thread = thread; > > /* bind the workloop to the uthread */ > ut->uu_kqueue_bound = (struct kqueue *)kqwl; > ut->uu_kqueue_flags = flags; > ut->uu_kqueue_qos_index = qos_index; > assert(ut->uu_kqueue_override_is_sync == 0); > ut->uu_kqueue_override_is_sync = ipc_override_is_sync; > if (qos_index) { > thread_add_ipc_override(thread, qos_index); > } > if (ipc_override_is_sync) { > thread_add_sync_ipc_override(thread); > } > } > > /* > * workloop_fulfill_threadreq - bind thread to processing workloop > * > * The provided thread will be responsible for delivering events > * associated with the given kqrequest. Bind it and get ready for > * the thread to eventually arrive. > * > * If WORKLOOP_FULFILL_THREADREQ_SYNC is specified, the callback > * within the context of the pthread_functions->workq_threadreq > * callout. In this case, the request structure is already locked. 
> */ > int > workloop_fulfill_threadreq(struct proc *p, > workq_threadreq_t req, > thread_t thread, > int flags) > { > int sync = (flags & WORKLOOP_FULFILL_THREADREQ_SYNC); > int cancel = (flags & WORKLOOP_FULFILL_THREADREQ_CANCEL); > struct kqrequest *kqr; > struct kqworkloop *kqwl; > > kqwl = (struct kqworkloop *)((uintptr_t)req - > offsetof(struct kqworkloop, kqwl_request) - > offsetof(struct kqrequest, kqr_req)); > kqr = &kqwl->kqwl_request; > > /* validate we're looking at something valid */ > if (kqwl->kqwl_p != p || > (kqwl->kqwl_state & KQ_WORKLOOP) == 0) { > assert(kqwl->kqwl_p == p); > assert(kqwl->kqwl_state & KQ_WORKLOOP); > return EINVAL; > } > > if (!sync) > kqwl_req_lock(kqwl); > > /* Should be a pending request */ > if ((kqr->kqr_state & KQR_BOUND) || > (kqr->kqr_state & KQR_THREQUESTED) == 0) { > > assert((kqr->kqr_state & KQR_BOUND) == 0); > assert(kqr->kqr_state & KQR_THREQUESTED); > if (!sync) > kqwl_req_unlock(kqwl); > return EINPROGRESS; > } > > assert((kqr->kqr_state & KQR_DRAIN) == 0); > > /* > * Is it a cancel indication from pthread. > * If so, we must be exiting/exec'ing. Forget > * our pending request. > */ > if (cancel) { > kqr->kqr_state &= ~KQR_THREQUESTED; > kqr->kqr_state |= KQR_DRAIN; > } else { > /* do the actual bind? */ > kqworkloop_bind_thread_impl(kqwl, thread, KEVENT_FLAG_WORKLOOP); > } > > if (!sync) > kqwl_req_unlock(kqwl); > > if (cancel) > kqueue_release_last(p, &kqwl->kqwl_kqueue); /* may dealloc kq */ > > return 0; > } > > > /* > * kevent_qos_internal_bind - bind thread to processing kqueue > * > * Indicates that the provided thread will be responsible for > * servicing the particular QoS class index specified in the > * parameters. Once the binding is done, any overrides that may > * be associated with the cooresponding events can be applied. > * > * This should be called as soon as the thread identity is known, > * preferably while still at high priority during creation. > * > * - caller holds a reference on the kqueue. > * - the thread MUST call kevent_qos_internal after being bound > * or the bucket of events may never be delivered. > * - Nothing locked (may take mutex or block). 
> */ > > int > kevent_qos_internal_bind( > struct proc *p, > int qos_class, > thread_t thread, > unsigned int flags) > { > kq_index_t qos_index; > > assert(flags & KEVENT_FLAG_WORKQ); > > if (thread == THREAD_NULL || (flags & KEVENT_FLAG_WORKQ) == 0) { > return EINVAL; > } > > /* get the qos index we're going to service */ > qos_index = qos_index_for_servicer(qos_class, thread, flags); > > if (kqworkq_internal_bind(p, qos_index, thread, flags)) > return 0; > > return EINPROGRESS; > } > > > static void > kqworkloop_internal_unbind( > struct proc *p, > thread_t thread, > unsigned int flags) 5223,5225d5699 < int error = 0; < int fd; < struct fileproc *fp; 5228,5243c5702 < struct filedesc *fdp = p->p_fd; < workq_threadreq_param_t trp = { }; < < switch (cmd) { < case KQ_WORKLOOP_CREATE: < if (!params->kqwlp_flags) { < error = EINVAL; < break; < } < < if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) && < (params->kqwlp_sched_pri < 1 || < params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) { < error = EINVAL; < break; < } --- > struct uthread *ut = get_bsdthread_info(thread); 5245,5249c5704,5707 < if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) && < invalid_policy(params->kqwlp_sched_pol)) { < error = EINVAL; < break; < } --- > assert(ut->uu_kqueue_bound != NULL); > kq = ut->uu_kqueue_bound; > assert(kq->kq_state & KQ_WORKLOOP); > kqwl = (struct kqworkloop *)kq; 5251,5258c5709,5711 < if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) && < (params->kqwlp_cpu_percent <= 0 || < params->kqwlp_cpu_percent > 100 || < params->kqwlp_cpu_refillms <= 0 || < params->kqwlp_cpu_refillms > 0x00ffffff)) { < error = EINVAL; < break; < } --- > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), > kqwl->kqwl_dynamicid, (uintptr_t)thread_tid(thread), > flags, 0); 5260,5279c5713,5714 < if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) { < trp.trp_flags |= TRP_PRIORITY; < trp.trp_pri = params->kqwlp_sched_pri; < } < if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) { < trp.trp_flags |= TRP_POLICY; < trp.trp_pol = params->kqwlp_sched_pol; < } < if (params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) { < trp.trp_flags |= TRP_CPUPERCENT; < trp.trp_cpupercent = (uint8_t)params->kqwlp_cpu_percent; < trp.trp_refillms = params->kqwlp_cpu_refillms; < } < < error = kevent_get_kq(p, params->kqwlp_id, &trp, < KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP | < KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST , &fp, &fd, &kq); < if (error) { < break; < } --- > if (!(kq->kq_state & KQ_NO_WQ_THREAD)) { > assert(is_workqueue_thread(thread)); 5281,5297d5715 < if (!(fdp->fd_flags & FD_WORKLOOP)) { < /* FD_WORKLOOP indicates we've ever created a workloop < * via this syscall but its only ever added to a process, never < * removed. 
< */ < proc_fdlock(p); < fdp->fd_flags |= FD_WORKLOOP; < proc_fdunlock(p); < } < break; < case KQ_WORKLOOP_DESTROY: < error = kevent_get_kq(p, params->kqwlp_id, NULL, < KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP | < KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST , &fp, &fd, &kq); < if (error) { < break; < } 5299,5306c5717 < kqwl = (struct kqworkloop *)kq; < trp.trp_value = kqwl->kqwl_params; < if (trp.trp_flags && !(trp.trp_flags & TRP_RELEASED)) { < trp.trp_flags |= TRP_RELEASED; < kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF); < } else { < error = EINVAL; < } --- > kqworkloop_unbind_thread(kqwl, thread, flags); 5307a5719,5720 > > /* If last reference, dealloc the workloop kq */ 5309c5722,5724 < break; --- > } else { > assert(!is_workqueue_thread(thread)); > kevent_servicer_detach_thread(p, kqwl->kqwl_dynamicid, thread, flags, kq); 5311,5312d5725 < *retval = 0; < return error; 5314a5728,5777 > static void > kqworkq_internal_unbind( > struct proc *p, > kq_index_t qos_index, > thread_t thread, > unsigned int flags) > { > struct kqueue *kq; > struct kqworkq *kqwq; > struct uthread *ut; > kq_index_t end_index; > > assert(thread == current_thread()); > ut = get_bsdthread_info(thread); > > kq = p->p_fd->fd_wqkqueue; > assert(kq->kq_state & KQ_WORKQ); > assert(ut->uu_kqueue_bound == kq); > > kqwq = (struct kqworkq *)kq; > > /* end servicing any requests we might own */ > end_index = (qos_index == KQWQ_QOS_MANAGER) ? > 0 : qos_index; > kqlock(kq); > > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), > (uintptr_t)thread_tid(thread), flags, qos_index); > > do { > kqworkq_end_processing(kqwq, qos_index, flags); > } while (qos_index-- > end_index); > > ut->uu_kqueue_bound = NULL; > ut->uu_kqueue_qos_index = 0; > ut->uu_kqueue_flags = 0; > > kqunlock(kq); > } > > /* > * kevent_qos_internal_unbind - unbind thread from processing kqueue > * > * End processing the per-QoS bucket of events and allow other threads > * to be requested for future servicing. > * > * caller holds a reference on the kqueue. > * thread is the current thread. 
> */ > 5316c5779,5783 < kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval) --- > kevent_qos_internal_unbind( > struct proc *p, > int qos_class, > thread_t thread, > unsigned int flags) 5318,5323c5785 < struct kqueue_workloop_params params = { < .kqwlp_id = 0, < }; < if (uap->sz < sizeof(params.kqwlp_version)) { < return EINVAL; < } --- > #pragma unused(qos_class) 5325,5328c5787,5798 < size_t copyin_sz = MIN(sizeof(params), uap->sz); < int rv = copyin(uap->addr, ¶ms, copyin_sz); < if (rv) { < return rv; --- > struct uthread *ut; > struct kqueue *kq; > unsigned int bound_flags; > bool check_flags; > > ut = get_bsdthread_info(thread); > if (ut->uu_kqueue_bound == NULL) { > /* early out if we are already unbound */ > assert(ut->uu_kqueue_flags == 0); > assert(ut->uu_kqueue_qos_index == 0); > assert(ut->uu_kqueue_override_is_sync == 0); > return EALREADY; 5331,5332c5801,5826 < if (params.kqwlp_version != (int)uap->sz) { < return EINVAL; --- > assert(flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP)); > assert(thread == current_thread()); > > check_flags = flags & KEVENT_FLAG_UNBIND_CHECK_FLAGS; > > /* Get the kqueue we started with */ > kq = ut->uu_kqueue_bound; > assert(kq != NULL); > assert(kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)); > > /* get flags and QoS parameters we started with */ > bound_flags = ut->uu_kqueue_flags; > > /* Unbind from the class of workq */ > if (kq->kq_state & KQ_WORKQ) { > if (check_flags && !(flags & KEVENT_FLAG_WORKQ)) { > return EINVAL; > } > > kqworkq_internal_unbind(p, ut->uu_kqueue_qos_index, thread, bound_flags); > } else { > if (check_flags && !(flags & KEVENT_FLAG_WORKLOOP)) { > return EINVAL; > } > > kqworkloop_internal_unbind(p, thread, bound_flags); 5335,5336c5829 < return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, ¶ms, < retval); --- > return 0; 5342,5346c5835,5840 < * Walk the queued knotes and validate that they are really still triggered < * events by calling the filter routines (if necessary). < * < * For each event that is still considered triggered, invoke the callback < * routine provided. --- > * Walk the queued knotes and validate that they are > * really still triggered events by calling the filter > * routines (if necessary). Hold a use reference on > * the knote to avoid it being detached. For each event > * that is still considered triggered, invoke the > * callback routine provided. 5351a5846 > 5354,5357c5849,5853 < kevent_callback_t callback, < void *callback_data, < struct filt_process_s *process_data, < int *countp) --- > kevent_callback_t callback, > void *callback_data, > struct filt_process_s *process_data, > int *countp, > struct proc *p) 5358a5855 > unsigned int flags = process_data ? process_data->fp_flags : 0; 5360c5857 < struct kqrequest *kqr = ut->uu_kqr_bound; --- > kq_index_t start_index, end_index, i; 5362,5368c5859,5860 < unsigned int flags = process_data ? 
process_data->fp_flags : 0; < int nevents = 0, error = 0, rc = 0; < struct kqtailq *base_queue, *queue; < kqueue_t kqu = { .kq = kq }; < #if DEBUG || DEVELOPMENT < int retries = 64; < #endif --- > int nevents = 0; > int error = 0; 5369a5862,5865 > /* > * Based on the mode of the kqueue and the bound QoS of the servicer, > * determine the range of thread requests that need checking > */ 5371,5376c5867,5869 < if (kqr == NULL || (kqr->kqr_state & KQR_WORKLOOP)) { < return EJUSTRETURN; < } < rc = kqworkq_begin_processing(kqu.kqwq, kqr, flags); < } else if (kq->kq_state & KQ_WORKLOOP) { < if (ut->uu_kqr_bound != &kqu.kqwl->kqwl_request) { --- > if (flags & KEVENT_FLAG_WORKQ_MANAGER) { > start_index = KQWQ_QOS_MANAGER; > } else if (ut->uu_kqueue_bound != kq) { 5377a5871,5872 > } else { > start_index = ut->uu_kqueue_qos_index; 5379,5382d5873 < rc = kqworkloop_begin_processing(kqu.kqwl, flags); < } else { < rc = kqfile_begin_processing(kq); < } 5384,5396c5875,5877 < if (rc == -1) { < /* Nothing to process */ < *countp = 0; < return 0; < } < < /* < * loop through the enqueued knotes associated with this request, < * processing each one. Each request may have several queues < * of knotes to process (depending on the type of kqueue) so we < * have to loop through all the queues as long as we have additional < * space. < */ --- > /* manager services every request in a workq kqueue */ > assert(start_index > 0 && start_index <= KQWQ_QOS_MANAGER); > end_index = (start_index == KQWQ_QOS_MANAGER) ? 0 : start_index; 5398,5400d5878 < process_again: < if (kq->kq_state & KQ_WORKQ) { < base_queue = queue = &kqu.kqwq->kqwq_queue[kqr->kqr_qos_index]; 5402,5403c5880,5887 < base_queue = &kqu.kqwl->kqwl_queue[0]; < queue = &kqu.kqwl->kqwl_queue[KQWL_NBUCKETS - 1]; --- > if (ut->uu_kqueue_bound != kq) > return EJUSTRETURN; > > /* > * Single request servicing > * we want to deliver all events, regardless of the QOS > */ > start_index = end_index = THREAD_QOS_UNSPECIFIED; 5405c5889 < base_queue = queue = &kq->kq_queue[QOS_INDEX_KQFILE]; --- > start_index = end_index = QOS_INDEX_KQFILE; 5406a5891,5892 > > i = start_index; 5409,5414c5895,5920 < while (error == 0 && (kn = TAILQ_FIRST(queue)) != NULL) { < error = knote_process(kn, callback, callback_data, process_data); < if (error == EJUSTRETURN) { < error = 0; < } else { < nevents++; --- > if (kqueue_begin_processing(kq, i, flags) == -1) { > *countp = 0; > /* Nothing to process */ > continue; > } > > /* > * loop through the enqueued knotes associated with this request, > * processing each one. Each request may have several queues > * of knotes to process (depending on the type of kqueue) so we > * have to loop through all the queues as long as we have additional > * space. 
> */ > error = 0; > > struct kqtailq *base_queue = kqueue_get_base_queue(kq, i); > struct kqtailq *queue = kqueue_get_high_queue(kq, i); > do { > while (error == 0 && (kn = TAILQ_FIRST(queue)) != NULL) { > error = knote_process(kn, callback, callback_data, process_data, p); > if (error == EJUSTRETURN) { > error = 0; > } else { > nevents++; > } > /* error is EWOULDBLOCK when the out event array is full */ 5416c5922,5925 < /* error is EWOULDBLOCK when the out event array is full */ --- > } while (error == 0 && queue-- > base_queue); > > if ((kq->kq_state & KQ_WORKQ) == 0) { > kqueue_end_processing(kq, i, nevents, flags); 5424c5933 < } while (queue-- > base_queue); --- > } while (i-- > end_index); 5427,5457c5936 < < /* < * If KEVENT_FLAG_PARKING is set, and no kevents have been returned, < * we want to unbind the kqrequest from the thread. < * < * However, because the kq locks are dropped several times during process, < * new knotes may have fired again, in which case, we want to fail the end < * processing and process again, until it converges. < * < * If we returned events however, end processing never fails. < */ < if (error || nevents) flags &= ~KEVENT_FLAG_PARKING; < if (kq->kq_state & KQ_WORKQ) { < rc = kqworkq_end_processing(kqu.kqwq, kqr, flags); < } else if (kq->kq_state & KQ_WORKLOOP) { < rc = kqworkloop_end_processing(kqu.kqwl, KQ_PROCESSING, flags); < } else { < kqfile_end_processing(kq); < rc = 0; < } < if (rc == -1) { < assert(flags & KEVENT_FLAG_PARKING); < #if DEBUG || DEVELOPMENT < if (retries-- == 0) { < panic("kevent: way too many knote_process retries, kq: %p (0x%02x)", < kq, kq->kq_state); < } < #endif < goto process_again; < } < return error; --- > return (error); 5465c5944 < struct _kqueue_scan * cont_args = &ut->uu_save.uus_kqueue_scan; --- > struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan; 5476,5477c5955,5956 < error = kqueue_process(kq, cont_args->call, cont_args->data, < process_data, &count); --- > error = kqueue_process(kq, cont_args->call, cont_args->data, > process_data, &count, current_proc()); 5531a6011 > 5539c6019 < __unused struct proc *p) --- > struct proc *p) 5567c6047 < process_data, &count); --- > process_data, &count, p); 5590c6070 < deadline = 0; /* block forever */ --- > deadline = 0; /* block forever */ 5595c6075 < struct _kqueue_scan *cont_args = &ut->uu_save.uus_kqueue_scan; --- > struct _kqueue_scan *cont_args = &ut->uu_kevent.ss_kqueue_scan; 5654,5656c6134,6136 < __unused struct uio *uio, < __unused int flags, < __unused vfs_context_t ctx) --- > __unused struct uio *uio, > __unused int flags, > __unused vfs_context_t ctx) 5664,5666c6144,6146 < __unused struct uio *uio, < __unused int flags, < __unused vfs_context_t ctx) --- > __unused struct uio *uio, > __unused int flags, > __unused vfs_context_t ctx) 5674,5676c6154,6156 < __unused u_long com, < __unused caddr_t data, < __unused vfs_context_t ctx) --- > __unused u_long com, > __unused caddr_t data, > __unused vfs_context_t ctx) 5684c6164 < __unused vfs_context_t ctx) --- > __unused vfs_context_t ctx) 5709c6189 < struct uthread * ut = get_bsdthread_info(cur_act); --- > struct uthread * ut = get_bsdthread_info(cur_act); 5732c6212 < if (kqfile_begin_processing(kq) == -1) { --- > if (kqueue_begin_processing(kq, QOS_INDEX_KQFILE, 0) == -1) { 5737c6217 < queue = &kq->kq_queue[QOS_INDEX_KQFILE]; --- > queue = kqueue_get_base_queue(kq, QOS_INDEX_KQFILE); 5759c6239 < suppressq = kqueue_get_suppressed_queue(kq, NULL); --- > suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE); 
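The hunks above replace kqueue_process()'s per-QoS scanning loop with a single pass plus a process_again retry: when KEVENT_FLAG_PARKING is set and end-processing observes that knotes fired while the kq lock was dropped, the whole pass is redone until it converges, with a 64-retry panic bound on DEBUG/DEVELOPMENT builds. A minimal user-space model of that drain-then-try-to-park shape (all names here are invented for the sketch, nothing below is xnu code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int q_pending = 3;          /* pretend three events are queued        */
static int q_racing_refills = 2;   /* events that "fire" while we are unlocked */

/* Drain everything currently queued; returns how many items we processed. */
static int process_pass(void)
{
    int n;
    pthread_mutex_lock(&q_lock);
    n = q_pending;
    q_pending = 0;
    pthread_mutex_unlock(&q_lock);
    return n;
}

/* Stands in for knotes firing during the window where the kq lock is dropped. */
static void maybe_fire_racing_event(void)
{
    if (q_racing_refills > 0) {
        q_racing_refills--;
        pthread_mutex_lock(&q_lock);
        q_pending++;
        pthread_mutex_unlock(&q_lock);
    }
}

/* Returns 0 if we may park, -1 if new events arrived and the caller must loop. */
static int end_processing(void)
{
    int rc;
    pthread_mutex_lock(&q_lock);
    rc = (q_pending == 0) ? 0 : -1;
    pthread_mutex_unlock(&q_lock);
    return rc;
}

int main(void)
{
    int retries = 64, total = 0;   /* mirrors the DEBUG/DEVELOPMENT bound above */
    for (;;) {
        total += process_pass();
        maybe_fire_racing_event();
        if (end_processing() == 0) {
            printf("parked after %d events\n", total);
            return 0;
        }
        if (--retries == 0) {
            fprintf(stderr, "retry bound hit\n");
            return 1;
        }
    }
}

The retry bound appears intended only to catch livelock during development; the production path simply relies on the event stream quiescing.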
5761,5762c6241 < KNOTE_LOCK_CTX(knlc); < int result = 0; --- > unsigned peek = 1; 5764,5768c6243 < /* If didn't vanish while suppressed - peek at it */ < if ((kn->kn_status & KN_DROPPING) || !knote_lock(kq, kn, &knlc, < KNOTE_KQ_LOCK_ON_FAILURE)) { < continue; < } --- > assert(!knoteuse_needs_boost(kn, NULL)); 5770c6245,6247 < result = filter_call(knote_fops(kn), f_peek(kn)); --- > /* If didn't vanish while suppressed - peek at it */ > if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) { > peek = knote_fops(kn)->f_peek(kn); 5772,5773c6249,6252 < kqlock(kq); < knote_unlock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS); --- > /* if it dropped while getting lock - move on */ > if (!knoteuse2kqlock(kq, kn, KNUSE_NONE)) > continue; > } 5779c6258 < if (result & FILTER_ACTIVE) { --- > if (peek > 0) { 5787c6266 < kqfile_end_processing(kq); --- > kqueue_end_processing(kq, QOS_INDEX_KQFILE, retnum, 0); 5807,5813d6285 < /* < * Max depth of the nested kq path that can be created. < * Note that this has to be less than the size of kq_level < * to avoid wrapping around and mislabeling the level. < */ < #define MAX_NESTED_KQ 1000 < 5827d6298 < uint16_t plevel = 0; 5831,5832c6302,6305 < if (parentkq == kq || kn->kn_filter != EVFILT_READ) { < knote_set_error(kn, EINVAL); --- > if (parentkq == kq || > kn->kn_filter != EVFILT_READ) { > kn->kn_flags = EV_ERROR; > kn->kn_data = EINVAL; 5845,5846d6317 < * < * Only up to MAX_NESTED_KQ can be nested. 5854c6325,6326 < knote_set_error(kn, EINVAL); --- > kn->kn_flags = EV_ERROR; > kn->kn_data = EINVAL; 5858,5868c6330,6333 < plevel = (parentkq->kq_level == 0)? 2: parentkq->kq_level; < if (plevel < kq->kq_level + 1) { < if (kq->kq_level + 1 > MAX_NESTED_KQ) { < kqunlock(parentkq); < knote_set_error(kn, EINVAL); < return 0; < } < plevel = kq->kq_level + 1; < } < < parentkq->kq_level = plevel; --- > if (parentkq->kq_level == 0) > parentkq->kq_level = 2; > if (parentkq->kq_level < kq->kq_level + 1) > parentkq->kq_level = kq->kq_level + 1; 5943,5944c6408,6411 < * Interact with the pthread kext to request a servicing there at a specific QoS < * level. --- > * Interact with the pthread kext to request a servicing there. > * Eventually, this will request threads at specific QoS levels. > * For now, it only requests a dispatch-manager-QoS thread, and > * only one-at-a-time. 5952,5953c6419,6421 < kqueue_threadreq_initiate(struct kqueue *kq, struct kqrequest *kqr, < kq_index_t qos, int flags) --- > kqworkq_request_thread( > struct kqworkq *kqwq, > kq_index_t qos_index) 5955,5958c6423 < assert(kqr->kqr_state & KQR_WAKEUP); < assert(kqr->kqr_thread == THREAD_NULL); < assert((kqr->kqr_state & KQR_THREQUESTED) == 0); < struct turnstile *ts = TURNSTILE_NULL; --- > struct kqrequest *kqr; 5960,5962c6425,6426 < if (workq_is_exiting(kq->kq_p)) { < return; < } --- > assert(kqwq->kqwq_state & KQ_WORKQ); > assert(qos_index < KQWQ_NQOS); 5964,5965c6428 < /* Add a thread request reference on the kqueue. */ < kqueue_retain(kq); --- > kqr = kqworkq_get_request(kqwq, qos_index); 5967c6430 < kq_req_held(kq); --- > assert(kqr->kqr_state & KQR_WAKEUP); 5969,5970c6432,6438 < if (kq->kq_state & KQ_WORKLOOP) { < __assert_only struct kqworkloop *kqwl = (struct kqworkloop *)kq; --- > /* > * If we have already requested a thread, and it hasn't > * started processing yet, there's no use hammering away > * on the pthread kext. 
> */ > if (kqr->kqr_state & KQR_THREQUESTED) > return; 5972,5980c6440 < assert(kqwl->kqwl_owner == THREAD_NULL); < KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST), < kqwl->kqwl_dynamicid, 0, qos, kqr->kqr_state); < ts = kqwl->kqwl_turnstile; < } else { < assert(kq->kq_state & KQ_WORKQ); < KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST), < -1, 0, qos, kqr->kqr_state); < } --- > assert((kqr->kqr_state & KQR_BOUND) == 0); 5982c6442,6447 < kqr->kqr_state |= KQR_THREQUESTED; --- > /* request additional workq threads if appropriate */ > if (pthread_functions != NULL && > pthread_functions->workq_reqthreads != NULL) { > unsigned int flags = KEVENT_FLAG_WORKQ; > unsigned long priority; > thread_t wqthread; 5984,6005c6449,6450 < /* < * New-style thread request supported. < * Provide the pthread kext a pointer to a workq_threadreq_s structure for < * its use until a corresponding kqueue_threadreq_bind callback. < */ < if ((kq->kq_state & KQ_WORKLOOP) && current_proc() == kq->kq_p) { < flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE; < } < if (qos == KQWQ_QOS_MANAGER) { < qos = WORKQ_THREAD_QOS_MANAGER; < } < if (!workq_kern_threadreq_initiate(kq->kq_p, kqr, ts, qos, flags)) { < /* < * Process is shutting down or exec'ing. < * All the kqueues are going to be cleaned up < * soon. Forget we even asked for a thread - < * and make sure we don't ask for more. < */ < kqr->kqr_state &= ~(KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED); < kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF); < } < } --- > /* Compute the appropriate pthread priority */ > priority = qos_from_qos_index(qos_index); 6007,6017c6452,6468 < /* < * kqueue_threadreq_bind_prepost - prepost the bind to kevent < * < * This is used when kqueue_threadreq_bind may cause a lock inversion. < */ < void < kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t req, < thread_t thread) < { < struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req); < struct uthread *ut = get_bsdthread_info(thread); --- > #if 0 > /* JMM - for now remain compatible with old invocations */ > /* set the over-commit flag on the request if needed */ > if (kqr->kqr_state & KQR_THOVERCOMMIT) > priority |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; > #endif /* 0 */ > > /* Compute a priority based on qos_index. */ > struct workq_reqthreads_req_s request = { > .priority = priority, > .count = 1 > }; > > /* mark that we are making a request */ > kqr->kqr_state |= KQR_THREQUESTED; > if (qos_index == KQWQ_QOS_MANAGER) > kqr->kqr_state |= KQWQ_THMANAGER; 6019,6021c6470,6481 < req->tr_binding_thread = thread; < ut->uu_kqr_bound = kqr; < req->tr_state = TR_STATE_BINDING; --- > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST), > 0, qos_index, > (((uintptr_t)kqr->kqr_override_index << 8) | > (uintptr_t)kqr->kqr_state)); > wqthread = (*pthread_functions->workq_reqthreads)(kqwq->kqwq_p, 1, &request); > > /* We've been switched to the emergency/manager thread */ > if (wqthread == (thread_t)-1) { > assert(qos_index != KQWQ_QOS_MANAGER); > kqr->kqr_state |= KQWQ_THMANAGER; > return; > } 6023,6025d6482 < struct kqworkloop *kqwl = kqr_kqworkloop(kqr); < if (kqwl && kqwl->kqwl_turnstile) { < struct turnstile *ts = kqwl->kqwl_turnstile; 6027,6028c6484,6486 < * While a thread request is in flight, the workqueue < * is the interlock for the turnstile and can update the inheritor. --- > * bind the returned thread identity > * This goes away when we switch to synchronous callback > * binding from the pthread kext. 
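Around this point the old workq path (kqworkq_request_thread() calling pthread_functions->workq_reqthreads and handling the (thread_t)-1 manager fallback) gives way to kqueue_threadreq_initiate(), which asserts there is no bound thread and no request already outstanding before asking workq_kern_threadreq_initiate for one. The invariant both versions protect is the same: a wakeup raises at most one outstanding thread request, and a bind consumes it. A toy model of that handshake, with invented names and no real kernel calls:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_req {
    bool wakeup;        /* events are pending          (KQR_WAKEUP-like)      */
    bool threquested;   /* a thread has been asked for (KQR_THREQUESTED-like) */
    bool bound;         /* a servicer is bound         (KQR_BOUND-like)       */
};

static void toy_wakeup(struct toy_req *r)
{
    r->wakeup = true;
    /* Ask only once: hammering the thread pool while a request is in
     * flight (or a servicer is already bound) is wasted work. */
    if (!r->threquested && !r->bound) {
        r->threquested = true;
        printf("-> thread requested\n");
    }
}

static void toy_bind(struct toy_req *r)
{
    assert(r->threquested && !r->bound);
    r->threquested = false;
    r->bound = true;
    printf("-> servicer bound\n");
}

static void toy_unbind(struct toy_req *r)
{
    assert(r->bound);
    r->bound = false;
    /* More events fired while we were bound: immediately re-request. */
    if (r->wakeup) {
        r->threquested = true;
        printf("-> re-requested on unbind\n");
    }
}

int main(void)
{
    struct toy_req r = {0};
    toy_wakeup(&r);   /* first event: request a thread      */
    toy_wakeup(&r);   /* second event: no duplicate request */
    toy_bind(&r);     /* pool delivers a thread             */
    toy_unbind(&r);   /* servicer parks; wakeup still set   */
    return 0;
}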
6030,6032c6488,6490 < turnstile_update_inheritor(ts, thread, TURNSTILE_IMMEDIATE_UPDATE | < TURNSTILE_INHERITOR_THREAD); < turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); --- > if (wqthread != NULL) { > kqworkq_bind_thread_impl(kqwq, qos_index, wqthread, flags); > } 6037c6495,6496 < * kqueue_threadreq_bind_commit - commit a bind prepost --- > * If we aren't already busy processing events [for this QoS], > * request workq thread support as appropriate. 6039,6072c6498 < * The workq code has to commit any binding prepost before the thread has < * a chance to come back to userspace (and do kevent syscalls) or be aborted. < */ < void < kqueue_threadreq_bind_commit(struct proc *p, thread_t thread) < { < struct uthread *ut = get_bsdthread_info(thread); < struct kqrequest *kqr = ut->uu_kqr_bound; < kqueue_t kqu = kqr_kqueue(p, kqr); < < kq_req_lock(kqu); < if (kqr->kqr_req.tr_state == TR_STATE_BINDING) { < kqueue_threadreq_bind(p, &kqr->kqr_req, thread, 0); < } < kq_req_unlock(kqu); < } < < static void < kqueue_threadreq_modify(struct kqueue *kq, struct kqrequest *kqr, kq_index_t qos) < { < assert(kqr->kqr_state & KQR_THREQUESTED); < assert(kqr->kqr_thread == THREAD_NULL); < < kq_req_held(kq); < < int flags = 0; < if ((kq->kq_state & KQ_WORKLOOP) && kq->kq_p == current_proc()) { < flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE; < } < workq_kern_threadreq_modify(kq->kq_p, kqr, qos, flags); < } < < /* < * kqueue_threadreq_bind - bind thread to processing kqrequest --- > * TBD - for now, we don't segregate out processing by QoS. 6074,6076c6500,6501 < * The provided thread will be responsible for delivering events < * associated with the given kqrequest. Bind it and get ready for < * the thread to eventually arrive. --- > * - May be called with the kqueue's wait queue set locked, > * so cannot do anything that could recurse on that. 6078,6080c6503,6506 < void < kqueue_threadreq_bind(struct proc *p, workq_threadreq_t req, thread_t thread, < unsigned int flags) --- > static void > kqworkq_request_help( > struct kqworkq *kqwq, > kq_index_t qos_index) 6082,6136c6508 < struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req); < kqueue_t kqu = kqr_kqueue(p, kqr); < struct uthread *ut = get_bsdthread_info(thread); < < kq_req_held(kqu); < < assert(kqr->kqr_state & KQR_THREQUESTED); < assert(kqr->kqr_thread == THREAD_NULL); < assert(ut->uu_kqueue_override == 0); < < if (kqr->kqr_req.tr_state == TR_STATE_BINDING) { < assert(ut->uu_kqr_bound == kqr); < assert(kqr->kqr_req.tr_binding_thread == thread); < kqr->kqr_req.tr_state = TR_STATE_IDLE; < kqr->kqr_req.tr_binding_thread = NULL; < } else { < assert(ut->uu_kqr_bound == NULL); < } < < ut->uu_kqr_bound = kqr; < kqr->kqr_thread = thread; < < if (kqu.kq->kq_state & KQ_WORKLOOP) { < struct turnstile *ts = kqu.kqwl->kqwl_turnstile; < < if (__improbable(thread == kqu.kqwl->kqwl_owner)) { < /* < * shows that asserting here is not ok. < * < * This is not supposed to happen for correct use of the interface, < * but it is sadly possible for userspace (with the help of memory < * corruption, such as over-release of a dispatch queue) to make < * the creator thread the "owner" of a workloop. < * < * Once that happens, and that creator thread picks up the same < * workloop as a servicer, we trip this codepath. We need to fixup < * the state to forget about this thread being the owner, as the < * entire workloop state machine expects servicers to never be < * owners and everything would basically go downhill from here. 
< */ < kqu.kqwl->kqwl_owner = THREAD_NULL; < if (kqworkloop_owner_override(kqu.kqwl)) { < thread_drop_ipc_override(thread); < } < thread_ends_owning_workloop(thread); < } < < if (ts && (flags & KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE) == 0) { < /* < * Past this point, the interlock is the kq req lock again, < * so we can fix the inheritor for good. < */ < filt_wlupdate_inheritor(kqu.kqwl, ts, TURNSTILE_IMMEDIATE_UPDATE); < turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); < } --- > struct kqrequest *kqr; 6138,6140c6510,6514 < KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid, < thread_tid(thread), kqr->kqr_qos_index, < (kqr->kqr_override_index << 16) | kqr->kqr_state); --- > /* convert to thread qos value */ > assert(qos_index < KQWQ_NQOS); > > kqwq_req_lock(kqwq); > kqr = kqworkq_get_request(kqwq, qos_index); 6142,6147c6516,6518 < ut->uu_kqueue_override = kqr->kqr_override_index; < if (kqr->kqr_override_index) { < thread_add_ipc_override(thread, kqr->kqr_override_index); < } < } else { < assert(kqr->kqr_override_index == 0); --- > if ((kqr->kqr_state & KQR_WAKEUP) == 0) { > /* Indicate that we needed help from this request */ > kqr->kqr_state |= KQR_WAKEUP; 6149,6151c6520,6521 < KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1, < thread_tid(thread), kqr->kqr_qos_index, < (kqr->kqr_override_index << 16) | kqr->kqr_state); --- > /* Go assure a thread request has been made */ > kqworkq_request_thread(kqwq, qos_index); 6152a6523 > kqwq_req_unlock(kqwq); 6155,6161c6526,6527 < /* < * kqueue_threadreq_cancel - abort a pending thread request < * < * Called when exiting/exec'ing. Forget our pending request. < */ < void < kqueue_threadreq_cancel(struct proc *p, workq_threadreq_t req) --- > static void > kqworkloop_threadreq_impl(struct kqworkloop *kqwl, kq_index_t qos_index) 6163,6164c6529,6531 < struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req); < kqueue_t kqu = kqr_kqueue(p, kqr); --- > struct kqrequest *kqr = &kqwl->kqwl_request; > unsigned long pri = pthread_priority_for_kqrequest(kqr, qos_index); > int op, ret; 6166c6533 < kq_req_lock(kqu); --- > assert((kqr->kqr_state & (KQR_THREQUESTED | KQR_BOUND)) == KQR_THREQUESTED); 6168,6170c6535,6566 < assert(kqr->kqr_thread == THREAD_NULL); < assert(kqr->kqr_state & KQR_THREQUESTED); < kqr->kqr_state &= ~(KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED); --- > /* > * New-style thread request supported. Provide > * the pthread kext a pointer to a workq_threadreq_s > * structure for its use until a corresponding > * workloop_fulfill_threqreq callback. > */ > if (current_proc() == kqwl->kqwl_kqueue.kq_p) { > op = WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL; > } else { > op = WORKQ_THREADREQ_WORKLOOP; > } > again: > ret = (*pthread_functions->workq_threadreq)(kqwl->kqwl_p, &kqr->kqr_req, > WORKQ_THREADREQ_WORKLOOP, pri, 0); > switch (ret) { > case ENOTSUP: > assert(op == WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL); > op = WORKQ_THREADREQ_WORKLOOP; > goto again; > > case ECANCELED: > case EINVAL: > /* > * Process is shutting down or exec'ing. > * All the kqueues are going to be cleaned up > * soon. Forget we even asked for a thread - > * and make sure we don't ask for more. 
> */ > kqueue_release((struct kqueue *)kqwl, KQUEUE_CANT_BE_LAST_REF); > kqr->kqr_state &= ~KQR_THREQUESTED; > kqr->kqr_state |= KQR_DRAIN; > break; 6172c6568,6571 < kq_req_unlock(kqu); --- > case EAGAIN: > assert(op == WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL); > act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ); > break; 6174c6573,6575 < kqueue_release_last(p, kqu); /* may dealloc kqu */ --- > default: > assert(ret == 0); > } 6177,6178c6578,6579 < workq_threadreq_param_t < kqueue_threadreq_workloop_param(workq_threadreq_t req) --- > static void > kqworkloop_threadreq_modify(struct kqworkloop *kqwl, kq_index_t qos_index) 6180,6182c6581,6583 < struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req); < struct kqworkloop *kqwl; < workq_threadreq_param_t trp; --- > struct kqrequest *kqr = &kqwl->kqwl_request; > unsigned long pri = pthread_priority_for_kqrequest(kqr, qos_index); > int ret, op = WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL; 6184,6188c6585 < assert(kqr->kqr_state & KQR_WORKLOOP); < kqwl = __container_of(kqr, struct kqworkloop, kqwl_request); < trp.trp_value = kqwl->kqwl_params; < return trp; < } --- > assert((kqr->kqr_state & (KQR_THREQUESTED | KQR_BOUND)) == KQR_THREQUESTED); 6190,6202c6587,6588 < /* < * kqueue_threadreq_unbind - unbind thread from processing kqueue < * < * End processing the per-QoS bucket of events and allow other threads < * to be requested for future servicing. < * < * caller holds a reference on the kqueue. < */ < void < kqueue_threadreq_unbind(struct proc *p, struct kqrequest *kqr) < { < if (kqr->kqr_state & KQR_WORKLOOP) { < kqworkloop_unbind(p, kqr_kqworkloop(kqr)); --- > if (current_proc() == kqwl->kqwl_kqueue.kq_p) { > op = WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL; 6204c6590,6612 < kqworkq_unbind(p, kqr); --- > op = WORKQ_THREADREQ_CHANGE_PRI; > } > again: > ret = (*pthread_functions->workq_threadreq_modify)(kqwl->kqwl_p, > &kqr->kqr_req, op, pri, 0); > switch (ret) { > case ENOTSUP: > assert(op == WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL); > op = WORKQ_THREADREQ_CHANGE_PRI; > goto again; > > case EAGAIN: > assert(op == WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL); > act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ); > break; > > case ECANCELED: > case EINVAL: > case 0: > break; > > default: > assert(ret == 0); 6209,6210c6617,6620 < * If we aren't already busy processing events [for this QoS], < * request workq thread support as appropriate. --- > * Interact with the pthread kext to request a servicing thread. > * This will request a single thread at the highest QoS level > * for which there is work (whether that was the requested QoS > * for an event or an override applied to a lower-QoS request). 6212c6622 < * TBD - for now, we don't segregate out processing by QoS. 
--- > * - Caller holds the workloop request lock 6218c6628 < kqworkq_request_help(struct kqworkq *kqwq, kq_index_t qos_index) --- > kqworkloop_request_thread(struct kqworkloop *kqwl, kq_index_t qos_index) 6222,6223c6632 < /* convert to thread qos value */ < assert(qos_index < KQWQ_NBUCKETS); --- > assert(kqwl->kqwl_state & KQ_WORKLOOP); 6225,6226c6634 < kq_req_lock(kqwq); < kqr = kqworkq_get_request(kqwq, qos_index); --- > kqr = &kqwl->kqwl_request; 6228,6232c6636,6663 < if ((kqr->kqr_state & KQR_WAKEUP) == 0) { < kqr->kqr_state |= KQR_WAKEUP; < if ((kqr->kqr_state & KQR_THREQUESTED) == 0) { < kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, qos_index, 0); < } --- > assert(kqwl->kqwl_owner == THREAD_NULL); > assert((kqr->kqr_state & KQR_BOUND) == 0); > assert((kqr->kqr_state & KQR_THREQUESTED) == 0); > assert(!(kqwl->kqwl_kqueue.kq_state & KQ_NO_WQ_THREAD)); > > /* If we're draining thread requests, just bail */ > if (kqr->kqr_state & KQR_DRAIN) > return; > > if (pthread_functions != NULL && > pthread_functions->workq_threadreq != NULL) { > /* > * set request state flags, etc... before calling pthread > * This assures they are set before a possible synchronous > * callback to workloop_fulfill_threadreq(). > */ > kqr->kqr_state |= KQR_THREQUESTED; > > /* Add a thread request reference on the kqueue. */ > kqueue_retain((struct kqueue *)kqwl); > > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST), > kqwl->kqwl_dynamicid, > 0, qos_index, kqr->kqr_state); > kqworkloop_threadreq_impl(kqwl, qos_index); > } else { > panic("kqworkloop_request_thread"); > return; 6234d6664 < kq_req_unlock(kqwq); 6237,6238c6667,6678 < static kq_index_t < kqworkloop_owner_override(struct kqworkloop *kqwl) --- > static void > kqworkloop_update_sync_override_state(struct kqworkloop *kqwl, boolean_t sync_ipc_override) > { > struct kqrequest *kqr = &kqwl->kqwl_request; > kqwl_req_lock(kqwl); > kqr->kqr_has_sync_override = sync_ipc_override; > kqwl_req_unlock(kqwl); > > } > > static inline kq_index_t > kqworkloop_combined_qos(struct kqworkloop *kqwl, boolean_t *ipc_override_is_sync) 6241c6681,6691 < return MAX(kqr->kqr_qos_index, kqr->kqr_override_index); --- > kq_index_t override; > > *ipc_override_is_sync = FALSE; > override = MAX(MAX(kqr->kqr_qos_index, kqr->kqr_override_index), > kqr->kqr_dsync_waiters_qos); > > if (kqr->kqr_sync_suppress_count > 0 || kqr->kqr_has_sync_override) { > *ipc_override_is_sync = TRUE; > override = THREAD_QOS_USER_INTERACTIVE; > } > return override; 6249c6699 < kq_req_held(kqwl); --- > kqwl_req_held(kqwl); 6251a6702 > assert(kqr->kqr_state & KQR_BOUND); 6252a6704 > 6260a6713,6714 > const uint8_t KQWL_STAYACTIVE_FIRED_BIT = (1 << 0); > 6261a6716,6717 > boolean_t old_ipc_override_is_sync = FALSE; > kq_index_t old_qos = kqworkloop_combined_qos(kqwl, &old_ipc_override_is_sync); 6263c6719 < kq_index_t old_owner_override = kqworkloop_owner_override(kqwl); --- > bool static_thread = (kq->kq_state & KQ_NO_WQ_THREAD); 6267c6723 < kq_req_held(kqwl); --- > kqwl_req_held(kqwl); 6285a6742 > assert(!static_thread); 6295c6752 < goto recompute; --- > goto recompute_async; 6304c6761 < goto recompute; --- > goto recompute_async; 6309,6312d6765 < case KQWL_UTQ_PARKING: < case KQWL_UTQ_UNBINDING: < kqr->kqr_override_index = qos; < /* FALLTHROUGH */ 6314,6317c6767,6768 < if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) { < assert(qos == THREAD_QOS_UNSPECIFIED); < } < kqlock_held(kqwl); // to look at kq_queues --- > kqlock_held(kq); // to look at kq_queues > kqr->kqr_has_sync_override = FALSE; 6322c6773 < if 
(!TAILQ_EMPTY(&kqwl->kqwl_queue[i]) && --- > if (!TAILQ_EMPTY(&kq->kq_queue[i]) && 6336c6787 < if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i])) { --- > if (!TAILQ_EMPTY(&kq->kq_queue[i])) { 6337a6789,6793 > struct knote *kn = TAILQ_FIRST(&kqwl->kqwl_kqueue.kq_queue[i]); > if (i == THREAD_QOS_USER_INTERACTIVE && > kn->kn_qos_override_is_sync) { > kqr->kqr_has_sync_override = TRUE; > } 6346c6802,6803 < goto recompute; --- > assert(qos == THREAD_QOS_UNSPECIFIED); > goto recompute_async; 6349,6350c6806,6808 < kqr->kqr_override_index = qos; < goto recompute; --- > kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED; > assert(qos == THREAD_QOS_UNSPECIFIED); > goto recompute_async; 6353c6811 < recompute: --- > recompute_async: 6355,6357c6813,6815 < * When modifying the wakeup QoS or the override QoS, we always need to < * maintain our invariant that kqr_override_index is at least as large < * as the highest QoS for which an event is fired. --- > * When modifying the wakeup QoS or the async override QoS, we always > * need to maintain our invariant that kqr_override_index is at least as > * large as the highest QoS for which an event is fired. 6373c6831,6832 < case KQWL_UTQ_SET_QOS_INDEX: --- > case KQWL_UTQ_SET_ASYNC_QOS: > filt_wlheld(kqwl); 6376a6836,6840 > case KQWL_UTQ_SET_SYNC_WAITERS_QOS: > filt_wlheld(kqwl); > kqr->kqr_dsync_waiters_qos = qos; > break; > 6380a6845,6846 > boolean_t new_ipc_override_is_sync = FALSE; > kq_index_t new_qos = kqworkloop_combined_qos(kqwl, &new_ipc_override_is_sync); 6383,6384c6849 < boolean_t qos_changed = FALSE; < kq_index_t new_owner_override = kqworkloop_owner_override(kqwl); --- > __assert_only int ret; 6389c6854 < if (kqwl_owner) { --- > if (filt_wlowner_is_valid(kqwl_owner)) { 6393,6394c6858,6861 < kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->kqr_qos_index, < (kqr->kqr_override_index << 16) | kqr->kqr_state); --- > kqwl->kqwl_dynamicid, > (kqr->kqr_state & KQR_BOUND) ? thread_tid(kqwl_owner) : 0, > (kqr->kqr_qos_index << 8) | new_qos, > (kqr->kqr_override_index << 8) | kqr->kqr_state); 6396c6863 < if (new_owner_override == old_owner_override) { --- > if (new_qos == kqr->kqr_dsync_owner_qos) { 6398,6400c6865,6867 < } else if (old_owner_override == THREAD_QOS_UNSPECIFIED) { < thread_add_ipc_override(kqwl_owner, new_owner_override); < } else if (new_owner_override == THREAD_QOS_UNSPECIFIED) { --- > } else if (kqr->kqr_dsync_owner_qos == THREAD_QOS_UNSPECIFIED) { > thread_add_ipc_override(kqwl_owner, new_qos); > } else if (new_qos == THREAD_QOS_UNSPECIFIED) { 6402,6403c6869,6870 < } else /* old_owner_override != new_owner_override */ { < thread_update_ipc_override(kqwl_owner, new_owner_override); --- > } else /* kqr->kqr_dsync_owner_qos != new_qos */ { > thread_update_ipc_override(kqwl_owner, new_qos); 6404a6872,6881 > kqr->kqr_dsync_owner_qos = new_qos; > > if (new_ipc_override_is_sync && > !kqr->kqr_owner_override_is_sync) { > thread_add_sync_ipc_override(kqwl_owner); > } else if (!new_ipc_override_is_sync && > kqr->kqr_owner_override_is_sync) { > thread_drop_sync_ipc_override(kqwl_owner); > } > kqr->kqr_owner_override_is_sync = new_ipc_override_is_sync; 6410c6887 < if ((kqr->kqr_state & KQR_THREQUESTED) == 0) { --- > if (static_thread) { 6412c6889 < * No servicer, nor thread-request --- > * Statically bound thread 6414,6416c6891,6892 < * Make a new thread request, unless there is an owner (or the workloop < * is suspended in userland) or if there is no asynchronous work in the < * first place. 
--- > * These threads don't participates in QoS overrides today, just wakeup > * the thread blocked on this kqueue if a new event arrived. 6419,6422c6895,6917 < if (kqwl_owner == NULL && (kqr->kqr_state & KQR_WAKEUP)) { < int initiate_flags = 0; < if (op == KQWL_UTQ_UNBINDING) { < initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND; --- > switch (op) { > case KQWL_UTQ_UPDATE_WAKEUP_QOS: > case KQWL_UTQ_UPDATE_STAYACTIVE_QOS: > case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS: > break; > > case KQWL_UTQ_RESET_WAKEUP_OVERRIDE: > case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE: > case KQWL_UTQ_REDRIVE_EVENTS: > case KQWL_UTQ_SET_ASYNC_QOS: > case KQWL_UTQ_SET_SYNC_WAITERS_QOS: > panic("should never be called"); > break; > } > > kqlock_held(kq); > > if ((kqr->kqr_state & KQR_BOUND) && (kqr->kqr_state & KQR_WAKEUP)) { > assert(servicer && !is_workqueue_thread(servicer)); > if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) { > kq->kq_state &= ~(KQ_SLEEP | KQ_SEL); > waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, KQ_EVENT, > THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); 6424,6425d6918 < kqueue_threadreq_initiate(kq, kqr, new_owner_override, < initiate_flags); 6427c6920 < } else if (servicer) { --- > } else if ((kqr->kqr_state & KQR_THREQUESTED) == 0) { 6429c6922 < * Servicer in flight --- > * No servicer, nor thread-request 6431c6924,6926 < * Just apply the diff to the servicer --- > * Make a new thread request, unless there is an owner (or the workloop > * is suspended in userland) or if there is no asynchronous work in the > * first place. 6433,6443c6928,6930 < struct uthread *ut = get_bsdthread_info(servicer); < if (ut->uu_kqueue_override != kqr->kqr_override_index) { < if (ut->uu_kqueue_override == THREAD_QOS_UNSPECIFIED) { < thread_add_ipc_override(servicer, kqr->kqr_override_index); < } else if (kqr->kqr_override_index == THREAD_QOS_UNSPECIFIED) { < thread_drop_ipc_override(servicer); < } else /* ut->uu_kqueue_override != kqr->kqr_override_index */ { < thread_update_ipc_override(servicer, kqr->kqr_override_index); < } < ut->uu_kqueue_override = kqr->kqr_override_index; < qos_changed = TRUE; --- > > if (kqwl_owner == THREAD_NULL && (kqr->kqr_state & KQR_WAKEUP)) { > kqworkloop_request_thread(kqwl, new_qos); 6445c6932,6933 < } else if (new_owner_override == THREAD_QOS_UNSPECIFIED) { --- > } else if ((kqr->kqr_state & KQR_BOUND) == 0 && > (kqwl_owner || (kqr->kqr_state & KQR_WAKEUP) == 0)) { 6447c6935 < * No events to deliver anymore. --- > * No servicer, thread request in flight we want to cancel 6449,6451c6937,6938 < * However canceling with turnstiles is challenging, so the fact that < * the request isn't useful will be discovered by the servicer himself < * later on. --- > * We just got rid of the last knote of the kqueue or noticed an owner > * with a thread request still in flight, take it back. 
6453c6940,6948 < } else if (old_owner_override != new_owner_override) { --- > ret = (*pthread_functions->workq_threadreq_modify)(kqwl->kqwl_p, > &kqr->kqr_req, WORKQ_THREADREQ_CANCEL, 0, 0); > if (ret == 0) { > kqr->kqr_state &= ~KQR_THREQUESTED; > kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF); > } > } else { > boolean_t qos_changed = FALSE; > 6455c6950 < * Request is in flight --- > * Servicer or request is in flight 6457c6952 < * Apply the diff to the thread request --- > * Just apply the diff to the servicer or the thread request 6459,6461c6954,6967 < kqueue_threadreq_modify(kq, kqr, new_owner_override); < qos_changed = TRUE; < } --- > if (kqr->kqr_state & KQR_BOUND) { > servicer = kqr->kqr_thread; > struct uthread *ut = get_bsdthread_info(servicer); > if (ut->uu_kqueue_qos_index != new_qos) { > if (ut->uu_kqueue_qos_index == THREAD_QOS_UNSPECIFIED) { > thread_add_ipc_override(servicer, new_qos); > } else if (new_qos == THREAD_QOS_UNSPECIFIED) { > thread_drop_ipc_override(servicer); > } else /* ut->uu_kqueue_qos_index != new_qos */ { > thread_update_ipc_override(servicer, new_qos); > } > ut->uu_kqueue_qos_index = new_qos; > qos_changed = TRUE; > } 6463,6466c6969,6992 < if (qos_changed) { < KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid, < thread_tid(kqr->kqr_thread), kqr->kqr_qos_index, < (kqr->kqr_override_index << 16) | kqr->kqr_state); --- > if (new_ipc_override_is_sync != ut->uu_kqueue_override_is_sync) { > if (new_ipc_override_is_sync && > !ut->uu_kqueue_override_is_sync) { > thread_add_sync_ipc_override(servicer); > } else if (!new_ipc_override_is_sync && > ut->uu_kqueue_override_is_sync) { > thread_drop_sync_ipc_override(servicer); > } > ut->uu_kqueue_override_is_sync = new_ipc_override_is_sync; > qos_changed = TRUE; > } > } else if (old_qos != new_qos) { > assert(new_qos); > kqworkloop_threadreq_modify(kqwl, new_qos); > qos_changed = TRUE; > } > if (qos_changed) { > servicer = kqr->kqr_thread; > KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), > kqwl->kqwl_dynamicid, > (kqr->kqr_state & KQR_BOUND) ? thread_tid(servicer) : 0, > (kqr->kqr_qos_index << 16) | (new_qos << 8) | new_ipc_override_is_sync, > (kqr->kqr_override_index << 8) | kqr->kqr_state); > } 6476c7002 < kq_req_lock(kqwl); --- > kqwl_req_lock(kqwl); 6478c7004 < kq_req_unlock(kqwl); --- > kqwl_req_unlock(kqwl); 6480a7007,7013 > /* > * These arrays described the low and high qindexes for a given qos_index. > * The values come from the chart in (must stay in sync). 
> */ > static kq_index_t _kqwq_base_index[KQWQ_NQOS] = {0, 0, 6, 11, 15, 18, 20, 21}; > static kq_index_t _kqwq_high_index[KQWQ_NQOS] = {0, 5, 10, 14, 17, 19, 20, 21}; > 6482c7015 < kqueue_get_queue(struct kqueue *kq, kq_index_t qos_index) --- > kqueue_get_base_queue(struct kqueue *kq, kq_index_t qos_index) 6485c7018,7019 < assert(qos_index < KQWQ_NBUCKETS); --- > assert(qos_index < KQWQ_NQOS); > return &kq->kq_queue[_kqwq_base_index[qos_index]]; 6487a7022 > return &kq->kq_queue[qos_index]; 6489a7025 > return &kq->kq_queue[QOS_INDEX_KQFILE]; 6491,6499d7026 < static_assert(offsetof(struct kqueue, kq_queue) == sizeof(struct kqueue), < "struct kqueue::kq_queue must be exactly at the end"); < return &kq->kq_queue[qos_index]; < } < < static int < kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index) < { < return TAILQ_EMPTY(kqueue_get_queue(kq, qos_index)); 6503c7030 < kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn) --- > kqueue_get_high_queue(struct kqueue *kq, kq_index_t qos_index) 6505,6508c7032,7037 < if (kq.kq->kq_state & KQ_WORKQ) { < return &kqworkq_get_request(kq.kqwq, kn->kn_qos_index)->kqr_suppressed; < } else if (kq.kq->kq_state & KQ_WORKLOOP) { < return &kq.kqwl->kqwl_request.kqr_suppressed; --- > if (kq->kq_state & KQ_WORKQ) { > assert(qos_index < KQWQ_NQOS); > return &kq->kq_queue[_kqwq_high_index[qos_index]]; > } else if (kq->kq_state & KQ_WORKLOOP) { > assert(qos_index < KQWL_NBUCKETS); > return &kq->kq_queue[KQWL_BUCKET_STAYACTIVE]; 6510c7039,7040 < return &kq.kqf->kqf_suppressed; --- > assert(qos_index == QOS_INDEX_KQFILE); > return &kq->kq_queue[QOS_INDEX_KQFILE]; 6514,6515c7044,7045 < static struct turnstile * < kqueue_get_turnstile(kqueue_t kqu, bool can_alloc) --- > static int > kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index) 6517,6521c7047,7048 < uint8_t kqr_state; < < if ((kqu.kq->kq_state & KQ_WORKLOOP) == 0) { < return TURNSTILE_NULL; < } --- > struct kqtailq *base_queue = kqueue_get_base_queue(kq, qos_index); > struct kqtailq *queue = kqueue_get_high_queue(kq, qos_index); 6523,6528c7050,7055 < kqr_state = os_atomic_load(&kqu.kqwl->kqwl_request.kqr_state, relaxed); < if (kqr_state & KQR_ALLOCATED_TURNSTILE) { < /* force a dependency to pair with the atomic or with release below */ < return os_atomic_load_with_dependency_on(&kqu.kqwl->kqwl_turnstile, < kqr_state); < } --- > do { > if (!TAILQ_EMPTY(queue)) > return 0; > } while (queue-- > base_queue); > return 1; > } 6530,6532c7057,7061 < if (!can_alloc) { < return TURNSTILE_NULL; < } --- > static struct kqtailq * > kqueue_get_suppressed_queue(struct kqueue *kq, kq_index_t qos_index) > { > struct kqtailq *res; > struct kqrequest *kqr; 6534c7063,7064 < struct turnstile *ts = turnstile_alloc(), *free_ts = TURNSTILE_NULL; --- > if (kq->kq_state & KQ_WORKQ) { > struct kqworkq *kqwq = (struct kqworkq *)kq; 6536,6539c7066,7069 < kq_req_lock(kqu); < if (filt_wlturnstile_interlock_is_workq(kqu.kqwl)) { < workq_kern_threadreq_lock(kqu.kqwl->kqwl_p); < } --- > kqr = kqworkq_get_request(kqwq, qos_index); > res = &kqr->kqr_suppressed; > } else if (kq->kq_state & KQ_WORKLOOP) { > struct kqworkloop *kqwl = (struct kqworkloop *)kq; 6541,6543c7071,7072 < if (kqu.kqwl->kqwl_request.kqr_state & KQR_ALLOCATED_TURNSTILE) { < free_ts = ts; < ts = kqu.kqwl->kqwl_turnstile; --- > kqr = &kqwl->kqwl_request; > res = &kqr->kqr_suppressed; 6545,6550c7074,7075 < ts = turnstile_prepare((uintptr_t)kqu.kqwl, &kqu.kqwl->kqwl_turnstile, < ts, TURNSTILE_WORKLOOPS); < < /* release-barrier to pair with the unlocked load of 
kqwl_turnstile above */ < os_atomic_or(&kqu.kqwl->kqwl_request.kqr_state, < KQR_ALLOCATED_TURNSTILE, release); --- > struct kqfile *kqf = (struct kqfile *)kq; > res = &kqf->kqf_suppressed; 6551a7077,7078 > return res; > } 6553,6556c7080,7086 < if (filt_wlturnstile_interlock_is_workq(kqu.kqwl)) { < workq_kern_threadreq_unlock(kqu.kqwl->kqwl_p); < } < kq_req_unlock(kqu.kqwl); --- > static kq_index_t > knote_get_queue_index(struct knote *kn) > { > kq_index_t override_index = knote_get_qos_override_index(kn); > kq_index_t qos_index = knote_get_qos_index(kn); > struct kqueue *kq = knote_get_kq(kn); > kq_index_t res; 6558,6559c7088,7099 < if (free_ts) { < turnstile_deallocate(free_ts); --- > if (kq->kq_state & KQ_WORKQ) { > res = _kqwq_base_index[qos_index]; > if (override_index > qos_index) > res += override_index - qos_index; > assert(res <= _kqwq_high_index[qos_index]); > } else if (kq->kq_state & KQ_WORKLOOP) { > res = MAX(override_index, qos_index); > assert(res < KQWL_NBUCKETS); > } else { > assert(qos_index == QOS_INDEX_KQFILE); > assert(override_index == QOS_INDEX_KQFILE); > res = QOS_INDEX_KQFILE; 6561c7101 < return ts; --- > return res; 6564,6565c7104,7105 < struct turnstile * < kqueue_turnstile(struct kqueue *kq) --- > static struct kqtailq * > knote_get_queue(struct knote *kn) 6567c7107,7109 < return kqueue_get_turnstile(kq, false); --- > kq_index_t qindex = knote_get_queue_index(kn); > > return &(knote_get_kq(kn))->kq_queue[qindex]; 6570,6571c7112,7113 < struct turnstile * < kqueue_alloc_turnstile(struct kqueue *kq) --- > static kq_index_t > knote_get_req_index(struct knote *kn) 6573c7115 < return kqueue_get_turnstile(kq, true); --- > return kn->kn_req_index; 6576,6577c7118,7119 < static struct kqtailq * < knote_get_queue(struct knote *kn) --- > static kq_index_t > knote_get_qos_index(struct knote *kn) 6579c7121 < return kqueue_get_queue(knote_get_kq(kn), kn->kn_qos_index); --- > return kn->kn_qos_index; 6583c7125 < knote_reset_priority(struct knote *kn, pthread_priority_t pp) --- > knote_set_qos_index(struct knote *kn, kq_index_t qos_index) 6586d7127 < kq_index_t qos = _pthread_priority_thread_qos(pp); 6587a7129 > assert(qos_index < KQWQ_NQOS); 6591,6597c7133 < if (qos == THREAD_QOS_UNSPECIFIED) { < /* On workqueues, outside of QoS means MANAGER */ < qos = KQWQ_QOS_MANAGER; < pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; < } else { < pp = _pthread_priority_normalize(pp); < } --- > assert(qos_index > THREAD_QOS_UNSPECIFIED); 6599,6607c7135,7139 < assert((pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) == 0); < pp = _pthread_priority_normalize(pp); < } else { < pp = _pthread_unspecified_priority(); < qos = THREAD_QOS_UNSPECIFIED; < } < < kn->kn_qos = pp; < kn->kn_req_index = qos; --- > /* XXX this policy decision shouldn't be here */ > if (qos_index == THREAD_QOS_UNSPECIFIED) > qos_index = THREAD_QOS_LEGACY; > } else > qos_index = QOS_INDEX_KQFILE; 6609,6612c7141,7142 < if ((kn->kn_status & KN_MERGE_QOS) == 0 || qos > kn->kn_qos_override) { < /* Never lower QoS when in "Merge" mode */ < kn->kn_qos_override = qos; < } --- > /* always set requested */ > kn->kn_req_index = qos_index; 6615,6621c7145,7146 < if ((kn->kn_status & KN_SUPPRESSED) == 0) { < kn->kn_qos_index = qos; < } else if (kq->kq_state & KQ_WORKQ) { < kqworkq_update_override((struct kqworkq *)kq, kn, qos); < } else if (kq->kq_state & KQ_WORKLOOP) { < kqworkloop_update_override((struct kqworkloop *)kq, qos); < } --- > if ((kn->kn_status & KN_SUPPRESSED) == 0) > kn->kn_qos_index = qos_index; 6627a7153 > struct kqrequest *kqr; 
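The new kqueue_get_turnstile() above allocates the workloop turnstile lazily and publishes KQR_ALLOCATED_TURNSTILE with a release barrier, so the unlocked fast path can read kqwl_turnstile through a dependency-carrying load; an allocator that loses the race deallocates its spare. The same create-then-publish shape, redone with portable C11 atomics and invented names (a sketch, not the kernel mechanism):

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct lazy_obj { int ready; };

static _Atomic(struct lazy_obj *) g_obj = NULL;

static struct lazy_obj *get_or_alloc(void)
{
    struct lazy_obj *cur = atomic_load_explicit(&g_obj, memory_order_acquire);
    if (cur != NULL)
        return cur;                      /* fast path: already published */

    struct lazy_obj *fresh = malloc(sizeof(*fresh));
    if (fresh == NULL)
        abort();
    fresh->ready = 1;                    /* fully initialize before publishing */

    struct lazy_obj *expected = NULL;
    if (atomic_compare_exchange_strong_explicit(&g_obj, &expected, fresh,
            memory_order_acq_rel, memory_order_acquire)) {
        return fresh;                    /* we published our copy */
    }
    free(fresh);                         /* lost the race: use the winner's */
    return expected;
}

int main(void)
{
    struct lazy_obj *o = get_or_alloc();
    printf("ready=%d, second call same=%d\n", o->ready, o == get_or_alloc());
    return 0;
}

The kernel version additionally takes the workq thread-request lock when that lock is the turnstile interlock, which the sketch leaves out.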
6630,6633c7156,7159 < if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) && < (kq->kq_state & KQ_WORKLOOP)) { < struct kqworkloop *kqwl = (struct kqworkloop *)kq; < struct kqrequest *kqr = &kqwl->kqwl_request; --- > if (kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) { > if (kq->kq_state & KQ_WORKQ) { > kq_index_t qos_index = knote_get_qos_index(kn); > struct kqworkq *kqwq = (struct kqworkq *)kq; 6635,6641c7161 < /* < * This test is racy, but since we never remove this bit, < * it allows us to avoid taking a lock. < */ < if (kqr->kqr_state & KQR_THOVERCOMMIT) { < return; < } --- > kqr = kqworkq_get_request(kqwq, qos_index); 6643,6646c7163,7173 < kq_req_lock(kqwl); < kqr->kqr_state |= KQR_THOVERCOMMIT; < if (!kqr->kqr_thread && (kqr->kqr_state & KQR_THREQUESTED)) { < kqueue_threadreq_modify(kq, kqr, kqr->kqr_req.tr_qos); --- > kqwq_req_lock(kqwq); > kqr->kqr_state |= KQR_THOVERCOMMIT; > kqwq_req_unlock(kqwq); > } else if (kq->kq_state & KQ_WORKLOOP) { > struct kqworkloop *kqwl = (struct kqworkloop *)kq; > > kqr = &kqwl->kqwl_request; > > kqwl_req_lock(kqwl); > kqr->kqr_state |= KQR_THOVERCOMMIT; > kqwl_req_unlock(kqwl); 6648d7174 < kq_req_unlock(kqwl); 6659,6660c7185,7243 < kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn, < kq_index_t override_index) --- > knote_set_qos_override_index(struct knote *kn, kq_index_t override_index, > boolean_t override_is_sync) > { > struct kqueue *kq = knote_get_kq(kn); > kq_index_t qos_index = knote_get_qos_index(kn); > kq_index_t old_override_index = knote_get_qos_override_index(kn); > boolean_t old_override_is_sync = kn->kn_qos_override_is_sync; > uint32_t flags = 0; > > assert((kn->kn_status & KN_QUEUED) == 0); > > if (override_index == KQWQ_QOS_MANAGER) { > assert(qos_index == KQWQ_QOS_MANAGER); > } else { > assert(override_index < KQWQ_QOS_MANAGER); > } > > kn->kn_qos_override = override_index; > kn->kn_qos_override_is_sync = override_is_sync; > > /* > * If this is a workq/workloop kqueue, apply the override to the > * servicing thread. 
> */ > if (kq->kq_state & KQ_WORKQ) { > struct kqworkq *kqwq = (struct kqworkq *)kq; > > assert(qos_index > THREAD_QOS_UNSPECIFIED); > kqworkq_update_override(kqwq, qos_index, override_index); > } else if (kq->kq_state & KQ_WORKLOOP) { > struct kqworkloop *kqwl = (struct kqworkloop *)kq; > > if ((kn->kn_status & KN_SUPPRESSED) == KN_SUPPRESSED) { > flags = flags | KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS; > > if (override_index == THREAD_QOS_USER_INTERACTIVE > && override_is_sync) { > flags = flags | KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI; > } > > if (old_override_index == THREAD_QOS_USER_INTERACTIVE > && old_override_is_sync) { > flags = flags | KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI; > } > } > > assert(qos_index > THREAD_QOS_UNSPECIFIED); > kqworkloop_update_override(kqwl, qos_index, override_index, flags); > } > } > > static kq_index_t > knote_get_sync_qos_override_index(struct knote *kn) > { > return kn->kn_qos_sync_override; > } > > static void > kqworkq_update_override(struct kqworkq *kqwq, kq_index_t qos_index, kq_index_t override_index) 6664d7246 < kq_index_t queue_index = kn->kn_qos_index; 6666c7248 < if (override_index <= queue_index) { --- > if (override_index <= qos_index) { 6670c7252 < kqr = kqworkq_get_request(kqwq, queue_index); --- > kqr = kqworkq_get_request(kqwq, qos_index); 6672c7254 < kq_req_lock(kqwq); --- > kqwq_req_lock(kqwq); 6678,6682c7260,7270 < if (kqr->kqr_thread) { < if (old_override_index) < thread_update_ipc_override(kqr->kqr_thread, override_index); < else < thread_add_ipc_override(kqr->kqr_thread, override_index); --- > if (kqr->kqr_state & KQR_BOUND) { > thread_t wqthread = kqr->kqr_thread; > > /* only apply if non-manager */ > assert(wqthread); > if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) { > if (old_override_index) > thread_update_ipc_override(wqthread, override_index); > else > thread_add_ipc_override(wqthread, override_index); > } 6685c7273 < kq_req_unlock(kqwq); --- > kqwq_req_unlock(kqwq); 6687a7276 > /* called with the kqworkq lock held */ 6689c7278,7282 < kqworkloop_update_override(struct kqworkloop *kqwl, kq_index_t override_index) --- > kqworkq_bind_thread_impl( > struct kqworkq *kqwq, > kq_index_t qos_index, > thread_t thread, > unsigned int flags) 6691,6694c7284,7350 < kq_req_lock(kqwl); < kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE, < override_index); < kq_req_unlock(kqwl); --- > /* request lock must be held */ > kqwq_req_held(kqwq); > > struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index); > assert(kqr->kqr_state & KQR_THREQUESTED); > > if (qos_index == KQWQ_QOS_MANAGER) > flags |= KEVENT_FLAG_WORKQ_MANAGER; > > struct uthread *ut = get_bsdthread_info(thread); > > /* > * If this is a manager, and the manager request bit is > * not set, assure no other thread is bound. If the bit > * is set, make sure the old thread is us (or not set). 
> */ > if (flags & KEVENT_FLAG_WORKQ_MANAGER) { > if ((kqr->kqr_state & KQR_BOUND) == 0) { > kqr->kqr_state |= (KQR_BOUND | KQWQ_THMANAGER); > TAILQ_INIT(&kqr->kqr_suppressed); > kqr->kqr_thread = thread; > ut->uu_kqueue_bound = (struct kqueue *)kqwq; > ut->uu_kqueue_qos_index = KQWQ_QOS_MANAGER; > ut->uu_kqueue_flags = (KEVENT_FLAG_WORKQ | > KEVENT_FLAG_WORKQ_MANAGER); > } else { > assert(kqr->kqr_state & KQR_BOUND); > assert(thread == kqr->kqr_thread); > assert(ut->uu_kqueue_bound == (struct kqueue *)kqwq); > assert(ut->uu_kqueue_qos_index == KQWQ_QOS_MANAGER); > assert(ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ_MANAGER); > } > return; > } > > /* Just a normal one-queue servicing thread */ > assert(kqr->kqr_state & KQR_THREQUESTED); > assert(kqr->kqr_qos_index == qos_index); > > if ((kqr->kqr_state & KQR_BOUND) == 0) { > kqr->kqr_state |= KQR_BOUND; > TAILQ_INIT(&kqr->kqr_suppressed); > kqr->kqr_thread = thread; > > /* apply an ipc QoS override if one is needed */ > if (kqr->kqr_override_index) { > assert(kqr->kqr_qos_index); > assert(kqr->kqr_override_index > kqr->kqr_qos_index); > assert(thread_get_ipc_override(thread) == THREAD_QOS_UNSPECIFIED); > thread_add_ipc_override(thread, kqr->kqr_override_index); > } > > /* indicate that we are processing in the uthread */ > ut->uu_kqueue_bound = (struct kqueue *)kqwq; > ut->uu_kqueue_qos_index = qos_index; > ut->uu_kqueue_flags = flags; > } else { > /* > * probably syncronously bound AND post-request bound > * this logic can go away when we get rid of post-request bind > */ > assert(kqr->kqr_state & KQR_BOUND); > assert(thread == kqr->kqr_thread); > assert(ut->uu_kqueue_bound == (struct kqueue *)kqwq); > assert(ut->uu_kqueue_qos_index == qos_index); > assert((ut->uu_kqueue_flags & flags) == flags); > } 6697,6698c7353,7358 < static thread_qos_t < kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread) --- > static void > kqworkloop_update_override( > struct kqworkloop *kqwl, > kq_index_t qos_index, > kq_index_t override_index, > uint32_t flags) 6700d7359 < struct uthread *ut = get_bsdthread_info(thread); 6702d7360 < kq_index_t ipc_override = ut->uu_kqueue_override; 6704,6705c7362,7373 < KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid, < thread_tid(thread), 0, 0); --- > kqwl_req_lock(kqwl); > > /* Do not override on attached threads */ > if (kqr->kqr_state & KQR_BOUND) { > assert(kqr->kqr_thread); > > if (kqwl->kqwl_kqueue.kq_state & KQ_NO_WQ_THREAD) { > kqwl_req_unlock(kqwl); > assert(!is_workqueue_thread(kqr->kqr_thread)); > return; > } > } 6707,6716c7375,7377 < kq_req_held(kqwl); < assert(ut->uu_kqr_bound == kqr); < ut->uu_kqr_bound = NULL; < ut->uu_kqueue_override = THREAD_QOS_UNSPECIFIED; < < if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) { < turnstile_update_inheritor(kqwl->kqwl_turnstile, < TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE); < turnstile_update_inheritor_complete(kqwl->kqwl_turnstile, < TURNSTILE_INTERLOCK_HELD); --- > /* Update sync ipc counts on kqr for suppressed knotes */ > if (flags & KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS) { > kqworkloop_update_suppress_sync_count(kqr, flags); 6719,6721c7380,7399 < kqr->kqr_thread = NULL; < kqr->kqr_state &= ~(KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED); < return ipc_override; --- > if ((flags & KQWL_UO_UPDATE_OVERRIDE_LAZY) == 0) { > kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE, > MAX(qos_index, override_index)); > } > kqwl_req_unlock(kqwl); > } > > static void > kqworkloop_update_suppress_sync_count( > struct kqrequest *kqr, 
> uint32_t flags) > { > if (flags & KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI) { > kqr->kqr_sync_suppress_count++; > } > > if (flags & KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI) { > assert(kqr->kqr_sync_suppress_count > 0); > kqr->kqr_sync_suppress_count--; > } 6725c7403 < * kqworkloop_unbind - Unbind the servicer thread of a workloop kqueue --- > * kqworkloop_unbind_thread - Unbind the servicer thread of a workloop kqueue 6727,6728c7405,7409 < * It will acknowledge events, and possibly request a new thread if: < * - there were active events left --- > * It will end the processing phase in case it was still processing: > * > * We may have to request a new thread for not KQ_NO_WQ_THREAD workloop. > * This can happen if : > * - there were active events at or above our QoS we never got to (count > 0) 6733a7415 > 6735c7417,7420 < kqworkloop_unbind(proc_t p, struct kqworkloop *kqwl) --- > kqworkloop_unbind_thread( > struct kqworkloop *kqwl, > thread_t thread, > __unused unsigned int flags) 6739,6741d7423 < thread_t thread = kqr->kqr_thread; < int op = KQWL_UTQ_PARKING; < kq_index_t ipc_override, qos_override = THREAD_QOS_UNSPECIFIED; 6743c7425 < assert(thread == current_thread()); --- > kqlock_held(kq); 6745c7427,7430 < kqlock(kqwl); --- > assert((kq->kq_state & KQ_PROCESSING) == 0); > if (kq->kq_state & KQ_PROCESSING) { > return; > } 6752,6756c7437,7447 < assert((kq->kq_state & KQ_PROCESSING) == 0); < if (!TAILQ_EMPTY(&kqr->kqr_suppressed)) { < kq->kq_state |= KQ_PROCESSING; < qos_override = kqworkloop_acknowledge_events(kqwl); < kq->kq_state &= ~KQ_PROCESSING; --- > kq->kq_state |= KQ_PROCESSING; > kqworkloop_acknowledge_events(kqwl, TRUE); > kq->kq_state &= ~KQ_PROCESSING; > > kqwl_req_lock(kqwl); > > /* deal with extraneous unbinds in release kernels */ > assert((kqr->kqr_state & (KQR_BOUND | KQR_PROCESSING)) == KQR_BOUND); > if ((kqr->kqr_state & (KQR_BOUND | KQR_PROCESSING)) != KQR_BOUND) { > kqwl_req_unlock(kqwl); > return; 6759c7450,7455 < kq_req_lock(kqwl); --- > assert(thread == current_thread()); > assert(kqr->kqr_thread == thread); > if (kqr->kqr_thread != thread) { > kqwl_req_unlock(kqwl); > return; > } 6761,6762c7457,7463 < ipc_override = kqworkloop_unbind_locked(kqwl, thread); < kqworkloop_update_threads_qos(kqwl, op, qos_override); --- > struct uthread *ut = get_bsdthread_info(thread); > kq_index_t old_qos_index = ut->uu_kqueue_qos_index; > boolean_t ipc_override_is_sync = ut->uu_kqueue_override_is_sync; > ut->uu_kqueue_bound = NULL; > ut->uu_kqueue_qos_index = 0; > ut->uu_kqueue_override_is_sync = 0; > ut->uu_kqueue_flags = 0; 6764c7465,7468 < kq_req_unlock(kqwl); --- > /* unbind the servicer thread, drop overrides */ > kqr->kqr_thread = NULL; > kqr->kqr_state &= ~(KQR_BOUND | KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED); > kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_RECOMPUTE_WAKEUP_QOS, 0); 6766c7470 < kqunlock(kqwl); --- > kqwl_req_unlock(kqwl); 6772c7476 < if (ipc_override) { --- > if (old_qos_index) { 6775,6777c7479,7481 < < /* If last reference, dealloc the workloop kq */ < kqueue_release_last(p, kqwl); --- > if (ipc_override_is_sync) { > thread_drop_sync_ipc_override(thread); > } 6780,6782c7484,7490 < static thread_qos_t < kqworkq_unbind_locked(__assert_only struct kqworkq *kqwq, < struct kqrequest *kqr, thread_t thread) --- > /* called with the kqworkq lock held */ > static void > kqworkq_unbind_thread( > struct kqworkq *kqwq, > kq_index_t qos_index, > thread_t thread, > __unused unsigned int flags) 6784,6785c7492,7493 < struct uthread *ut = get_bsdthread_info(thread); < kq_index_t old_override 
= kqr->kqr_override_index; --- > struct kqrequest *kqr = kqworkq_get_request(kqwq, qos_index); > kq_index_t override_index = 0; 6787,6788c7495,7496 < KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1, < thread_tid(kqr->kqr_thread), kqr->kqr_qos_index, 0); --- > /* request lock must be held */ > kqwq_req_held(kqwq); 6790,6795c7498 < kq_req_held(kqwq); < assert(ut->uu_kqr_bound == kqr); < ut->uu_kqr_bound = NULL; < kqr->kqr_thread = NULL; < kqr->kqr_state &= ~(KQR_THREQUESTED | KQR_R2K_NOTIF_ARMED); < kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED; --- > assert(thread == current_thread()); 6797,6798c7500,7503 < return old_override; < } --- > if ((kqr->kqr_state & KQR_BOUND) == 0) { > assert(kqr->kqr_state & KQR_BOUND); > return; > } 6800,6813c7505,7533 < /* < * kqworkq_unbind - unbind of a workq kqueue from a thread < * < * We may have to request new threads. < * This can happen there are no waiting processing threads and: < * - there were active events we never got to (count > 0) < * - we pended waitq hook callouts during processing < * - we pended wakeups while processing (or unsuppressing) < */ < static void < kqworkq_unbind(proc_t p, struct kqrequest *kqr) < { < struct kqworkq *kqwq = (struct kqworkq *)p->p_fd->fd_wqkqueue; < __assert_only int rc; --- > assert(kqr->kqr_thread == thread); > assert(TAILQ_EMPTY(&kqr->kqr_suppressed)); > > /* > * If there is an override, drop it from the current thread > * and then we are free to recompute (a potentially lower) > * minimum override to apply to the next thread request. > */ > if (kqr->kqr_override_index) { > struct kqtailq *base_queue = kqueue_get_base_queue(&kqwq->kqwq_kqueue, qos_index); > struct kqtailq *queue = kqueue_get_high_queue(&kqwq->kqwq_kqueue, qos_index); > > /* if not bound to a manager thread, drop the current ipc override */ > if ((kqr->kqr_state & KQWQ_THMANAGER) == 0) { > thread_drop_ipc_override(thread); > } > > /* recompute the new override */ > do { > if (!TAILQ_EMPTY(queue)) { > override_index = queue - base_queue + qos_index; > break; > } > } while (queue-- > base_queue); > } > > /* Mark it unbound */ > kqr->kqr_thread = NULL; > kqr->kqr_state &= ~(KQR_BOUND | KQR_THREQUESTED | KQWQ_THMANAGER); 6815,6818c7535,7540 < kqlock(kqwq); < rc = kqworkq_acknowledge_events(kqwq, kqr, 0, KQWQAE_UNBIND); < assert(rc == -1); < kqunlock(kqwq); --- > /* apply the new override */ > if (override_index > kqr->kqr_qos_index) { > kqr->kqr_override_index = override_index; > } else { > kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED; > } 6824c7546 < assert(qos_index < KQWQ_NBUCKETS); --- > assert(qos_index < KQWQ_NQOS); 6828,6829c7550,7551 < static void < knote_apply_qos_override(struct knote *kn, kq_index_t qos_index) --- > void > knote_adjust_qos(struct knote *kn, qos_t new_qos, qos_t new_override, kq_index_t sync_override_index) 6831c7553,7554 < assert((kn->kn_status & KN_QUEUED) == 0); --- > struct kqueue *kq = knote_get_kq(kn); > boolean_t override_is_sync = FALSE; 6833c7556,7573 < kn->kn_qos_override = qos_index; --- > if (kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) { > kq_index_t new_qos_index; > kq_index_t new_override_index; > kq_index_t servicer_qos_index; > > new_qos_index = qos_index_from_qos(kn, new_qos, FALSE); > new_override_index = qos_index_from_qos(kn, new_override, TRUE); > > /* make sure the servicer qos acts as a floor */ > servicer_qos_index = qos_index_from_qos(kn, kn->kn_qos, FALSE); > if (servicer_qos_index > new_qos_index) > new_qos_index = servicer_qos_index; > if (servicer_qos_index > new_override_index) > 
new_override_index = servicer_qos_index; > if (sync_override_index >= new_override_index) { > new_override_index = sync_override_index; > override_is_sync = TRUE; > } 6835,6848c7575,7588 < if (kn->kn_status & KN_SUPPRESSED) { < struct kqueue *kq = knote_get_kq(kn); < /* < * For suppressed events, the kn_qos_index field cannot be touched as it < * allows us to know on which supress queue the knote is for a kqworkq. < * < * Also, there's no natural push applied on the kqueues when this field < * changes anyway. We hence need to apply manual overrides in this case, < * which will be cleared when the events are later acknowledged. < */ < if (kq->kq_state & KQ_WORKQ) { < kqworkq_update_override((struct kqworkq *)kq, kn, qos_index); < } else { < kqworkloop_update_override((struct kqworkloop *)kq, qos_index); --- > kqlock(kq); > if (new_qos_index != knote_get_req_index(kn) || > new_override_index != knote_get_qos_override_index(kn) || > override_is_sync != kn->kn_qos_override_is_sync) { > if (kn->kn_status & KN_QUEUED) { > knote_dequeue(kn); > knote_set_qos_index(kn, new_qos_index); > knote_set_qos_override_index(kn, new_override_index, override_is_sync); > knote_enqueue(kn); > knote_wakeup(kn); > } else { > knote_set_qos_index(kn, new_qos_index); > knote_set_qos_override_index(kn, new_override_index, override_is_sync); > } 6850,6851c7590 < } else { < kn->kn_qos_index = qos_index; --- > kqunlock(kq); 6855,6857c7594,7595 < static bool < knote_should_apply_qos_override(struct kqueue *kq, struct knote *kn, int result, < thread_qos_t *qos_out) --- > void > knote_adjust_sync_qos(struct knote *kn, kq_index_t sync_qos, boolean_t lock_kq) 6859c7597,7606 < thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7; --- > struct kqueue *kq = knote_get_kq(kn); > kq_index_t old_sync_override; > kq_index_t qos_index = knote_get_qos_index(kn); > uint32_t flags = 0; > > /* Tracking only happens for UI qos */ > if (sync_qos != THREAD_QOS_USER_INTERACTIVE && > sync_qos != THREAD_QOS_UNSPECIFIED) { > return; > } 6861c7608,7609 < kqlock_held(kq); --- > if (lock_kq) > kqlock(kq); 6863,6864c7611,7612 < assert(result & FILTER_ADJUST_EVENT_QOS_BIT); < assert(qos_index < THREAD_QOS_LAST); --- > if (kq->kq_state & KQ_WORKLOOP) { > struct kqworkloop *kqwl = (struct kqworkloop *)kq; 6866,6885c7614,7620 < /* < * Early exit for knotes that should not change QoS < * < * It is safe to test kn_req_index against MANAGER / STAYACTIVE because < * knotes with such kn_req_index values never change for their entire < * lifetime. < */ < if (__improbable(!knote_fops(kn)->f_adjusts_qos)) { < panic("filter %d cannot change QoS", kn->kn_filtid); < } else if (kq->kq_state & KQ_WORKLOOP) { < if (kn->kn_req_index == KQWL_BUCKET_STAYACTIVE) { < return false; < } < } else if (kq->kq_state & KQ_WORKQ) { < if (kn->kn_req_index == KQWQ_QOS_MANAGER) { < return false; < } < } else { < return false; < } --- > old_sync_override = knote_get_sync_qos_override_index(kn); > if (old_sync_override != sync_qos) { > kn->kn_qos_sync_override = sync_qos; > > /* update sync ipc counters for suppressed knotes */ > if ((kn->kn_status & KN_SUPPRESSED) == KN_SUPPRESSED) { > flags = flags | KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS; 6887,6901c7622,7623 < /* < * knotes with the FALLBACK flag will only use their registration QoS if the < * incoming event has no QoS, else, the registration QoS acts as a floor. 
< */ < if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) { < if (qos_index == THREAD_QOS_UNSPECIFIED) < qos_index = kn->kn_req_index; < } else { < if (qos_index < kn->kn_req_index) < qos_index = kn->kn_req_index; < } < if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) { < /* Never lower QoS when in "Merge" mode */ < return false; < } --- > /* Do not recalculate kqwl override, it would be done later */ > flags = flags | KQWL_UO_UPDATE_OVERRIDE_LAZY; 6903,6918c7625,7627 < if ((kn->kn_status & KN_LOCKED) && kn->kn_inuse) { < /* < * When we're trying to update the QoS override and that both an < * f_event() and other f_* calls are running concurrently, any of these < * in flight calls may want to perform overrides that aren't properly < * serialized with each other. < * < * The first update that observes this racy situation enters a "Merge" < * mode which causes subsequent override requests to saturate the < * override instead of replacing its value. < * < * This mode is left when knote_unlock() or knote_call_filter_event() < * observe that no other f_* routine is in flight. < */ < kn->kn_status |= KN_MERGE_QOS; < } --- > if (sync_qos == THREAD_QOS_USER_INTERACTIVE) { > flags = flags | KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI; > } 6920,6922c7629,7631 < if (kn->kn_qos_override == qos_index) { < return false; < } --- > if (old_sync_override == THREAD_QOS_USER_INTERACTIVE) { > flags = flags | KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI; > } 6924,6926c7633,7635 < *qos_out = qos_index; < return true; < } --- > kqworkloop_update_override(kqwl, qos_index, sync_qos, > flags); > } 6928,6936d7636 < static void < knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result) < { < thread_qos_t qos; < if (knote_should_apply_qos_override(kq, kn, result, &qos)) { < knote_dequeue(kn); < knote_apply_qos_override(kn, qos); < if (knote_enqueue(kn) && (kn->kn_status & KN_ACTIVE)) { < knote_wakeup(kn); 6938a7639,7640 > if (lock_kq) > kqunlock(kq); 6944a7647 > kq_index_t qos_index = knote_get_qos_index(kn); 6948a7652 > /* request a servicing thread */ 6951c7655,7656 < kqworkq_request_help(kqwq, kn->kn_qos_index); --- > kqworkq_request_help(kqwq, qos_index); > 6952a7658 > /* request a servicing thread */ 6955,6960c7661,7666 < /* < * kqworkloop_end_processing() will perform the required QoS < * computations when it unsets the processing mode. < */ < if (!kqworkloop_is_processing_on_current_thread(kqwl)) { < kqworkloop_request_help(kqwl, kn->kn_qos_index); --- > if (kqworkloop_is_processing_on_current_thread(kqwl)) { > /* > * kqworkloop_end_processing() will perform the required QoS > * computations when it unsets the processing mode. > */ > return; 6961a7668 > kqworkloop_request_help(kqwl, qos_index); 6972,6973c7679,7682 < waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, KQ_EVENT, < THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); --- > waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, > KQ_EVENT, > THREAD_AWAKENED, > WAITQ_ALL_PRIORITIES); 7005,7006c7714,7715 < suppressq = kqueue_get_suppressed_queue(kq, NULL); < (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, --- > suppressq = kqueue_get_suppressed_queue(kq, QOS_INDEX_KQFILE); > (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, 7034a7744 > 7058c7768,7769 < * detach/drop operations. --- > * detach/drop operations. But we'll prevent it here > * too (by taking a use reference) - just in case. 
7066a7778 > 7068c7780,7794 < knote_call_filter_event(kq, kn, hint); --- > > assert(!knoteuse_needs_boost(kn, NULL)); > > /* If we can get a use reference - deliver event */ > if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) { > int result; > > /* call the event with only a use count */ > result = knote_fops(kn)->f_event(kn, hint); > > /* if its not going away and triggered */ > if (knoteuse2kqlock(kq, kn, KNUSE_NONE) && result) > knote_activate(kn); > /* kq lock held */ > } 7118a7845 > int result; 7121c7848,7851 < if (kn->kn_status & KN_REQVANISH) { --- > > assert(!knoteuse_needs_boost(kn, NULL)); > > if ((kn->kn_status & KN_DROPPING) == 0) { 7123,7126c7853,7865 < kn->kn_status |= KN_VANISHED; < knote_activate(kn); < } else { < knote_call_filter_event(kq, kn, NOTE_REVOKE); --- > if (kn->kn_status & KN_REQVANISH) { > kn->kn_status |= KN_VANISHED; > knote_activate(kn); > > } else if (kqlock2knoteuse(kq, kn, KNUSE_NONE)) { > /* call the event with only a use count */ > result = knote_fops(kn)->f_event(kn, NOTE_REVOKE); > > /* if its not going away and triggered */ > if (knoteuse2kqlock(kq, kn, KNUSE_NONE) && result) > knote_activate(kn); > /* lock held again */ > } 7133,7158d7871 < * Force a lazy allocation of the waitqset link < * of the kq_wqs associated with the kn < * if it wasn't already allocated. < * < * This allows knote_link_waitq to never block < * if reserved_link is not NULL. < */ < void < knote_link_waitqset_lazy_alloc(struct knote *kn) < { < struct kqueue *kq = knote_get_kq(kn); < waitq_set_lazy_init_link(&kq->kq_wqs); < } < < /* < * Check if a lazy allocation for the waitqset link < * of the kq_wqs is needed. < */ < boolean_t < knote_link_waitqset_should_lazy_alloc(struct knote *kn) < { < struct kqueue *kq = knote_get_kq(kn); < return waitq_set_should_lazy_init_link(&kq->kq_wqs); < } < < /* 7167,7168c7880 < * caller provides the wait queue link structure and insures that the kq->kq_wqs < * is linked by previously calling knote_link_waitqset_lazy_alloc. --- > * caller provides the wait queue link structure. 
7207a7920,7922 > * Essentially an inlined knote_remove & knote_drop > * when we know for sure that the thing is a file > * 7212c7927 < knote_fdclose(struct proc *p, int fd) --- > knote_fdclose(struct proc *p, int fd, int force) 7216d7930 < KNOTE_LOCK_CTX(knlc); 7234,7236c7948,7973 < if (kn->kn_status & KN_VANISHED) { < kqunlock(kq); < continue; --- > if (!force && (kn->kn_status & KN_REQVANISH)) { > > if ((kn->kn_status & KN_VANISHED) == 0) { > proc_fdunlock(p); > > assert(!knoteuse_needs_boost(kn, NULL)); > > /* get detach reference (also marks vanished) */ > if (kqlock2knotedetach(kq, kn, KNUSE_NONE)) { > /* detach knote and drop fp use reference */ > knote_fops(kn)->f_detach(kn); > if (knote_fops(kn)->f_isfd) > fp_drop(p, kn->kn_id, kn->kn_fp, 0); > > /* activate it if it's still in existence */ > if (knoteuse2kqlock(kq, kn, KNUSE_NONE)) { > knote_activate(kn); > } > kqunlock(kq); > } > proc_fdlock(p); > goto restart; > } else { > kqunlock(kq); > continue; > } 7240,7250d7976 < if (!knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) { < /* the knote was dropped by someone, nothing to do */ < } else if (kn->kn_status & KN_REQVANISH) { < kn->kn_status |= KN_VANISHED; < kn->kn_status &= ~KN_ATTACHED; < < kqunlock(kq); < knote_fops(kn)->f_detach(kn); < if (knote_fops(kn)->f_isfd) < fp_drop(p, kn->kn_id, kn->kn_fp, 0); < kqlock(kq); 7252,7255c7978,7986 < knote_activate(kn); < knote_unlock(kq, kn, &knlc, KNOTE_KQ_UNLOCK); < } else { < knote_drop(kq, kn, &knlc); --- > /* > * Convert the kq lock to a drop ref. > * If we get it, go ahead and drop it. > * Otherwise, we waited for the blocking > * condition to complete. Either way, > * we dropped the fdlock so start over. > */ > if (kqlock2knotedrop(kq, kn)) { > knote_drop(kn, p); 7263c7994 < /* --- > /* 7278,7280c8009,8011 < struct kevent_internal_s *kev, < bool is_fd, < struct proc *p) --- > struct kevent_internal_s *kev, > bool is_fd, > struct proc *p) 7286c8017 < /* --- > /* 7305c8036 < kev->ident == kn->kn_id && --- > kev->ident == kn->kn_id && 7336,7337c8067,8069 < kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, < struct proc *p) --- > kq_add_knote(struct kqueue *kq, struct knote *kn, > struct kevent_internal_s *kev, > struct proc *p, int *knoteuse_flags) 7349c8081 < if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) { --- > if (knote_fdfind(kq, kev, is_fd, p) != NULL) { 7360c8092,8093 < list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, &size); --- > list = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, > &size); 7421,7424c8154,8158 < if (ret == 0) { < kqlock(kq); < assert((kn->kn_status & KN_LOCKED) == 0); < (void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK); --- > if (ret == 0 && knoteuse_needs_boost(kn, kev)) { > set_thread_rwlock_boost(); > *knoteuse_flags = KNUSE_BOOST; > } else { > *knoteuse_flags = KNUSE_NONE; 7435a8170,8171 > * and copy kn_status an kq_state while holding kqlock and > * fd table locks. 
7444c8180 < struct knote_lock_ctx *knlc) --- > kn_status_t *kn_status, uint16_t *kq_state) 7448d8183 < uint16_t kq_state; 7467,7472c8202,8205 < kq_state = kq->kq_state; < if (knlc) { < knote_unlock_cancel(kq, kn, knlc, KNOTE_KQ_UNLOCK); < } else { < kqunlock(kq); < } --- > *kn_status = kn->kn_status; > *kq_state = kq->kq_state; > kqunlock(kq); > 7477,7479d8209 < < if (kq_state & KQ_DYNAMIC) < kqueue_release_last(p, kq); 7490,7491c8220,8223 < kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_internal_s *kev, < bool is_fd, struct proc *p) --- > kq_find_knote_and_kq_lock(struct kqueue *kq, > struct kevent_internal_s *kev, > bool is_fd, > struct proc *p) 7516,7518c8248,8254 < * Called with the kqueue locked, returns with the kqueue unlocked. < * < * If a knote locking context is passed, it is canceled. --- > * Called with the kqueue unlocked and holding a > * "drop reference" on the knote in question. > * This reference is most often aquired thru a call > * to kqlock2knotedrop(). But it can also be acquired > * through stealing a drop reference via a call to > * knoteuse2knotedrop() or during the initial attach > * of the knote. 7524c8260 < knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc) --- > knote_drop(struct knote *kn, __unused struct proc *ctxp) 7525a8262 > struct kqueue *kq = knote_get_kq(kn); 7527,7538c8264,8265 < < kqlock_held(kq); < < assert((kn->kn_status & KN_DROPPING) == 0); < if (knlc == NULL) { < assert((kn->kn_status & KN_LOCKED) == 0); < } < kn->kn_status |= KN_DROPPING; < < knote_unsuppress(kn); < knote_dequeue(kn); < knote_wait_for_filter_events(kq, kn); --- > kn_status_t kn_status; > uint16_t kq_state; 7545,7546c8272,8300 < /* kq may be freed when kq_remove_knote() returns */ < kq_remove_knote(kq, kn, p, knlc); --- > /* Remove the source from the appropriate hash */ > kq_remove_knote(kq, kn, p, &kn_status, &kq_state); > > /* > * If a kqueue_dealloc is happening in parallel for the kq > * pointed by the knote the kq could be aready deallocated > * at this point. > * Do not access the kq after the kq_remove_knote if it is > * not a KQ_DYNAMIC. > */ > > /* determine if anyone needs to know about the drop */ > assert((kn_status & (KN_DROPPING | KN_SUPPRESSED | KN_QUEUED)) == KN_DROPPING); > > /* > * If KN_USEWAIT is set, some other thread was trying to drop the kn. > * Or it was in kqueue_dealloc, so the kqueue_dealloc did not happen > * because that thread was waiting on this wake, or it was a drop happening > * because of a kevent_register that takes a reference on the kq, and therefore > * the kq cannot be deallocated in parallel. > * > * It is safe to access kq->kq_wqs if needswakeup is set. > */ > if (kn_status & KN_USEWAIT) > waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, > CAST_EVENT64_T(&kn->kn_status), > THREAD_RESTART, > WAITQ_ALL_PRIORITIES); > 7550a8305,8312 > > /* > * release reference on dynamic kq (and free if last). > * Will only be last if this is from fdfree, etc... > * because otherwise processing thread has reference. 
> */ > if (kq_state & KQ_DYNAMIC) > kqueue_release_last(p, kq); 7587a8350,8352 > /* Clear the sync qos on the knote */ > knote_adjust_sync_qos(kn, THREAD_QOS_UNSPECIFIED, FALSE); > 7636c8401 < suppressq = kqueue_get_suppressed_queue(kq, kn); --- > suppressq = kqueue_get_suppressed_queue(kq, knote_get_qos_index(kn)); 7637a8403,8412 > > if ((kq->kq_state & KQ_WORKLOOP) && > knote_get_qos_override_index(kn) == THREAD_QOS_USER_INTERACTIVE && > kn->kn_qos_override_is_sync) { > struct kqworkloop *kqwl = (struct kqworkloop *)kq; > /* update the sync qos override counter for suppressed knotes */ > kqworkloop_update_override(kqwl, knote_get_qos_index(kn), > knote_get_qos_override_index(kn), > (KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS | KQWL_UO_NEW_OVERRIDE_IS_SYNC_UI)); > } 7651a8427,8429 > /* Clear the sync qos on the knote */ > knote_adjust_sync_qos(kn, THREAD_QOS_UNSPECIFIED, FALSE); > 7653c8431 < suppressq = kqueue_get_suppressed_queue(kq, kn); --- > suppressq = kqueue_get_suppressed_queue(kq, knote_get_qos_index(kn)); 7656,7663c8434,8435 < /* < * If the knote is no longer active, reset its push, < * and resynchronize kn_qos_index with kn_qos_override < */ < if ((kn->kn_status & KN_ACTIVE) == 0) { < kn->kn_qos_override = kn->kn_req_index; < } < kn->kn_qos_index = kn->kn_qos_override; --- > /* udate in-use qos to equal requested qos */ > kn->kn_qos_index = kn->kn_req_index; 7670c8442,8444 < if ((kq->kq_state & KQ_WORKLOOP) && TAILQ_EMPTY(suppressq)) { --- > if ((kq->kq_state & KQ_WORKLOOP) && !(kq->kq_state & KQ_NO_WQ_THREAD) && > knote_get_qos_override_index(kn) == THREAD_QOS_USER_INTERACTIVE && > kn->kn_qos_override_is_sync) { 7672a8447,8455 > /* update the sync qos override counter for suppressed knotes */ > kqworkloop_update_override(kqwl, knote_get_qos_index(kn), > knote_get_qos_override_index(kn), > (KQWL_UO_UPDATE_SUPPRESS_SYNC_COUNTERS | KQWL_UO_OLD_OVERRIDE_IS_SYNC_UI)); > } > > if (TAILQ_EMPTY(suppressq) && (kq->kq_state & KQ_WORKLOOP) && > !(kq->kq_state & KQ_NO_WQ_THREAD)) { > struct kqworkloop *kqwl = (struct kqworkloop *)kq; 7675,7677c8458,8459 < * kqworkloop_end_processing() or kqworkloop_begin_processing() < * will perform the required QoS computations when it unsets the < * processing mode. --- > * kqworkloop_end_processing() will perform the required QoS > * computations when it unsets the processing mode. 
7680c8462 < kq_req_lock(kqwl); --- > kqwl_req_lock(kqwl); 7682c8464 < kq_req_unlock(kqwl); --- > kqwl_req_unlock(kqwl); 7687a8470,8491 > static void > knote_update_sync_override_state(struct knote *kn) > { > struct kqtailq *queue = knote_get_queue(kn); > struct kqueue *kq = knote_get_kq(kn); > > if (!(kq->kq_state & KQ_WORKLOOP) || > knote_get_queue_index(kn) != THREAD_QOS_USER_INTERACTIVE) > return; > > /* Update the sync ipc state on workloop */ > struct kqworkloop *kqwl = (struct kqworkloop *)kq; > boolean_t sync_ipc_override = FALSE; > if (!TAILQ_EMPTY(queue)) { > struct knote *kn_head = TAILQ_FIRST(queue); > if (kn_head->kn_qos_override_is_sync) > sync_ipc_override = TRUE; > } > kqworkloop_update_sync_override_state(kqwl, sync_ipc_override); > } > > /* called with kqueue lock held */ 7700c8504,8509 < TAILQ_INSERT_TAIL(queue, kn, kn_tqe); --- > /* insert at head for sync ipc waiters */ > if (kn->kn_qos_override_is_sync) { > TAILQ_INSERT_HEAD(queue, kn, kn_tqe); > } else { > TAILQ_INSERT_TAIL(queue, kn, kn_tqe); > } 7702a8512 > knote_update_sync_override_state(kn); 7724a8535 > knote_update_sync_override_state(kn); 7749a8561,8566 > /* Initialize the timer filter lock */ > lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr); > > /* Initialize the user filter lock */ > lck_spin_init(&_filt_userlock, kq_lck_grp, kq_lck_attr); > 7766,7767c8583,8585 < struct knote *kn = ((struct knote *)zalloc(knote_zone)); < bzero(kn, sizeof(struct knote)); --- > struct knote *kn; > kn = ((struct knote *)zalloc(knote_zone)); > *kn = (struct knote) { .kn_qos_override = 0, .kn_qos_sync_override = 0, .kn_qos_override_is_sync = 0 }; 7774,7775d8591 < assert(kn->kn_inuse == 0); < assert((kn->kn_status & KN_LOCKED) == 0); 7807c8623 < struct ifnet *ifp, struct proc *p); --- > struct ifnet *ifp, struct proc *p); 7842,7843c8658,8659 < CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, < kevt_getstat, "S,kevtstat", ""); --- > CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, > kevt_getstat, "S,kevtstat", ""); 8090c8906 < u_int32_t *out_vendor_code) --- > u_int32_t *out_vendor_code) 8109c8925 < /* --- > /* 8234,8237c9050,9053 < u_long cmd, < caddr_t data, < __unused struct ifnet *ifp, < __unused struct proc *p) --- > u_long cmd, > caddr_t data, > __unused struct ifnet *ifp, > __unused struct proc *p) 8439d9254 < workq_threadreq_param_t trp = {}; 8450c9265,9275 < kq_req_lock(kqwl); --- > kqwl_req_lock(kqwl); > > if (kqr->kqr_thread) { > kqdi->kqdi_servicer = thread_tid(kqr->kqr_thread); > } > > if (kqwl->kqwl_owner == WL_OWNER_SUSPENDED) { > kqdi->kqdi_owner = ~0ull; > } else { > kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner); > } 8452,8453d9276 < kqdi->kqdi_servicer = thread_tid(kqr->kqr_thread); < kqdi->kqdi_owner = thread_tid(kqwl->kqwl_owner); 8458,8474c9281 < kqdi->kqdi_sync_waiter_qos = 0; < < trp.trp_value = kqwl->kqwl_params; < if (trp.trp_flags & TRP_PRIORITY) < kqdi->kqdi_pri = trp.trp_pri; < else < kqdi->kqdi_pri = 0; < < if (trp.trp_flags & TRP_POLICY) < kqdi->kqdi_pol = trp.trp_pol; < else < kqdi->kqdi_pol = 0; < < if (trp.trp_flags & TRP_CPUPERCENT) < kqdi->kqdi_cpupercent = trp.trp_cpupercent; < else < kqdi->kqdi_cpupercent = 0; --- > kqdi->kqdi_sync_waiter_qos = kqr->kqr_dsync_waiters_qos; 8476c9283 < kq_req_unlock(kqwl); --- > kqwl_req_unlock(kqwl); 8486d9292 < kq_index_t qos; 8496d9301 < assert((kn->kn_status & (KN_QUEUED | KN_SUPPRESSED)) == 0); 8500c9305 < qos = KQWQ_QOS_MANAGER; --- > knote_set_qos_index(kn, KQWQ_QOS_MANAGER); 8503,8511c9308,9313 < < qos = 
_pthread_priority_thread_qos(kn->kn_qos); < assert(qos && qos < THREAD_QOS_LAST); < kq_req_lock(kq); < kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_STAYACTIVE_QOS, qos); < kq_req_unlock(kq); < qos = KQWL_BUCKET_STAYACTIVE; < } else { < qos = THREAD_QOS_UNSPECIFIED; --- > kqwl_req_lock(kqwl); > assert(kn->kn_req_index && kn->kn_req_index < THREAD_QOS_LAST); > kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_STAYACTIVE_QOS, > kn->kn_req_index); > kqwl_req_unlock(kqwl); > knote_set_qos_index(kn, KQWL_BUCKET_STAYACTIVE); 8514,8517d9315 < kn->kn_req_index = qos; < kn->kn_qos_override = qos; < kn->kn_qos_index = qos; < 8748c9546 < out: --- > out: 8833a9632,9639 > kevent_redrive_proc_thread_request(proc_t p) > { > __assert_only int ret; > ret = (*pthread_functions->workq_threadreq)(p, NULL, WORKQ_THREADREQ_REDRIVE, 0, 0); > assert(ret == 0 || ret == ECANCELED); > } > > static void 8843,8844c9649,9654 < if (ut->uu_kqr_bound != NULL) { < ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS; --- > if (ut->uu_kqueue_bound != NULL) { > if (ut->uu_kqueue_flags & KEVENT_FLAG_WORKLOOP) { > ast_flags64 |= R2K_WORKLOOP_PENDING_EVENTS; > } else if (ut->uu_kqueue_flags & KEVENT_FLAG_WORKQ) { > ast_flags64 |= R2K_WORKQ_PENDING_EVENTS; > } 8875c9685 < workq_kern_threadreq_redrive(p, WORKQ_THREADREQ_CAN_CREATE_THREADS); --- > kevent_redrive_proc_thread_request(p); 8891a9702,9703 > struct uthread *ut; > struct kqueue *kq; 8901c9713 < struct uthread *ut = get_bsdthread_info(current_thread()); --- > ut = get_bsdthread_info(current_thread()); 8906,8910c9718,9722 < struct kqrequest *kqr = ut->uu_kqr_bound; < if (kqr) { < if (kqr->kqr_state & KQR_WORKLOOP) { < bound_id = kqr_kqworkloop(kqr)->kqwl_dynamicid; < } else { --- > kq = ut->uu_kqueue_bound; > if (kq) { > if (kq->kq_state & KQ_WORKLOOP) { > bound_id = ((struct kqworkloop *)kq)->kqwl_dynamicid; > } else if (kq->kq_state & KQ_WORKQ) {
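For context on the kqueue_get_base_queue / kqueue_get_high_queue / kqueue_queue_empty hunks above: the variant on the '>' side maps each workqueue QoS class to a contiguous range of kq_queue slots through the _kqwq_base_index and _kqwq_high_index tables, and tests emptiness by scanning that range from the high slot down to the base slot. Below is a minimal standalone sketch of that scan; kq_index_t, KQWQ_NQOS, KQWQ_NBUCKETS and the bitmap of non-empty buckets are stand-ins chosen for the sketch (the real code walks struct kqtailq lists), while the table values are copied from the diff.

	#include <assert.h>
	#include <stdio.h>

	typedef unsigned char kq_index_t;
	#define KQWQ_NQOS     8
	#define KQWQ_NBUCKETS 22   /* assumption: one slot per index up to _kqwq_high_index[7] == 21 */

	static kq_index_t _kqwq_base_index[KQWQ_NQOS] = {0, 0, 6, 11, 15, 18, 20, 21};
	static kq_index_t _kqwq_high_index[KQWQ_NQOS] = {0, 5, 10, 14, 17, 19, 20, 21};

	/* Mirrors the '>' side of kqueue_queue_empty(): walk from the high bucket of a
	 * QoS class down to its base bucket; any non-empty bucket means "not empty". */
	static int
	bucket_range_empty(const int nonempty[KQWQ_NBUCKETS], kq_index_t qos_index)
	{
		assert(qos_index < KQWQ_NQOS);
		const int *base  = &nonempty[_kqwq_base_index[qos_index]];
		const int *queue = &nonempty[_kqwq_high_index[qos_index]];

		do {
			if (*queue)
				return 0;
		} while (queue-- > base);
		return 1;
	}

	int
	main(void)
	{
		int nonempty[KQWQ_NBUCKETS] = {0};
		nonempty[13] = 1;   /* pretend a knote is queued in bucket 13 */

		/* QoS 3 owns buckets [11,14], so it is not empty; QoS 4 owns [15,17]. */
		printf("qos 3 empty: %d\n", bucket_range_empty(nonempty, 3));   /* prints 0 */
		printf("qos 4 empty: %d\n", bucket_range_empty(nonempty, 4));   /* prints 1 */
		return 0;
	}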
./bsd/kern/sys_pipe.c differences detected:
1422c1422
<
---
>
1433a1434,1435
>	if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
>		kn->kn_udata = kev->udata;
1515a1518,1519
>	if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
>		kn->kn_udata = kev->udata;
NO DIFFS in ./bsd/kern/kern_subr.c
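The sys_pipe.c hunks above add, on the '>' side, the same two lines to both pipe filter touch paths: refresh the stored udata from the incoming kevent unless the KN_UDATA_SPECIFIC status bit is set. A tiny standalone restatement of that check; the struct layouts and the KN_UDATA_SPECIFIC value here are placeholders for the sketch, not the kernel definitions.

	#include <stdint.h>
	#include <stdio.h>

	#define KN_UDATA_SPECIFIC 0x0400   /* placeholder bit value for the sketch */

	struct kevent_stub { uint64_t udata; };
	struct knote_stub  { uint32_t kn_status; uint64_t kn_udata; };

	/* Same pattern the '>' side inserts into the pipe filters' touch routines. */
	static void
	touch_refresh_udata(struct knote_stub *kn, const struct kevent_stub *kev)
	{
		if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
			kn->kn_udata = kev->udata;
	}

	int
	main(void)
	{
		struct knote_stub kn = { .kn_status = 0, .kn_udata = 1 };
		struct kevent_stub kev = { .udata = 2 };

		touch_refresh_udata(&kn, &kev);
		printf("udata after touch: %llu\n", (unsigned long long)kn.kn_udata);  /* 2 */

		kn.kn_status |= KN_UDATA_SPECIFIC;
		kev.udata = 3;
		touch_refresh_udata(&kn, &kev);
		printf("udata after touch: %llu\n", (unsigned long long)kn.kn_udata);  /* still 2 */
		return 0;
	}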
NO DIFFS in ./bsd/kern/kern_ecc.c

./bsd/kern/proc_info.c differences detected: 70d69 < #include 158c157 < int __attribute__ ((noinline)) proc_pidthreadinfo(proc_t p, uint64_t arg, bool thuniqueid, struct proc_threadinfo *pthinfo); --- > int __attribute__ ((noinline)) proc_pidthreadinfo(proc_t p, uint64_t arg, int thuniqueid, struct proc_threadinfo *pthinfo); 160c159 < int __attribute__ ((noinline)) proc_pidlistthreads(proc_t p, bool thuniqueid, user_addr_t buffer, uint32_t buffersize, int32_t *retval); --- > int __attribute__ ((noinline)) proc_pidlistthreads(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval); 178,181d176 < #if !CONFIG_EMBEDDED < int __attribute__ ((noinline)) proc_udata_info(pid_t pid, int flavor, user_addr_t buffer, uint32_t buffersize, int32_t *retval); < #endif < 202c197 < extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int); --- > extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int, int); 280,283d274 < #if !CONFIG_EMBEDDED < case PROC_INFO_CALL_UDATA_INFO: < return proc_udata_info(pid, flavor, buffer, buffersize, retval); < #endif /* !CONFIG_EMBEDDED */ 800c791 < proc_pidthreadinfo(proc_t p, uint64_t arg, bool thuniqueid, struct proc_threadinfo *pthinfo) --- > proc_pidthreadinfo(proc_t p, uint64_t arg, int thuniqueid, struct proc_threadinfo *pthinfo) 930c921 < proc_pidlistthreads(proc_t p, bool thuniqueid, user_addr_t buffer, uint32_t buffersize, int32_t *retval) --- > proc_pidlistthreads(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval) 954c945 < ret = fill_taskthreadlist(p->task, kbuf, numthreads, thuniqueid); --- > ret = fill_taskthreadlist(p->task, kbuf, numthreads); 1361c1352 < uuid_t uuid = {}; --- > uuid_t uuid; 1389c1380 < uint32_t is_backgrounded = 0; --- > uint32_t is_backgrounded; 1688c1679 < bool thuniqueid = false; --- > int thuniqueid = 0; 1710,1712d1700 < case PROC_PIDLISTTHREADIDS: < size = PROC_PIDLISTTHREADIDS_SIZE; < break; 1795,1800d1782 < case PROC_PIDVMRTFAULTINFO: < size = sizeof(vm_rtfault_record_t); < if (buffer == USER_ADDR_NULL) { < size = 0; < } < break; 1930c1912 < thuniqueid = true; --- > thuniqueid = 1; 1943,1944d1924 < case PROC_PIDLISTTHREADIDS: < thuniqueid = true; 1946c1926 < error = proc_pidlistthreads(p, thuniqueid, buffer, buffersize, retval); --- > error = proc_pidlistthreads(p, buffer, buffersize, retval); 2085,2097d2064 < case PROC_PIDVMRTFAULTINFO: { < /* This interface can only be employed on the current < * process. We will eventually enforce an entitlement. < */ < *retval = 0; < < if (p != current_proc()) { < error = EINVAL; < break; < } < < size_t kbufsz = MIN(buffersize, vmrtfaultinfo_bufsz()); < void *vmrtfbuf = kalloc(kbufsz); 2099,2126d2065 < if (vmrtfbuf == NULL) { < error = ENOMEM; < break; < } < < bzero(vmrtfbuf, kbufsz); < < uint64_t effpid = get_current_unique_pid(); < /* The VM may choose to provide more comprehensive records < * for root-privileged users on internal configurations. 
< */ < boolean_t isroot = (suser(kauth_cred_get(), (u_short *)0) == 0); < int vmf_residue = vmrtf_extract(effpid, isroot, kbufsz, vmrtfbuf, retval); < int vmfsz = *retval * sizeof(vm_rtfault_record_t); < < error = 0; < if (vmfsz) { < error = copyout(vmrtfbuf, buffer, vmfsz); < } < < if (error == 0) { < if (vmf_residue) { < error = ENOMEM; < } < } < kfree(vmrtfbuf, kbufsz); < } < break; 2892c2831 < if (!cansignal(current_proc(), my_cred, target_p, SIGKILL)) { --- > if (!cansignal(current_proc(), my_cred, target_p, SIGKILL, 0)) { 2913c2852 < if (!cansignal(current_proc(), my_cred, target_p, SIGKILL)) { --- > if (!cansignal(current_proc(), my_cred, target_p, SIGKILL, 0)) { 2976c2915 < if (!cansignal(current_proc(), uc, p, SIGKILL)) { --- > if (!cansignal(current_proc(), uc, p, SIGKILL, 0)) { 3270,3317d3208 < < #if !CONFIG_EMBEDDED < int < proc_udata_info(int pid, int flavor, user_addr_t buffer, uint32_t bufsize, int32_t *retval) < { < int err = 0; < proc_t p; < < p = proc_find(pid); < if (p == PROC_NULL) { < return ESRCH; < } < < /* < * Only support calls against oneself for the moment. < */ < if (p->p_pid != proc_selfpid()) { < err = EACCES; < goto out; < } < < if (bufsize != sizeof (p->p_user_data)) { < err = EINVAL; < goto out; < } < < switch (flavor) { < case PROC_UDATA_INFO_SET: < err = copyin(buffer, &p->p_user_data, sizeof (p->p_user_data)); < break; < case PROC_UDATA_INFO_GET: < err = copyout(&p->p_user_data, buffer, sizeof (p->p_user_data)); < break; < default: < err = ENOTSUP; < break; < } < < out: < proc_rele(p); < < if (err == 0) { < *retval = 0; < } < < return err; < } < #endif /* !CONFIG_EMBEDDED */ NO DIFFS in ./bsd/kern/posix_sem.c
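The '<' side of the proc_info.c hunks above adds a PROC_INFO_CALL_UDATA_INFO path (proc_udata_info) that lets a process stash and read back a user-data word on its own proc structure. Below is a compile-and-run restatement of that flavor handling; the struct, the flavor values, and memcpy in place of copyin/copyout are stand-ins for the kernel definitions.

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define PROC_UDATA_INFO_GET 1   /* placeholder flavor values for the sketch */
	#define PROC_UDATA_INFO_SET 2

	struct proc_stub { int p_pid; uint64_t p_user_data; };

	static int
	udata_info(struct proc_stub *p, int selfpid, int flavor,
	    void *buffer, size_t bufsize)
	{
		if (p->p_pid != selfpid)
			return EACCES;                       /* only the process itself */
		if (bufsize != sizeof(p->p_user_data))
			return EINVAL;                       /* size must match exactly */

		switch (flavor) {
		case PROC_UDATA_INFO_SET:
			memcpy(&p->p_user_data, buffer, sizeof(p->p_user_data)); /* copyin stand-in */
			return 0;
		case PROC_UDATA_INFO_GET:
			memcpy(buffer, &p->p_user_data, sizeof(p->p_user_data)); /* copyout stand-in */
			return 0;
		default:
			return ENOTSUP;
		}
	}

	int
	main(void)
	{
		struct proc_stub p = { .p_pid = 42 };
		uint64_t v = 0xfeedfacecafebeefULL, out = 0;

		printf("set: %d\n", udata_info(&p, 42, PROC_UDATA_INFO_SET, &v, sizeof(v)));
		printf("get: %d, value matches: %d\n",
		    udata_info(&p, 42, PROC_UDATA_INFO_GET, &out, sizeof(out)), out == v);
		printf("other pid: %d\n", udata_info(&p, 7, PROC_UDATA_INFO_GET, &out, sizeof(out)));
		return 0;
	}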

./bsd/kern/kern_proc.c differences detected: 115,118d114 < #ifdef CONFIG_32BIT_TELEMETRY < #include < #endif /* CONFIG_32BIT_TELEMETRY */ < 133,136d128 < #ifdef CONFIG_32BIT_TELEMETRY < #define MAX_32BIT_EXEC_SIG_SIZE 160 < #endif /* CONFIG_32BIT_TELEMETRY */ < 164a157,160 > #if DEVELOPMENT || DEBUG > extern int cs_enforcement_enable; > #endif > 183,184d178 < typedef uint64_t unaligned_u64 __attribute__((aligned(1))); < 186a181 > void * proc_get_uthread_uu_threadlist(void * uthread_v); 188c183 < void proc_starttime_kdp(void * p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime); --- > void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime); 190a186,188 > /* TODO: make a header that's exported and usable in osfmk */ > char* proc_best_name(proc_t p); > 206a205,207 > uint64_t get_current_unique_pid(void); > > 909d909 < 915c915 < proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unaligned_u64 *abstime) --- > proc_starttime_kdp(void *p, uint64_t *tv_sec, uint64_t *tv_usec, uint64_t *abstime) 917a918,921 > struct uint64p { > uint64_t val; > } __attribute__((packed)); > 920c924 < *tv_sec = pp->p_start.tv_sec; --- > ((struct uint64p *)tv_sec)->val = pp->p_start.tv_sec; 922c926 < *tv_usec = pp->p_start.tv_usec; --- > ((struct uint64p *)tv_usec)->val = pp->p_start.tv_usec; 1003,1013d1006 < proc_in_teardown(proc_t p) < { < int retval = 0; < < if (p) < retval = p->p_lflag & P_LPEXIT; < return(retval? 1: 0); < < } < < int 1083,1089d1075 < proc_is64bit_data(proc_t p) < { < assert(p->task); < return (int)task_get_64bit_data(p->task); < } < < int 1172c1158 < } --- > } 1960d1945 < case CS_OPS_TEAMID: 2011c1996 < if (cs_process_enforcement(pt)) --- > if (cs_enforcement(pt)) 2017,2020d2001 < //Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV but still report CS_FORCED_LV < if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) { < retflags &= (~CS_REQUIRE_LV); < } 2169,2170c2150 < case CS_OPS_IDENTITY: < case CS_OPS_TEAMID: { --- > case CS_OPS_IDENTITY: { 2194c2174 < identity = ops == CS_OPS_TEAMID ? 
csproc_get_teamid(pt) : cs_identity_get(pt); --- > identity = cs_identity_get(pt); 2225c2205 < if (cs_process_global_enforcement()) { --- > if (cs_enforcement_enable) { 2264c2244 < pid_t *pid_list = NULL; --- > pid_t *pid_list; 2276c2256 < pid_count_available = nprocs + 1 /* kernel_task not counted in nprocs */; --- > pid_count_available = nprocs + 1; //kernel_task is not counted in nprocs 2294d2273 < assert(pid_list != NULL); 3246c3225 < boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t); --- > boolean_t memorystatus_kill_on_VM_thrashing(boolean_t); 3322c3301 < memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */); --- > memorystatus_kill_on_VM_thrashing(TRUE /* async */); 3449,3464c3428,3429 < boolean_t < proc_send_synchronous_EXC_RESOURCE(proc_t p) < { < if (p == PROC_NULL) < return FALSE; < < /* Send sync EXC_RESOURCE if the process is traced */ < if (ISSET(p->p_lflag, P_LTRACED)) { < return TRUE; < } < return FALSE; < } < < #ifdef CONFIG_32BIT_TELEMETRY < void < proc_log_32bit_telemetry(proc_t p) --- > void * > proc_get_uthread_uu_threadlist(void * uthread_v) 3466,3535c3431,3432 < /* Gather info */ < char signature_buf[MAX_32BIT_EXEC_SIG_SIZE] = { 0 }; < char * signature_cur_end = &signature_buf[0]; < char * signature_buf_end = &signature_buf[MAX_32BIT_EXEC_SIG_SIZE - 1]; < int bytes_printed = 0; < < const char * teamid = NULL; < const char * identity = NULL; < struct cs_blob * csblob = NULL; < < proc_list_lock(); < < /* < * Get proc name and parent proc name; if the parent execs, we'll get a < * garbled name. < */ < bytes_printed = snprintf(signature_cur_end, < signature_buf_end - signature_cur_end, < "%s,%s,", p->p_name, < (p->p_pptr ? p->p_pptr->p_name : "")); < < if (bytes_printed > 0) { < signature_cur_end += bytes_printed; < } < < proc_list_unlock(); < < /* Get developer info. */ < vnode_t v = proc_getexecutablevnode(p); < < if (v) { < csblob = csvnode_get_blob(v, 0); < < if (csblob) { < teamid = csblob_get_teamid(csblob); < identity = csblob_get_identity(csblob); < } < } < < if (teamid == NULL) { < teamid = ""; < } < < if (identity == NULL) { < identity = ""; < } < < bytes_printed = snprintf(signature_cur_end, < signature_buf_end - signature_cur_end, < "%s,%s", teamid, identity); < < if (bytes_printed > 0) { < signature_cur_end += bytes_printed; < } < < if (v) { < vnode_put(v); < } < < /* < * We may want to rate limit here, although the SUMMARIZE key should < * help us aggregate events in userspace. < */ < < /* Emit log */ < kern_asl_msg(LOG_DEBUG, "messagetracer", 3, < /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec", < /* 1 */ "com.apple.message.signature", signature_buf, < /* 2 */ "com.apple.message.summarize", "YES", < NULL); --- > uthread_t uth = (uthread_t)uthread_v; > return (uth != NULL) ? uth->uu_threadlist : NULL; 3537d3433 < #endif /* CONFIG_32BIT_TELEMETRY */
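The '<' side of the kern_proc.c hunks above adds proc_log_32bit_telemetry(), which assembles a "<proc>,<parent>,<teamid>,<identity>" signature with length-checked snprintf appends into a MAX_32BIT_EXEC_SIG_SIZE (160 byte) buffer and hands it to kern_asl_msg() under the com.apple.kernel.32bit_exec domain. Below is a runnable sketch of just the string assembly; printf stands in for kern_asl_msg, and the cursor clamping on truncation is an addition for the sketch rather than a copy of the kernel's exact bookkeeping.

	#include <stdio.h>

	#define MAX_32BIT_EXEC_SIG_SIZE 160

	static void
	log_32bit_exec(const char *name, const char *parent,
	    const char *teamid, const char *identity)
	{
		char sig[MAX_32BIT_EXEC_SIG_SIZE] = { 0 };
		char *cur = sig;
		char *end = &sig[MAX_32BIT_EXEC_SIG_SIZE - 1];
		int printed;

		/* proc name and parent proc name first */
		printed = snprintf(cur, end - cur, "%s,%s,", name, parent ? parent : "");
		if (printed > 0)
			cur += (printed < end - cur) ? printed : end - cur;

		/* then code-signing team identifier and signing identity */
		printed = snprintf(cur, end - cur, "%s,%s",
		    teamid ? teamid : "", identity ? identity : "");
		if (printed > 0)
			cur += (printed < end - cur) ? printed : end - cur;

		/* the kernel emits this via kern_asl_msg() for messagetracer */
		printf("signature: %s\n", sig);
	}

	int
	main(void)
	{
		log_32bit_exec("LegacyApp", "launchd", "TEAMID1234", "com.example.legacy");
		return 0;
	}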
./bsd/kern/sys_generic.c differences detected: 1088c1088 < seldata = &uth->uu_save.uus_select_data; --- > seldata = &uth->uu_kevent.ss_select_data; 1273c1273 < seldata = &uth->uu_save.uus_select_data; --- > seldata = &uth->uu_kevent.ss_select_data; 1486c1486,1495 < waitq_link(&select_conflict_queue, wqset, WAITQ_SHOULD_LOCK, NULL); --- > /* > * The conflict queue requires disabling interrupts, so we > * need to explicitly reserve a link object to avoid a > * panic/assert in the waitq code. Hopefully this extra step > * can be avoided if we can split the waitq structure into > * blocking and linkage sub-structures. > */ > uint64_t reserved_link = waitq_link_reserve(&select_conflict_queue); > waitq_link(&select_conflict_queue, wqset, WAITQ_SHOULD_LOCK, &reserved_link); > waitq_link_release(reserved_link); 1604,1605d1612 < < waitq_set_lazy_init_link(wqset); 1727,1728d1733 < KNOTE_LOCK_CTX(knlc); < __assert_only int rc; 1747,1748c1752 < rc = kevent_register(kq, &kev, &knlc); < assert((rc & FILTER_REGISTER_WAIT) == 0); --- > kevent_register(kq, &kev, p); 1755,1756c1759 < rc = kevent_register(kq, &kev, &knlc); < assert((rc & FILTER_REGISTER_WAIT) == 0); --- > kevent_register(kq, &kev, p); 1772,1773c1775 < rc = kevent_register(kq, &kev, &knlc); < assert((rc & FILTER_REGISTER_WAIT) == 0); --- > kevent_register(kq, &kev, p); 2029c2031 < seldata = &uth->uu_save.uus_select_data; --- > seldata = &uth->uu_kevent.ss_select_data; 2742c2744 < } uer = {}; --- > } uer; 3113c3115 < __darwin_uuid_t uuid_kern = {}; /* for IOKit call */ --- > __darwin_uuid_t uuid_kern; /* for IOKit call */ 3228c3230 < struct ledger_info info = {}; --- > struct ledger_info info; 3288,3290d3289 < case TELEMETRY_CMD_PMI_SETUP: < error = telemetry_pmi_setup((enum telemetry_pmi)args->deadline, args->interval); < break; 3685,3704d3683 < < static int < sysctl_waitq_set_nelem SYSCTL_HANDLER_ARGS < { < #pragma unused(oidp, arg1, arg2) < int nelem; < < /* Read only */ < if (req->newptr != USER_ADDR_NULL) < return (EPERM); < < nelem = sysctl_helper_waitq_set_nelem(); < < return SYSCTL_OUT(req, &nelem, sizeof(nelem)); < } < < SYSCTL_PROC(_kern, OID_AUTO, n_ltable_entries, CTLFLAG_RD | CTLFLAG_LOCKED, < 0, 0, sysctl_waitq_set_nelem, "I", "ltable elementis currently used"); < <
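The '<' side of the sys_generic.c hunks above registers a read-only kern.n_ltable_entries sysctl whose handler rejects writes and copies out a single int obtained from sysctl_helper_waitq_set_nelem(). A small userspace sketch that reads it back with sysctlbyname(); it is only meaningful on a kernel that actually exports the OID.

	#include <stdio.h>
	#include <sys/types.h>
	#include <sys/sysctl.h>

	int
	main(void)
	{
		int nelem = 0;
		size_t len = sizeof(nelem);

		if (sysctlbyname("kern.n_ltable_entries", &nelem, &len, NULL, 0) != 0) {
			perror("sysctlbyname");    /* kernels without the OID report ENOENT */
			return 1;
		}
		printf("ltable entries currently in use: %d\n", nelem);
		return 0;
	}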
./bsd/kern/kern_symfile.c differences detected: 55d54 < #include 65,67d63 < #define HIBERNATE_MIN_PHYSICAL_LBA (34) < #define HIBERNATE_MIN_FILE_SIZE (1024*1024) < 82,90c78,84 < vfs_context_t ctx; < struct vnode * vp; < dev_t device; < uint32_t blksize; < off_t filelength; < char cf; < char pinned; < char frozen; < char wbcranged; --- > vfs_context_t ctx; > struct vnode * vp; > dev_t device; > uint32_t blksize; > off_t filelength; > char cf; > char pinned; 210c204 < uint32_t iflags, --- > boolean_t create_file, 228d221 < dk_apfs_wbc_range_t wbc_range; 233c226 < uint64_t physoffset, minoffset; --- > uint64_t physoffset; 239c232 < off_t maxiocount, count, segcount, wbctotal; --- > off_t maxiocount, count, segcount; 263c256 < fmode = (kIOPolledFileCreate & iflags) ? (O_CREAT | FWRITE) : FWRITE; --- > fmode = (create_file) ? (O_CREAT | FWRITE) : FWRITE; 286,289c279,282 < if ((error = kern_write_file(ref, write_file_offset, write_file_addr, write_file_len, IO_SKIP_ENCRYPTION))) { < kprintf("kern_write_file() failed with error: %d\n", error); < goto out; < } --- > if ((error = kern_write_file(ref, write_file_offset, write_file_addr, write_file_len, IO_SKIP_ENCRYPTION))) { > kprintf("kern_write_file() failed with error: %d\n", error); > goto out; > } 302d294 < wbctotal = 0; 320,333d311 < if (kIOPolledFileHibernate & iflags) < { < error = do_ioctl(p1, p2, DKIOCAPFSGETWBCRANGE, (caddr_t) &wbc_range); < ref->wbcranged = (error == 0); < } < if (ref->wbcranged) < { < uint32_t idx; < assert(wbc_range.count <= (sizeof(wbc_range.extents) / sizeof(wbc_range.extents[0]))); < for (idx = 0; idx < wbc_range.count; idx++) wbctotal += wbc_range.extents[idx].length; < kprintf("kern_direct_file(%s): wbc %qd\n", name, wbctotal); < if (wbctotal) target = wbc_range.dev; < } < 336,344d313 < if (wbctotal) < { < if (wbctotal >= set_file_size) set_file_size = HIBERNATE_MIN_FILE_SIZE; < else < { < set_file_size -= wbctotal; < if (set_file_size < HIBERNATE_MIN_FILE_SIZE) set_file_size = HIBERNATE_MIN_FILE_SIZE; < } < } 388,389d356 < minoffset = HIBERNATE_MIN_PHYSICAL_LBA * ref->blksize; < 397c364 < // pin logical extents, CS version --- > // pin logical extents 403,408d369 < // pin logical extents, apfs version < < error = VNOP_IOCTL(ref->vp, FSCTL_FREEZE_EXTENTS, NULL, 0, ref->ctx); < if (error && (ENOTTY != error)) goto out; < ref->frozen = (error == 0); < 454,456d414 < < assert(getphysreq.offset >= minoffset); < 469,477d426 < if (ref->wbcranged) < { < uint32_t idx; < for (idx = 0; idx < wbc_ra