- Last 7 days
-
elixir.bootlin.com
-
if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) != (FOLL_PIN | FOLL_LONGTERM)) return true;
Most likely part of policy code with flag-based decision making: unless the caller requested both FOLL_PIN and FOLL_LONGTERM together, the check returns true immediately, so only long-term pins fall through to the stricter checks that follow.
-
/* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return -EFAULT; if (address > TASK_SIZE) pgd = pgd_offset_k(address); else pgd = pgd_offset_gate(mm, address); if (pgd_none(*pgd)) return -EFAULT; p4d = p4d_offset(pgd, address); if (p4d_none(*p4d)) return -EFAULT; pud = pud_offset(p4d, address); if (pud_none(*pud)) return -EFAULT; pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; pte = pte_offset_map(pmd, address); if (!pte) return -EFAULT; entry = ptep_get(pte); if (pte_none(entry)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; *page = vm_normal_page(*vma, address, entry); if (!*page) { if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry))) goto unmap; *page = pte_page(entry); } ret = try_grab_page(*page, gup_flags); if (unlikely(ret)) goto unmap;
Most of these are sanity checks on the page-table walk, right up until line 897, i.e. the 'if (!*page)' branch. There, if no normal page backs the entry, the code either bails out through the unmap label (when FOLL_DUMP is set or the PTE does not map the zero page) or takes the page straight from the PTE; try_grab_page() then takes a reference according to the gup flags and also jumps to unmap on failure.
-
-
elixir.bootlin.com
-
if (s->flags & __CMPXCHG_DOUBLE) { ret = __update_freelist_fast(slab, freelist_old, counters_old, freelist_new, counters_new); } else { ret = __update_freelist_slow(slab, freelist_old, counters_old, freelist_new, counters_new); }
This policy is very similar to the annotated code below; the description is reproduced here:
This policy determines if the system has support for compare and exchange. If so, it will use the "__update_freelist_fast()" function, which uses a compare and exchange internally. Otherwise, it will use "__update_freelist_slow()", which uses a lock (specifically a bit-based spinlock) internally.
-
#ifdef CONFIG_SLAB_FREELIST_HARDENED encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr); #else encoded = (unsigned long)ptr; #endif
This policy uses the configuration setting "CONFIG_SLAB_FREELIST_HARDENED" to determine whether to obfuscate a SLUB free list pointer or not for increased security at the cost of some performance.
-
#ifdef CONFIG_SLAB_FREELIST_HARDENED decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr)); #else decoded = (void *)ptr.v; #endif
This policy is based on the configuration setting indicated by "CONFIG_SLAB_FREELIST_HARDENED", which hardens the slab free list. If the free list is hardened, then free list pointers will be obfuscated; this policy just undoes the obfuscation in that case. In the case where free list pointers are not obfuscated, this function just returns the unmodified pointer value.
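A minimal sketch of the round trip (a hypothetical demo function with made-up values, not kernel code): because the same XOR mask, s->random ^ swab(ptr_addr), is applied on both encode and decode, decoding recovers the original pointer.
static inline int hardened_roundtrip_demo(void)
{
	unsigned long random = 0x5a5a5a5a5a5a5a5aUL;   /* hypothetical s->random */
	unsigned long ptr_addr = 0xffff888000001000UL; /* hypothetical address of the freelist slot */
	unsigned long ptr = 0xffff888000002000UL;      /* hypothetical next-free pointer */
	unsigned long encoded = ptr ^ random ^ __builtin_bswap64(ptr_addr); /* byte swap stands in for swab() on 64-bit */
	unsigned long decoded = encoded ^ random ^ __builtin_bswap64(ptr_addr);
	return decoded == ptr; /* always 1: XOR-ing twice with the same mask is the identity */
}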
-
#ifdef CONFIG_SLAB_FREELIST_RANDOM /* Pre-initialize the random sequence cache */ static int init_cache_random_seq(struct kmem_cache *s) { unsigned int count = oo_objects(s->oo); int err; /* Bailout if already initialised */ if (s->random_seq) return 0; err = cache_random_seq_create(s, count, GFP_KERNEL); if (err) { pr_err("SLUB: Unable to initialize free list for %s\n", s->name); return err; } /* Transform to an offset on the set of pages */ if (s->random_seq) { unsigned int i; for (i = 0; i < count; i++) s->random_seq[i] *= s->size; } return 0; } /* Initialize each random sequence freelist per cache */ static void __init init_freelist_randomization(void) { struct kmem_cache *s; mutex_lock(&slab_mutex); list_for_each_entry(s, &slab_caches, list) init_cache_random_seq(s); mutex_unlock(&slab_mutex); } /* Get the next entry on the pre-computed freelist randomized */ static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab, unsigned long *pos, void *start, unsigned long page_limit, unsigned long freelist_count) { unsigned int idx; /* * If the target page allocation failed, the number of objects on the * page might be smaller than the usual size defined by the cache. */ do { idx = s->random_seq[*pos]; *pos += 1; if (*pos >= freelist_count) *pos = 0; } while (unlikely(idx >= page_limit)); return (char *)start + idx; } /* Shuffle the single linked freelist based on a random pre-computed sequence */ static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) { void *start; void *cur; void *next; unsigned long idx, pos, page_limit, freelist_count; if (slab->objects < 2 || !s->random_seq) return false; freelist_count = oo_objects(s->oo); pos = get_random_u32_below(freelist_count); page_limit = slab->objects * s->size; start = fixup_red_left(s, slab_address(slab)); /* First entry is used as the base of the freelist */ cur = next_freelist_entry(s, slab, &pos, start, page_limit, freelist_count); cur = setup_object(s, cur); slab->freelist = cur; for (idx = 1; idx < slab->objects; idx++) { next = next_freelist_entry(s, slab, &pos, start, page_limit, freelist_count); next = setup_object(s, next); set_freepointer(s, cur, next); cur = next; } set_freepointer(s, cur, NULL); return true; } #else static inline int init_cache_random_seq(struct kmem_cache *s) { return 0; } static inline void init_freelist_randomization(void) { } static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) { return false; } #endif /* CONFIG_SLAB_FREELIST_RANDOM */
This policy looks at the "CONFIG_SLAB_FREELIST_RANDOM" config setting. If it is set, the policy defines functions to randomize the free list order when creating new pages (for security purposes). If it is not set, the functions involved in randomizing the free list are empty, effectively turning off free list randomization.
-
if (s->flags & __CMPXCHG_DOUBLE) { ret = __update_freelist_fast(slab, freelist_old, counters_old, freelist_new, counters_new); } else { unsigned long flags; local_irq_save(flags); ret = __update_freelist_slow(slab, freelist_old, counters_old, freelist_new, counters_new); local_irq_restore(flags); }
This policy determines if the system has support for compare-and-exchange. If so, it will use the "__update_freelist_fast()" function, which uses a compare-and-exchange internally. Otherwise, it will use "__update_freelist_slow()", which uses a lock (specifically a bit-based spinlock) internally; in this variant, interrupts are also disabled around the slow path with local_irq_save()/local_irq_restore().
-
-
elixir.bootlin.com
-
if (dtc->wb_thresh < 2 * wb_stat_error()) { wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE); dtc->wb_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK); } else { wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE); dtc->wb_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK); }
This is a configuration policy: when the dirty threshold for the writeback context (wb_thresh) is below twice the maximal error of a per-CPU stat counter, the kernel uses the exact but slower wb_stat_sum() to compute the reclaimable and dirty page counts; otherwise the cheaper, approximate wb_stat() is accurate enough.
-
static long wb_min_pause(struct bdi_writeback *wb, long max_pause, unsigned long task_ratelimit, unsigned long dirty_ratelimit, int *nr_dirtied_pause)
This function is an algorithmic policy that determines, based on heuristics, the minimum throttle time for a process between consecutive writeback throttling operations for dirty pages. It is used to balance load on the I/O subsystem so that there are not excessive I/O operations that impact overall system performance.
-
if (!laptop_mode && nr_reclaimable > gdtc->bg_thresh && !writeback_in_progress(wb)) wb_start_background_writeback(wb);
This is a configuration policy that determines whether to start background writeout. If laptop_mode (which defers disk activity to save power) is not set, the number of reclaimable dirty pages exceeds the bg_thresh threshold, and writeback is not already in progress, the kernel starts writing back pages in the background.
-
if (thresh > dirty) return 1UL << (ilog2(thresh - dirty) >> 1);
This implements a configuration policy that determines how many more pages a task may dirty before the kernel re-checks the dirty limits; the interval scales roughly with the square root of the gap between the threshold and the current number of dirty pages.
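For example (hypothetical numbers): if thresh - dirty is 1,024 pages, then ilog2(1024) = 10, shifting right by one gives 5, and the interval is 1 << 5 = 32 pages, i.e. the task may dirty roughly sqrt(1024) more pages before the limits are re-checked.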
-
limit -= (limit - thresh) >> 5;
This is a configuration policy that determines by how much the limit should be updated each step: the limit is pulled toward the threshold by 1/32 of the gap. The limit controls the amount of dirty memory allowed in the system.
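For example (hypothetical numbers): with limit = 1,000 pages and thresh = 680 pages, (limit - thresh) >> 5 = 10, so the limit drops to 990; each update pulls the limit toward the threshold by 1/32 of the remaining gap rather than snapping to it at once.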
-
if (dirty <= dirty_freerun_ceiling(thresh, bg_thresh) && (!mdtc || m_dirty <= dirty_freerun_ceiling(m_thresh, m_bg_thresh))) { unsigned long intv; unsigned long m_intv; free_running: intv = dirty_poll_interval(dirty, thresh); m_intv = ULONG_MAX; current->dirty_paused_when = now; current->nr_dirtied = 0; if (mdtc) m_intv = dirty_poll_interval(m_dirty, m_thresh); current->nr_dirtied_pause = min(intv, m_intv); break; } /* Start writeback even when in laptop mode */ if (unlikely(!writeback_in_progress(wb))) wb_start_background_writeback(wb); mem_cgroup_flush_foreign(wb); /* * Calculate global domain's pos_ratio and select the * global dtc by default. */ if (!strictlimit) { wb_dirty_limits(gdtc); if ((current->flags & PF_LOCAL_THROTTLE) && gdtc->wb_dirty < dirty_freerun_ceiling(gdtc->wb_thresh, gdtc->wb_bg_thresh)) /* * LOCAL_THROTTLE tasks must not be throttled * when below the per-wb freerun ceiling. */ goto free_running; } dirty_exceeded = (gdtc->wb_dirty > gdtc->wb_thresh) && ((gdtc->dirty > gdtc->thresh) || strictlimit); wb_position_ratio(gdtc); sdtc = gdtc; if (mdtc) { /* * If memcg domain is in effect, calculate its * pos_ratio. @wb should satisfy constraints from * both global and memcg domains. Choose the one * w/ lower pos_ratio. */ if (!strictlimit) { wb_dirty_limits(mdtc); if ((current->flags & PF_LOCAL_THROTTLE) && mdtc->wb_dirty < dirty_freerun_ceiling(mdtc->wb_thresh, mdtc->wb_bg_thresh)) /* * LOCAL_THROTTLE tasks must not be * throttled when below the per-wb * freerun ceiling. */ goto free_running; } dirty_exceeded |= (mdtc->wb_dirty > mdtc->wb_thresh) && ((mdtc->dirty > mdtc->thresh) || strictlimit); wb_position_ratio(mdtc); if (mdtc->pos_ratio < gdtc->pos_ratio) sdtc = mdtc; }
This is an algorithmic policy that determines whether the process can run freely or needs to be throttled to control the rate of writeback: it checks whether the number of dirty pages exceeds the freerun ceiling, the average of the global threshold and the background threshold (and, when a memcg domain is in effect, the corresponding memcg ceiling as well).
-
shift = dirty_ratelimit / (2 * step + 1); if (shift < BITS_PER_LONG) step = DIV_ROUND_UP(step >> shift, 8); else step = 0; if (dirty_ratelimit < balanced_dirty_ratelimit) dirty_ratelimit += step; else dirty_ratelimit -= step;
This is a configuration policy that determines by how much dirty_ratelimit should be increased or decreased. dirty_ratelimit bounds how fast tasks may dirty pages so that dirtying does not outrun the device's writeback bandwidth.
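A worked example with hypothetical numbers: if step = 800 and dirty_ratelimit = 1,600 pages/s, then shift = 1600 / (2 * 800 + 1) = 0 and step becomes DIV_ROUND_UP(800 >> 0, 8) = 100, so dirty_ratelimit moves by 100 pages/s toward balanced_dirty_ratelimit; when dirty_ratelimit is much larger than the pending step, shift grows and the adjustment is damped even more.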
-
ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); if (ratelimit_pages < 16) ratelimit_pages = 16;
This is a configuration policy that dynamically sizes ratelimit_pages, the number of pages a task may dirty before the dirty limits are re-checked in a writeback cycle; it scales inversely with the number of online CPUs and is floored at 16 pages.
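For example (hypothetical numbers): with dirty_thresh = 51,200 pages and 8 online CPUs, ratelimit_pages = 51200 / (8 * 32) = 200 pages; on a system where the division would yield less than 16, the floor of 16 pages applies.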
-
t = wb_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
This implements a configuration policy that determines the maximum time a task should pause between writeback throttling operations for dirty pages. This ensures that dirty pages are flushed to disk within a reasonable time frame and controls the risk of data loss in case of a system crash, while keeping the pause short enough that a small dirty pool does not drain and leave the disk idle.
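For example (hypothetical numbers, assuming HZ = 100): roundup_pow_of_two(1 + 100/8) = roundup_pow_of_two(13) = 16, so with an average write bandwidth bw of 3,200 pages/s and wb_dirty = 1,000 pages, t = 1000 / (1 + 3200/16) = 1000 / 201, about 4 jiffies; the pause grows with the number of dirty pages and shrinks as device bandwidth increases.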
-
if (IS_ENABLED(CONFIG_CGROUP_WRITEBACK) && mdtc) {
This is a configuration policy that controls whether to also update the limits for the memory cgroup's writeback domain. The config option enables support for controlling the writeback of dirty pages on a per-cgroup basis in the Linux kernel, which allows finer-grained resource management.
-
-
elixir.bootlin.com
-
enum get_ksm_page_flags { GET_KSM_PAGE_NOLOCK, GET_KSM_PAGE_LOCK, GET_KSM_PAGE_TRYLOCK };
This defines the locking behavior when accessing a KSM page. The caller can choose between no locking, locking, or trying to lock without blocking, affecting how the page is accessed and modified.
-
static int ksmd_should_run(void) { return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); }
This function checks whether ksmd should run, based on the KSM_RUN_MERGE flag and on whether there are any memory regions to process. The decision to enable or disable memory merging is controlled by the ksm_run configuration.
-
-
elixir.bootlin.com
-
randomize_stack_top
This function uses a configuration policy to enable Address Space Layout Randomization (ASLR) for a specific process if the PF_RANDOMIZE flag is set. It randomizes the position of the process's stack top to help defend against certain attacks by making memory addresses unpredictable.
-
-
elixir.bootlin.com
-
if (movable_node_is_enabled()) {
This policy, as the comment states, will ignore the kernelcore and movablecore options if movable nodes are enabled (skipping the logic below this if statement and jumping to "out2" instead). The logic for the "movable_node_is_enabled()" function is in "memory_hotplug.h".
-
if (page_poisoning_enabled() || (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && debug_pagealloc_enabled())) {
This check looks at flags and configuration settings to determine if page poisoning should be enabled.
-
if (descending)
This decides whether to iterate forward or backward through the zones passed into the function. The variable "descending" comes from the "arch_has_descending_max_zone_pfns()" function on line 1811, which in turn depends on configuration options.
-
if (overcommit_policy == OVERCOMMIT_NEVER)
This policy controls the memory batch size based on the overcommit policy, choosing a smaller batch size when the policy is OVERCOMMIT_NEVER.
-
-
elixir.bootlin.com
-
if (prev_class) { if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) { pool->size_class[i] = prev_class; continue; } }
This is an algorithmic policy. A zs_pool maintains zspages of different size_classes. However, some size_classes share exactly the same characteristics, namely pages_per_zspage and objs_per_zspage. Recall my other annotation: free zspages are searched by size_class (zspage = find_get_zspage(class)). Thus, grouping such classes together improves memory utilization.
-
zspage = find_get_zspage(class); if (likely(zspage)) { obj = obj_malloc(pool, zspage, handle); /* Now move the zspage to another fullness group, if required */ fix_fullness_group(class, zspage); record_obj(handle, obj); class_stat_inc(class, ZS_OBJS_INUSE, 1); goto out; }
This is an algorithmic policy. Instead of immediately allocating new zspages for each memory request, the algorithm first attempts to find and reuse existing partially filled zspages from a given size class by invoking the find_get_zspage(class) function. It also updates the corresponding fullness groups.
-
-
elixir.bootlin.com
-
1
This is a configuration policy that sets the timeout between retries if vmap_pages_range() fails. This could be a tunable variable.
-
if (!(flags & VM_ALLOC)) area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, KASAN_VMALLOC_PROT_NORMAL);
This is an algorithmic policy: an optimization that prevents marking pages accessible twice. Only mappings created without VM_ALLOC (e.g. ioremap) have not already been marked accessible (unpoisoned), so they require this explicit kasan_unpoison_vmalloc() call here.
-
100U
This is a configuration policy that sets 100 pages as the upper limit for the bulk allocator. However, alloc_pages_bulk_array_mempolicy does not explicitly enforce such a limit in its implementation, so I believe this is really an algorithmic policy related to some sort of optimization.
-
if (!order) {
This is an algorithmic policy that uses the bulk allocator only for order-0 (non-huge) pages. Perhaps the bulk allocator could also be applied to higher-order pages to speed up allocation; so far I have not seen a reason why it could not be.
-
if (likely(count <= VMAP_MAX_ALLOC)) { mem = vb_alloc(size, GFP_KERNEL); if (IS_ERR(mem)) return NULL; addr = (unsigned long)mem; } else { struct vmap_area *va; va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END, node, GFP_KERNEL, VMAP_RAM); if (IS_ERR(va)) return NULL; addr = va->va_start; mem = (void *)addr; }
This is an algorithmic policy that determines whether to use the more efficient vb_alloc(), which avoids the heavier vmap-area allocation path. Unlike alloc_vmap_area(), vb_alloc() does not need to traverse the rb-tree of free vmap areas; it simply finds a large enough block from vmap_block_queue.
-
VMAP_PURGE_THRESHOLD
The threshold VMAP_PURGE_THRESHOLD is a configuration policy that could be tuned by machine learning. Setting this threshold lower reduces purging activity, while setting it higher reduces fragmentation.
-
if (!(force_purge
This is an algorithmic policy that prevents purging blocks with a considerable amount of "usable memory" unless purging is explicitly requested with force_purge.
-
resched_threshold = lazy_max_pages() << 1;
The assignment of resched_threshold (together with lines 1776-1777) is a configuration policy that determines the number of lazily-freed pages below which the purge loop should temporarily yield the CPU to higher-priority tasks.
-
log = fls(num_online_cpus());
This heuristic scales lazy_max_pages() logarithmically with the number of online CPUs, which is a configuration policy. Alternatively, machine learning could determine the optimal scaling function, whether linear, logarithmic, square-root, or another approach.
-
32UL * 1024 * 1024
This is a configuration policy under which the function always returns a multiple of 32 MB worth of pages. This could be a configurable variable rather than a fixed magic number.
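Putting this together with the fls() scaling above (hypothetical numbers, assuming 4 KB pages): on an 8-CPU machine fls(8) = 4, so lazy_max_pages() returns 4 * (32 MB / 4 KB) = 32,768 pages, i.e. up to 128 MB worth of vmap area may be freed lazily before a purge is triggered.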
-
-
elixir.bootlin.com
-
static bool should_skip_region(struct memblock_type *type, struct memblock_region *m, int nid, int flags) { int m_nid = memblock_get_region_node(m); /* we never skip regions when iterating memblock.reserved or physmem */ if (type != memblock_memory) return false; /* only memory regions are associated with nodes, check it */ if (nid != NUMA_NO_NODE && nid != m_nid) return true; /* skip hotpluggable memory regions if needed */ if (movable_node_is_enabled() && memblock_is_hotpluggable(m) && !(flags & MEMBLOCK_HOTPLUG)) return true; /* if we want mirror memory skip non-mirror memory regions */ if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) return true; /* skip nomap memory unless we were asked for it explicitly */ if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) return true; /* skip driver-managed memory unless we were asked for it explicitly */ if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m)) return true; return false; }
This policy determines whether a memblock region should be skipped, based on several checks that incorporate various flags. You can see this policy being used in other functions on lines 1080 and 1184 in this file; these other functions appear to be sub-functions for iterators on the memblock regions.
-
if (memblock_bottom_up())
This policy controls whether the memblock allocator should allocate memory from the bottom up or from the top down.
-
- Sep 2024
-
elixir.bootlin.com
-
if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) { lruvec->file_cost /= 2; lruvec->anon_cost /= 2; }
It is a configuration policy. The policy here adjusts the recorded reclaim cost, i.e. the overhead the kernel incurs to bring a page back if it is evicted. The kernel adopts a decay policy: whenever the combined cost exceeds lrusize/4, both the file and anon costs are halved. Without this decay, after long-term running the accumulated costs would grow very high, and the kernel might mistakenly favour pages that were frequently accessed long ago but are inactive now, degrading performance. The value lrusize/4 is a trade-off between performance and sensitivity: if it were too small, the kernel would re-balance the relative costs too frequently, hurting process performance; if it were too large, the kernel could be misled by stale history and wrongly evict currently popular pages, again degrading performance.
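For example (hypothetical numbers): with lrusize = 1,000,000 pages, once file_cost + anon_cost exceeds 250,000 both counters are halved, so a cost recorded several halvings ago contributes almost nothing; the history decays geometrically instead of accumulating without bound.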
-
if (megs < 16) page_cluster = 2; else page_cluster = 3;
It is a configuration policy that determines the size of the page cluster; the values "2" and "3" are determined externally to the kernel. The page cluster is the actual execution unit when swapping: on a small-memory machine the kernel swaps 4 (2^2) pages at a time, otherwise 8 (2^3) pages at a time. The rationale is to avoid extra memory pressure on small-memory systems while improving performance on large-memory systems.
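For example: with page_cluster = 3 a swap read brings in 1 << 3 = 8 pages at a time, and with page_cluster = 2 it brings in 4 pages; the value is a log2 exponent, not a page count.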
-
-
elixir.bootlin.com
-
mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
It is a configuration policy used to account physical pages to the memory control group. The policy here is that one physical page corresponds to one unit of MEMCG_VMALLOC accounting. If huge pages were used, the value charged per page might not be 1.
-
schedule_timeout_uninterruptible(1);
It is a configuration policy. If the kernel cannot allocate a large enough, properly aligned virtual space, while nofail is specified to disallow failure, it puts the process to sleep for 1 timer tick before retrying. During that period the process cannot be interrupted and leaves the CPU's run queue. The "1" here is a configuration parameter made externally to the kernel; it is a compromise, long enough to avoid retrying too frequently but still a noticeable delay for latency-sensitive processes.
-
if (array_size > PAGE_SIZE) { area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, area->caller); } else { area->pages = kmalloc_node(array_size, nested_gfp, node); }
It is an algorithmic policy and also a configuration policy. The algorithmic policy is to maximize data locality by keeping the page-pointer array in contiguous virtual pages: if the demanded size is larger than one page, the kernel allocates it with __vmalloc_node(); otherwise it calls kmalloc_node() to place it within a single page. To allocate the contiguous virtual pages, __vmalloc_node() is invoked with align = 1, which is the configuration part.
-
if (!counters) return; if (v->flags & VM_UNINITIALIZED) return;
It is an algorithmic policy. show_numa_info() prints how the virtual area v (vm_struct) distributes its pages across NUMA nodes. The two if conditions filter out cases that cannot be reported: the first means the per-node counter buffer is not available, and the second means the virtual area has not been fully initialized yet.
-
if (page) copied = copy_page_to_iter_nofault(page, offset, length, iter); else copied = zero_iter(iter, length);
It is an algorithmic policy. The policy here avoids lock/unlock overhead by using copy_page_to_iter_nofault() together with an if/else branch. Because copy_page_to_iter_nofault() must not take a page fault, the kernel first checks that the physical page is still present. If the page is not valid, the kernel fills the iterator with zeroes instead of returning an error; I think this makes the function more convenient to call, since the caller does not need to verify the return value or take further action to handle it.
-
if (base + last_end < vmalloc_start + last_end) goto overflow; /* * Fitting base has not been found. */ if (va == NULL) goto overflow;
It is an algorithmic policy. The two if conditions check the feasibility of the allocation. The first means the allocation's address would exceed the allowable range, i.e. it overflows; since the base address is searched from higher addresses down to lower ones, reducing the base further cannot help (note that the base depends on the current va). The second means no valid va with a suitable base address was found. In either case the policy jumps to the overflow branch to redo the placement.
-
if (base + start < va->va_start) { va = node_to_va(rb_prev(&va->rb_node)); base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; continue; }
It is an algorithmic policy. It checks whether the start address of the current va is valid. The policy here is to switch to a different va, unlike the handling of the end-address check (explained in my other annotation). The reason is that the base address is searched from high to low: if we kept reducing the base, this condition would keep failing, so moving to a va with a lower start address is the only way forward.
-
if (base + end > va->va_end) { base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; continue; }
It is an algorithmic policy. The outer loop runs until it finds a valid virtual memory space satisfying the requirements. The if condition here guarantees that the space in the current va is large enough; if it is not, the scan continues from high addresses toward low addresses while keeping the alignment requirement, specifically by moving the base address lower, and then retries the check.
-
va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (WARN_ON_ONCE(!va)) continue;
It is an algorithmic policy. The for loop migrates virtual memory management from the early vm_struct linked list to vmap_area structures. Inside the loop, if a new vmap_area cannot be allocated from vmap_area_cachep (va == NULL), the code simply prints a warning and skips the current entry instead of retrying or otherwise guaranteeing integrity.
-
if (!spin_trylock(&vmap_area_lock)) return false;
It is an algorithmic policy. The vmalloc_dump_obj() function prints information about the specified virtual memory object. Because of concurrency, the lock must be held before touching the shared state. The policy here is spin_trylock(): it tries only once and, if that fails, the function simply returns false. I think the intent is to avoid long waiting and thus preserve performance.
-
- Aug 2024
-
elixir.bootlin.com
-
if (hugetlb_cgroup_disabled())
This is probably not an interesting policy decision for ldos. It is a feature flag for the running OS. But if cgroups were decided by policy, then this flag would be controlled by the cgroup decision.
-
- Sep 2023
-
hypothes.is
-
"Surrendering" by Ocean Vuong
-
His family moved to the United States when he was young, and entering kindergarten at age five felt like immigrating all over again, this time into English. Seven of them lived in a one-bedroom apartment, sharing everything. He learned the alphabet through the ABC song, which he still sings to himself when he forgets whether the letter M comes before N.
-
He went to the library during recess to hide from the bullies. Because of his slight frame and soft voice, the boys would call him "pansy" and "fairy" and pull his shorts down in the middle of the schoolyard. In the library he picked the "Great American Speeches" cassette because of its illustration, a microphone against the backdrop of the American flag, one of the few symbols he recognized.
-
- Weeks earlier, I’d been in the library. It was where I would hide during recess. Otherwise, because of my slight frame and soft voice, the boys would call me “pansy” and “fairy” and pull my shorts around my ankles in the middle of the schoolyard. I sat on the floor beside a tape player. From a box of cassettes, I chose one labelled “Great American Speeches.” I picked it because of the illustration, a microphone against a backdrop of the American flag. I picked it because the American flag was one of the few symbols I recognized.
- My family immigrated to the U.S. from Vietnam in 1990, when I was two. We lived, all seven of us, in a one-bedroom apartment in Hartford, Connecticut, and I spent my first five years in America surrounded, inundated, by the Vietnamese language. When I entered kindergarten, I was, in a sense, immigrating all over again, except this time into English. Like any American child, I quickly learned my ABCs, thanks to the age-old melody (one I still sing rapidly to myself when I forget whether “M” comes before “N”). Within a few years, I had become fluent—but only in speech, not in the written word.
-
- Apr 2022
-
twitter.com
-
Dr Ellie Murray. (2021, February 23). A thing I feel is weird about how we are all reacting to this pandemic: Mourning is still so individual & private. It surprises me there aren’t campaigns for armbands, ribbons, wreaths on doors, or some sort of flag in the window to say “a loved one was lost to COVID here”. [Tweet]. @EpiEllie. https://twitter.com/EpiEllie/status/1364033220904427524
-
- Mar 2022
-
-
For Aboriginal Australians, its importance is recognised by its position at the centre of the national Aboriginal flag, developed in 1971 by Luritja artist Harold Thomas.
The Aboriginal flag was developed in 1971 by Luritja artist Harold Thomas. Centering its importance to Aboriginal Australians, the sun appears in the middle of the flag.
It's subtle here, as in other instances, but notice that Hamacher gives the citation to the Indigenous artist that developed the flag and simultaneously underlines the source of visual information that is associated with the flag and the sun. It's not just the knowledge of the two things which are associated to each other, but they're also both associated with a person who is that source of knowledge.
Is this three-way association common in all Indigenous cultures? While names may be tricky for some, the visual image of a particular person's face, body, and presence is usually very memorable and thereby easy to attach to various forms of knowledge.
Does the person/source of knowledge form or act like an 'oral folder' for Indigenous knowledge?
-
- Jul 2021
-
poisotlab.github.io
-
contest
Are we really contesting that?
-
predictions of binary interactions can be more readily interpreted.
Repeated sentence
-
-
www.migrationencounters.org
-
Ben: Oh yeah. Yeah. Yes, I felt normal, I had a lot of friends and our high school, the high school I went to, there was very few Hispanics- period, very few blacks. If you looked at that high school, if you pulled it up—well actually they made them remove the confederate flag, because the confederate flag was part of, it was the school football team logo and it was on their helmets. They were called the Southland Prairie Warriors, and when they ran out on the football field, they carried the confederate flag—and the high school flew the confederate flag up with the Texas and US flag—which it would make you think the opposite. I can't say that the school was…Of course there were a few people, but I did okay. I didn't feel out of place and I felt pretty well accepted by others.
Time in the US, School, High School, Fitting in/belonging
-
- Jun 2021
-
poisotlab.github.io
-
Given two species co-occur, a neutral approach to probabilistic interactions would assume that the effect of abundances and trait matching would have no effec
I think you mean: A neutral approach to probabilistic interactions would rely only on the effect of abundances and assume trait-matching would have no effect?
-
- Mar 2021
-
twitter.com
-
ReconfigBehSci on Twitter: ‘RT @factmata: We are excited to launch of a side project we worked on this summer—Https://t.co/2yGSgkqzTG. We scan Twitter profiles with…’ / Twitter. (n.d.). Retrieved 6 March 2021, from https://twitter.com/SciBeh/status/1323664538777124867
-
- Feb 2021
-
poisotlab.github.io
-
This could potentially be solved through our framework of predicting networks first, interactions next, and finally the realized species pool.
I think this might be confusing, as readers could think this is the actual framework of our paper. We could maybe clarify that our framework can also be used the other way around, using our predictions of ecological networks and species interactions to make better predictions of species pools.
-
all interactions occur between species in each pool
I think this definition of bipartite network is a little confusing. We could say something like:
"Bipartite networks are divided into two disjoint sets of species and interactions occur between members of different sets (e.g. plant-pollinator and host-parasite networks)"
-
there have been calls for a probabilistic species pool
You say "there have been calls" twice. Maybe simplify the sentence with something like: "through a probabilistic species pool, and more importantly ..."
-
.S
missing space
-
embedding projects
missing words
-
reached an accuracy of ≈ 0.8
Partly out of curiosity, and partly because it might be better to state it explicitly: how was accuracy calculated? It is not clear whether correctly predicted absences are used. As stated before, there are no true negatives.
-
Here adopt a question-driven approach to serve as a guide through the path toward building models to predict and forecast the structure of ecological networks across space, and to identify the next steps in the research regime.
The sentence is very hard to read; too many long noun strings.
-
-
web.hypothes.is
-
It is not currently possible to “un-flag” an annotation — if an annotation is flagged by mistake, the group creator can choose not to hide it.
Wow, this is so emblematic of the age... any person can taint another irrevocably with a flag!
And an accident cannot be undone!
Now when I screen share on Zoom to discuss our document, everyone will see a big red flag.
-
- Dec 2020
- Sep 2020
-
dagrs.berkeley.edu
-
Grzegorz Grudziński
Hello world
-
- Jul 2020
-
gitlab.com
- Jun 2020
-
www.scientificamerican.com
-
Sathya, C. (2020, June 17). Pandemic-Related Gun Purchases Raise Suicide Risks. Scientific American. https://www.scientificamerican.com/article/pandemic-related-gun-purchases-raise-suicide-risks/
-
- May 2020
-
gitlab.com
-
The Inherited Environment Variables feature is under development and not ready for production use. It is deployed behind a feature flag that is disabled by default.
-
- Jun 2019
-
earlybritishlit.pressbooks.com
-
Yanity and vice
Does anybody know what this means?
-
- Jan 2016
-
www.elysee.fr
-
le bleu et le rouge
I would have liked to know which blue and which red are used on the flag.
-