- Nov 2024
-
elixir.bootlin.com
-
#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
/*
 * Fast-gup relies on pte change detection to avoid concurrent pgtable
 * operations.
 *
 * To pin the page, fast-gup needs to do below in order:
 * (1) pin the page (by prefetching pte), then (2) check pte not changed.
 *
 * For the rest of pgtable operations where pgtable updates can be racy
 * with fast-gup, we need to do (1) clear pte, then (2) check whether page
 * is pinned.
 *
 * Above will work for all pte-level operations, including THP split.
 *
 * For THP collapse, it's a bit more complicated because fast-gup may be
 * walking a pgtable page that is being freed (pte is still valid but pmd
 * can be cleared already). To avoid race in such condition, we need to
 * also check pmd here to make sure pmd doesn't change (corresponds to
 * pmdp_collapse_flush() in the THP collapse code path).
 */
static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
			 unsigned long end, unsigned int flags,
			 struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	if (!ptep)
		return 0;
	do {
		pte_t pte = ptep_get_lockless(ptep);
		struct page *page;
		struct folio *folio;

		/*
		 * Always fallback to ordinary GUP on PROT_NONE-mapped pages:
		 * pte_access_permitted() better should reject these pages
		 * either way: otherwise, GUP-fast might succeed in
		 * cases where ordinary GUP would fail due to VMA access
		 * permissions.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, flags & FOLL_WRITE))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			if (unlikely(flags & FOLL_LONGTERM))
				goto pte_unmap;

			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, flags, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		folio = try_grab_folio(page, 1, flags);
		if (!folio)
			goto pte_unmap;

		if (unlikely(folio_is_secretmem(folio))) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
		    unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (!folio_fast_pin_allowed(folio, flags)) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		/*
		 * We need to make the page accessible if and only if we are
		 * going to access its content (the FOLL_PIN case). Please
		 * see Documentation/core-api/pin_user_pages.rst for
		 * details.
		 */
		if (flags & FOLL_PIN) {
			ret = arch_make_page_accessible(page);
			if (ret) {
				gup_put_folio(folio, 1, flags);
				goto pte_unmap;
			}
		}
		folio_set_referenced(folio);
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	if (pgmap)
		put_dev_pagemap(pgmap);
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
			 unsigned long end, unsigned int flags,
			 struct page **pages, int *nr)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
Fast-gup's lockless way of coping with concurrent pgtable operations: pin the page first, then recheck that the pte (and pmd) are unchanged; the racy pgtable paths do the reverse, clearing the pte and then checking whether the page is pinned.
-
static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						int *locked,
						unsigned int flags)
{
	long ret, pages_done;
	bool must_unlock = false;

	/*
	 * The internal caller expects GUP to manage the lock internally and the
	 * lock must be released when this returns.
	 */
	if (!*locked) {
		if (mmap_read_lock_killable(mm))
			return -EAGAIN;
		must_unlock = true;
		*locked = 1;
	} else
		mmap_assert_locked(mm);

	if (flags & FOLL_PIN)
		mm_set_has_pinned_flag(&mm->flags);

	/*
	 * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
	 * is to set FOLL_GET if the caller wants pages[] filled in (but has
	 * carelessly failed to specify FOLL_GET), so keep doing that, but only
	 * for FOLL_GET, not for the newer FOLL_PIN.
	 *
	 * FOLL_PIN always expects pages to be non-null, but no need to assert
	 * that here, as any failures will be obvious enough.
	 */
	if (pages && !(flags & FOLL_PIN))
		flags |= FOLL_GET;

	pages_done = 0;
	for (;;) {
		ret = __get_user_pages(mm, start, nr_pages, flags, pages,
				       locked);
		if (!(flags & FOLL_UNLOCKABLE)) {
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			pages_done = ret;
			break;
		}

		/* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/*
			 * VM_FAULT_RETRY didn't trigger or it was a
			 * FOLL_NOWAIT.
			 */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/*
		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
		 * For the prefault case (!pages) we only update counts.
		 */
		if (likely(pages))
			pages += ret;
		start += ret << PAGE_SHIFT;

		/* The lock was temporarily dropped, so we must unlock later */
		must_unlock = true;

retry:
		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * with both FAULT_FLAG_ALLOW_RETRY and
		 * FAULT_FLAG_TRIED. Note that GUP can be interrupted
		 * by fatal signals of even common signals, depending on
		 * the caller's request. So we need to check it before we
		 * start trying again otherwise it can loop forever.
		 */
		if (gup_signal_pending(flags)) {
			if (!pages_done)
				pages_done = -EINTR;
			break;
		}

		ret = mmap_read_lock_killable(mm);
		if (ret) {
			BUG_ON(ret > 0);
			if (!pages_done)
				pages_done = ret;
			break;
		}

		*locked = 1;
		ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
				       pages, locked);
		if (!*locked) {
			/* Continue to retry until we succeeded */
			BUG_ON(ret != 0);
			goto retry;
		}
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		if (likely(pages))
			pages++;
		start += PAGE_SIZE;
	}
	if (must_unlock && *locked) {
		/*
		 * We either temporarily dropped the lock, or the caller
		 * requested that we both acquire and drop the lock. Either way,
		 * we must now unlock, and notify the caller of that state.
		 */
		mmap_read_unlock(mm);
		*locked = 0;
	}
	return pages_done;
}
Same as __get_user_pages(), but it manages mmap_lock itself: it takes the read lock if the caller hasn't, retries after VM_FAULT_RETRY, and drops the lock on the way out when it was the one to take it.
-
static long __get_user_pages(struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		int *locked)
{
	long ret = 0, i = 0;
	struct vm_area_struct *vma = NULL;
	struct follow_page_context ctx = { NULL };

	if (!nr_pages)
		return 0;

	start = untagged_addr_remote(mm, start);

	VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			/*
			 * MADV_POPULATE_(READ|WRITE) wants to handle VMA
			 * lookups+error reporting differently.
			 */
			if (gup_flags & FOLL_MADV_POPULATE) {
				vma = vma_lookup(mm, start);
				if (!vma) {
					ret = -ENOMEM;
					goto out;
				}
				if (check_vma_flags(vma, gup_flags)) {
					ret = -EINVAL;
					goto out;
				}
				goto retry;
			}
			vma = gup_vma_lookup(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &page : NULL);
				if (ret)
					goto out;
				ctx.page_mask = 0;
				goto next_page;
			}

			if (!vma) {
				ret = -EFAULT;
				goto out;
			}
			ret = check_vma_flags(vma, gup_flags);
			if (ret)
				goto out;
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();

		page = follow_page_mask(vma, start, foll_flags, &ctx);
		if (!page || PTR_ERR(page) == -EMLINK) {
			ret = faultin_page(vma, start, &foll_flags,
					   PTR_ERR(page) == -EMLINK, locked);
			switch (ret) {
			case 0:
				goto retry;
			case -EBUSY:
			case -EAGAIN:
				ret = 0;
				fallthrough;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				goto out;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page. If the caller expects **pages to be
			 * filled in, bail out now, because that can't be done
			 * for this page.
			 */
			if (pages) {
				ret = PTR_ERR(page);
				goto out;
			}
		} else if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
next_page:
		page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;

		if (pages) {
			struct page *subpage;
			unsigned int j;

			/*
			 * This must be a large folio (and doesn't need to
			 * be the whole folio; it can be part of it), do
			 * the refcount work for all the subpages too.
			 *
			 * NOTE: here the page may not be the head page
			 * e.g. when start addr is not thp-size aligned.
			 * try_grab_folio() should have taken care of tail
			 * pages.
			 */
			if (page_increm > 1) {
				struct folio *folio;

				/*
				 * Since we already hold refcount on the
				 * large folio, this should never fail.
				 */
				folio = try_grab_folio(page, page_increm - 1,
						       foll_flags);
				if (WARN_ON_ONCE(!folio)) {
					/*
					 * Release the 1st page ref if the
					 * folio is problematic, fail hard.
					 */
					gup_put_folio(page_folio(page), 1,
						      foll_flags);
					ret = -EFAULT;
					goto out;
				}
			}

			for (j = 0; j < page_increm; j++) {
				subpage = nth_page(page, j);
				pages[i + j] = subpage;
				flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
				flush_dcache_page(subpage);
			}
		}

		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
out:
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return i ? i : ret;
}
Literally the core policy logic of GUP: per-VMA lookup and permission checks, follow_page_mask(), and the faultin_page() retry loop. The most important piece of code for understanding GUP.
-
	if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
		return NULL;

	if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
		return NULL;
Branch-prediction hints (unlikely) and a warn-once check (WARN_ON_ONCE) on the flags. Not actual policy logic, so low confidence.
-
	if (is_zero_page(page))
		return page_folio(page);

	folio = try_get_folio(page, refs);
	if (!folio)
		return NULL;
Just checking for the zero page and trying to grab a folio reference. Unlikely to be policy logic.
-
	if (gup_flags & FOLL_PIN)
		mm_set_has_pinned_flag(&current->mm->flags);

	if (!(gup_flags & FOLL_FAST_ONLY))
		might_lock_read(&current->mm->mmap_lock);

	start = untagged_addr(start) & PAGE_MASK;
	len = nr_pages << PAGE_SHIFT;
	if (check_add_overflow(start, len, &end))
		return -EOVERFLOW;
	if (end > TASK_SIZE_MAX)
		return -EFAULT;
	if (unlikely(!access_ok((void __user *)start, len)))
		return -EFAULT;
Likely just input validation: checking the requested address range for overflow and accessibility before the fast walk.
-
	 */
	if (gup_flags & FOLL_MADV_POPULATE) {
		vma = vma_lookup(mm, start);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}
		if (check_vma_flags(vma, gup_flags)) {
			ret = -EINVAL;
			goto out;
		}
		goto retry;
	}
Handling for the MADV_POPULATE_(READ|WRITE) populate path (FOLL_MADV_POPULATE), for sure.
-
- Oct 2024
-
Local file
-
Caked ink may be removed with type putty, alcohol, carbon tetrachloride, or one of the proprietary dry-cleaning fluids, applied with a brush. Press the type putty onto the type, peel it off, and the caked ink comes with it. If you use a liquid, first lift the type and put paper under it to prevent dirt from dripping into the machine. When using type-cleaning fluid, be sure to place paper under the type to prevent dirt from dripping into the machine. Wipe the type dry with a cloth before using the machine again.
Dry-cleaning solvents in 1941 were likely Varsol or Stoddard's Formula.
compare to trichloroethane<br /> https://hypothes.is/a/EyBIAFXAEe-AcP-Atlj_aQ
Note discontinuation of carbon tetrachloride<br /> https://hypothes.is/a/bfdi_I90Ee-OQLN0HpsE7Q
-
- May 2024
-
www.reddit.com
-
Most typewriter shops did not use alcohol, as it was ineffective and contained water. Industrial alcohols contained ketones and acetone, which will melt plastic and remove paint.
Solvents for typewriters used in repair shops:
- White mineral spirits with a squeeze bottle (sometimes also called Varsol, Stoddard's Formula, and possibly Inhibisol).
- Naphtha (aka lighter fluid; used in Zippo lighters, and frequently seen in Europe). PB B'laster is essentially pressurized naphtha in a can.
- Auto carb and brake cleaners, usually pressurized in a can. These usually have acetone in them and will melt plastic. Will remove WD-40 if accidentally used on a typewriter.
For cleaning typeslugs, one can use naphtha or mineral spirits with a brass bristle brush.
For platen cleaning, try mineral spirits or Fedron.
Only oil the carriage rails for the bearings or trucks.
(This is all colloquial advice, albeit with experience, so check specific facts about what certain products contain.)
-
You will notice that the manual was printed in 1920. Gas was the only cleaner available then. Just one year later, in 1921, Stoddard's Formula (Varsol) was invented for the dry-cleaning business. Everything changed then, and Varsol became the cleaner of choice.
should find a better reference
-
- Mar 2024
-
www.youtube.com
-
LOL, stupid shit. Only the multiplication of scalar values is always defined.
Multiplying two non-scalar values is defined only in some cases, and certainly not for money.
Multiplying two lengths (1 meter x 1 meter) only works if the two lengths are orthogonal... idiots.
-
- Nov 2023
-
www.econometrics-with-r.org
-
$$\sigma^2_{\hat{\beta}_1} = \frac{1}{n} \frac{Var\left[(X_i - \mu_X) u_i\right]}{\left[Var(X_i)\right]^2}.$$
This formula is wrong. It should be as follows: $$Var(\hat{\beta}_1) = \frac{\sum_{i=1}^N Var(x_i - \bar{x})^2 u_i}{(Var(\sum_{i=1}^N (x_i - \bar{x})^2))}$$
Otherwise, $\sigma_{\hat{\beta}_1}$ is different for each $u_i$.
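For comparison (not part of the quoted text), a standard finite-sample expression for the slope variance under heteroskedasticity, conditional on the regressors, is
$$Var(\hat{\beta}_1 \mid X_1, \ldots, X_n) = \frac{\sum_{i=1}^n (X_i - \bar{X})^2 \, Var(u_i \mid X_i)}{\left( \sum_{i=1}^n (X_i - \bar{X})^2 \right)^2},$$
which may help in comparing the two expressions above.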
-
- Aug 2023
-
engineering.universe.com
-
def rating(like_count, angry_count)
  like_count * 2 - angry_count
end
-
-
math.stackexchange.com
-
so a formula is like a dead equation?
-
- Dec 2021
-
blogs.dickinson.edu
-
average sentence complexity
Formula: total # of punctuation marks used / total # of sentences
-
average sentence length
Formula: total # of words / total # of sentences
-
average line length
Formula: total # of words / total # of lines
-
hapax legomena ratio
Formula: # of words that appear exactly once/total # of words
-
type-token ratio
Formula: total # of distinct words / total # of words
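A minimal Python sketch of the five formulas above (sentence complexity, sentence length, line length, hapax legomena ratio, type-token ratio). It assumes whitespace-separated words, sentences ending in ., ! or ?, and Python's string.punctuation as the punctuation set; these choices are mine, not from the original post.

import re
import string

def stylometry(text):
    words = text.split()                                  # total # of words
    lines = text.splitlines()                             # total # of lines
    sentences = re.findall(r"[.!?]+", text)               # crude sentence count
    punctuation = [c for c in text if c in string.punctuation]
    tokens = [w.strip(string.punctuation).lower() for w in words]
    tokens = [t for t in tokens if t]
    counts = {}
    for t in tokens:
        counts[t] = counts.get(t, 0) + 1
    hapax = [t for t, n in counts.items() if n == 1]      # words appearing exactly once
    return {
        "avg sentence complexity": len(punctuation) / len(sentences),
        "avg sentence length": len(words) / len(sentences),
        "avg line length": len(words) / len(lines),
        "hapax legomena ratio": len(hapax) / len(tokens),
        "type-token ratio": len(counts) / len(tokens),
    }

print(stylometry("Call me Ishmael. Some years ago, never mind how long precisely, I went to sea!"))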
-
- Apr 2021
-
www.bio-rad.com
-
Formula for Calculating Concentration
-
- Sep 2020
- Oct 2019
-
understandinguncertainty.org
-
probability compounds
compound probability = 1 - (1 - annual probability of exceedance)^n, where:
- annual probability = 0.01
- n = exposure period = 10 years
On the other hand, the Mean Recurrence Interval (MRI) = Return Period = 1 / annual probability of exceedance, but only if MRI >= 10 years; if not, then:
annual probability of exceedance = 1 - e^(-1/MRI)
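A quick Python check of the two formulas above, using the quoted numbers (annual probability of exceedance 0.01 and a 10-year exposure period) plus an illustrative MRI of 100 years; the function names are mine, not from the source.

import math

def compound_probability(annual_prob, years):
    # probability of at least one exceedance during the exposure period
    return 1 - (1 - annual_prob) ** years

def annual_prob_from_mri(mri_years):
    # annual probability of exceedance implied by a mean recurrence interval
    return 1 - math.exp(-1 / mri_years)

print(compound_probability(0.01, 10))   # ~0.096, i.e. roughly a 10% chance over 10 years
print(annual_prob_from_mri(100))        # ~0.00995, close to 1/100 for long MRIs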
-
- Mar 2019
-
www.colciencias.gov.co
-
Group cohesion indicator (IC
If the group has a researcher who does not produce, the result will be lower. There is a relationship between the author and their products.
-
2.7. Production indicators (ITP
Review the measurement formula presented on the following page.
-
- Nov 2018
-
en.wikipedia.org
-
Net profit: To calculate net profit for a venture (such as a company, division, or project), subtract all costs, including a fair share of total corporate overheads, from the gross revenues or turnover. Net profit = sales revenue − total costs
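A purely illustrative example with made-up figures: if a venture's sales revenue is $500,000 and its total costs (including its share of corporate overheads) are $430,000, its net profit is $500,000 − $430,000 = $70,000.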
-
- May 2017
-
-
by the Gauss formula we have (4.8) $\widetilde{R}_{slij} = h_{si}h_{lj} - h_{li}h_{sj}$.
-
-
terrytao.wordpress.com
-
the variations of various tensors under the Ricci flow: (31)
-
variation formula for the Ricci tensor (13) where is the trace, and the Lichnerowicz Laplacian (or Hodge-de Rham Laplacian) on symmetric rank (0,2) tensors is defined by the formula (14) and is the usual connection Laplacian.
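For reference (not quoted from the post, and sign conventions for the Riemann tensor vary), the Lichnerowicz Laplacian on a symmetric rank (0,2) tensor $h$ is commonly defined by
$$\Delta_L h_{ij} = \Delta h_{ij} + 2 R_{ikjl} h^{kl} - R_{ik} h^{k}{}_{j} - R_{jk} h^{k}{}_{i},$$
where $\Delta$ is the connection Laplacian.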
-
- Nov 2013
-
cran.r-project.org
-
In its space-time representation (Ogata, 1998), the ETAS model is a temporal marked point process model, and a special case of a marked Hawkes process, with conditional intensity function
$$\lambda(t, x, y \mid H_t) = \mu(x, y) + \sum_{t_i < t} k(m_i)\, g(t - t_i)\, f(x - x_i, y - y_i \mid m_i)$$
Testing out PDF annotation that also includes LaTeX-rendered formulas.
-