763 Matching Annotations
  1. Mar 2023
    1. Send the 304 Not Modified response

      ```js
      import etag from "etag";
      import { renderToString } from "react-dom/server";
      import type { EntryContext, HandleDataRequestFunction } from "remix";
      import { RemixServer } from "remix";

      export default function handleRequest(
        request: Request,
        status: number,
        headers: Headers,
        remixContext: EntryContext
      ) {
        let markup = renderToString(
          <RemixServer context={remixContext} url={request.url} />
        );

        headers.set("Content-Type", "text/html");
        headers.set("ETag", etag(markup));

        // check if the If-None-Match header matches the ETag
        if (request.headers.get("If-None-Match") === headers.get("ETag")) {
          // and send an empty Response with status 304 and the headers
          return new Response("", { status: 304, headers });
        }

        return new Response("<!DOCTYPE html>" + markup, { status, headers });
      }

      export let handleDataRequest: HandleDataRequestFunction = async (
        response: Response,
        { request }
      ) => {
        let body = await response.text();

        if (request.method.toLowerCase() === "get") {
          response.headers.set("ETag", etag(body));
          // As with document requests, check the If-None-Match header
          // and compare it with the ETag; if they match, send the empty 304 Response
          if (
            request.headers.get("If-None-Match") ===
            response.headers.get("ETag")
          ) {
            return new Response("", { status: 304, headers: response.headers });
          }
        }

        return response;
      };
      ```

    2. All Together

      ```js
      import etag from "etag";
      import { renderToString } from "react-dom/server";
      import type { EntryContext, HandleDataRequestFunction } from "remix";
      import { RemixServer } from "remix";

      export default function handleRequest(
        request: Request,
        status: number,
        headers: Headers,
        remixContext: EntryContext
      ) {
        let markup = renderToString(
          <RemixServer context={remixContext} url={request.url} />
        );

        headers.set("Content-Type", "text/html");
        headers.set("ETag", etag(markup));

        return new Response("<!DOCTYPE html>" + markup, { status, headers });
      }

      export let handleDataRequest: HandleDataRequestFunction = async (
        response: Response
      ) => {
        let body = await response.text();
        response.headers.set("ETag", etag(body));
        return response;
      };
      ```

    3. Using ETags for document requests

      ```js
      import etag from "etag";
      import { renderToString } from "react-dom/server";
      import type { EntryContext } from "remix";
      import { RemixServer } from "remix";

      export default function handleRequest(
        request: Request,
        status: number,
        headers: Headers,
        remixContext: EntryContext
      ) {
        let markup = renderToString(
          <RemixServer context={remixContext} url={request.url} />
        );

        headers.set("Content-Type", "text/html");
        // add the ETag header using the markup as value
        headers.set("ETag", etag(markup));

        return new Response("<!DOCTYPE html>" + markup, { status, headers });
      }
      ```

    4. Using ETags for data requests

      ```js
      import etag from "etag";
      import type { HandleDataRequestFunction } from "remix";

      export let handleDataRequest: HandleDataRequestFunction = async (
        response: Response,
        { request }
      ) => {
        // parse the response body as text
        let body = await response.text();

        // only add the ETag for GET requests
        if (request.method.toLowerCase() === "get") {
          // and use the body to create the ETag
          response.headers.set("ETag", etag(body));
        }

        // return the response
        return response;
      };
      ```

    1. ```js
      import { renderToString } from "react-dom/server";
      import { RemixServer } from "remix";
      import type { EntryContext } from "remix";
      import { etag } from 'remix-etag';

      export default function handleRequest(
        request: Request,
        responseStatusCode: number,
        responseHeaders: Headers,
        remixContext: EntryContext
      ) {
        const markup = renderToString(
          <RemixServer context={remixContext} url={request.url} />
        );

        responseHeaders.set("Content-Type", "text/html");

        const response = new Response("<!DOCTYPE html>" + markup, {
          status: responseStatusCode,
          headers: responseHeaders,
        });
        return etag({ request, response });
      }
      ```

  2. developer.mozilla.org
    1. ```http
      HTTP/1.1 200 OK
      Content-Type: text/plain
      Transfer-Encoding: chunked
      Trailer: Expires

      7\r\n
      Mozilla\r\n
      9\r\n
      Developer\r\n
      7\r\n
      Network\r\n
      0\r\n
      Expires: Wed, 21 Oct 2015 07:28:00 GMT\r\n
      \r\n
      ```

    1. Async iteration of a stream using for await...of

      This example shows how you can process the fetch() response using a for await...of loop to iterate through the arriving chunks.

      ```js
      const response = await fetch("https://www.example.org");
      let total = 0;

      // Iterate response.body (a ReadableStream) asynchronously
      for await (const chunk of response.body) {
        // Do something with each chunk
        // Here we just accumulate the size of the response.
        total += chunk.length;
      }

      // Do something with the total
      console.log(total);
      ```

    1. The body read-only property of the Response interface is a ReadableStream of the body contents.
    1. You'll notice that for the app/routes/jokes/$jokeId.tsx route, in addition to Cache-Control we've also set the Vary header to Cookie. This is because we're returning something specific to the user who is logged in, so we want the cache to be associated with that particular Cookie value rather than shared between users; the browser and CDN will then not deliver the cached value if the cookie differs from the cached response's cookie.
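      A minimal sketch of how that could look in a Remix route's `headers` export; the cache values here are assumptions for illustration, not the tutorial's exact ones:

      ```js
      // app/routes/jokes/$jokeId.tsx (hypothetical values)
      export function headers() {
        return {
          // cache the per-user response briefly...
          "Cache-Control": "private, max-age=300",
          // ...but key the cached entry on the Cookie header,
          // so one user's cached page is never served to another
          Vary: "Cookie",
        };
      }
      ```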
  3. Feb 2023
    1. ```js
      const supportsRequestStreams = (() => {
        let duplexAccessed = false;

        const hasContentType = new Request('', {
          body: new ReadableStream(),
          method: 'POST',
          get duplex() {
            duplexAccessed = true;
            return 'half';
          },
        }).headers.has('Content-Type');

        return duplexAccessed && !hasContentType;
      })();

      if (supportsRequestStreams) {
        // …
      } else {
        // …
      }
      ```
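      If detection succeeds, a streaming upload might look like this sketch; the URL is a placeholder, and note the `duplex: 'half'` option the streaming body requires:

      ```js
      if (supportsRequestStreams) {
        // a body that is produced incrementally rather than buffered up front
        const body = new ReadableStream({
          start(controller) {
            controller.enqueue(new TextEncoder().encode('hello'));
            controller.close();
          },
        });
        await fetch('https://example.com/upload', { method: 'POST', body, duplex: 'half' });
      }
      ```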

    1. ```js
      /**
       * Fetch and process the stream
       */
      async function process() {
        // Retrieve NDJSON from the server
        const response = await fetch('http://localhost:3000/request');

        const results = response.body
          // From bytes to text:
          .pipeThrough(new TextDecoderStream())
          // Buffer until newlines:
          .pipeThrough(splitStream('\n'))
          // Parse chunks as JSON:
          .pipeThrough(parseJSON());

        // Loop through the results and write to the DOM
        writeToDOM(results.getReader());
      }

      /**
       * Read through the results and write to the DOM
       * @param {object} reader
       */
      function writeToDOM(reader) {
        reader.read().then(
          ({ value, done }) => {
            if (done) {
              console.log("The stream was already closed!");
            } else {
              // Build up the values
              let result = document.createElement('div');
              result.innerHTML =
                `<div>ID: ${value.id} - Phone: ${value.phone} - Result: ${value.result}</div><br>`;

              // Prepend to the target
              targetDiv.insertBefore(result, targetDiv.firstChild);

              // Recursively call
              writeToDOM(reader);
            }
          },
          (e) => console.error("The stream became errored and cannot be read from!", e)
        );
      }
      ```

  4. Jan 2023
    1. ```console
      $ curl -LH "Accept: application/vnd.schemaorg.ld+json" https://doi.org/10.5438/4K3M-NYVG
      {
        "@context": "http://schema.org",
        "@type": "ScholarlyArticle",
        "@id": "https://doi.org/10.5438/4k3m-nyvg",
        "url": "https://blog.datacite.org/eating-your-own-dog-food/",
        "additionalType": "BlogPosting",
        "name": "Eating your own Dog Food",
        "author": {
          "name": "Martin Fenner",
          "givenName": "Martin",
          "familyName": "Fenner",
          "@id": "https://orcid.org/0000-0003-1419-2405"
        },
        "description": "Eating your own dog food is a slang term to describe that an organization should itself use the products and services it provides. For DataCite this means that we should use DOIs with appropriate metadata and strategies for long-term preservation for...",
        "license": "https://creativecommons.org/licenses/by/4.0/legalcode",
        "version": "1.0",
        "keywords": "datacite, doi, metadata, FOS: Computer and information sciences",
        "inLanguage": "en",
        "dateCreated": "2016-12-20",
        "datePublished": "2016-12-20",
        "dateModified": "2016-12-20",
        "isPartOf": {
          "@id": "https://doi.org/10.5438/0000-00ss",
          "@type": "CreativeWork"
        },
        "citation": [
          { "@id": "https://doi.org/10.5438/0012", "@type": "CreativeWork" },
          { "@id": "https://doi.org/10.5438/55e5-t5c0", "@type": "CreativeWork" }
        ],
        "schemaVersion": "http://datacite.org/schema/kernel-4",
        "periodical": {
          "@type": "Series",
          "identifier": "10.5438/0000-00SS",
          "identifierType": "DOI"
        },
        "publisher": { "@type": "Organization", "name": "DataCite" },
        "provider": { "@type": "Organization", "name": "datacite" }
      }
      ```

    1. HTTP frames

      I'm guessing each frame has an identifier for the previous and next one. The server/user agent then parses all these frames, checks that the previous/next frames are all present, and gives the okay if they're all there, or re-requests the ones that aren't. Something with TCP likely goes on here.

  5. Dec 2022
    1. GET is to obtain information. There should not be any "body" data sent with that request; this will not trigger an error, but we simply don't look there in a GET request. Only query string parameters are allowed. This request will never change anything, so you can call it as many times as you want: GET is an IDEMPOTENT request. POST is to add new or modify existing information. If the request URL doesn't contain a resource ID, we treat the payload as new data that must be inserted. If you want to modify existing data, you need to specify an ID in the request URL (like POST /contacts/hg5fF). POST is NOT IDEMPOTENT, so you need to be careful not to send the same request twice. DELETE is only to remove information. As a general rule we do not allow any "body" data with that type of request, nor do we allow any query string; it should simply be DELETE /contacts/hg5fF.
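      A minimal sketch of those conventions with `fetch`, reusing the hypothetical /contacts resource from the quote:

      ```js
      // GET: read-only and idempotent; parameters go in the query string, never the body
      await fetch("/contacts?active=true");

      // POST without an ID: create new data (not idempotent, so don't retry blindly)
      await fetch("/contacts", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ name: "Ada" }),
      });

      // POST with an ID: modify the existing resource
      await fetch("/contacts/hg5fF", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ name: "Ada L." }),
      });

      // DELETE: no body, no query string
      await fetch("/contacts/hg5fF", { method: "DELETE" });
      ```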
    1. But anti-spam software often fetches all resources in mail header fields automatically, without any action by the user, and there is no mechanical way for a sender to tell whether a request was made automatically by anti-spam software or manually requested by a user. To prevent accidental unsubscriptions, senders return landing pages with a confirmation step to finish the unsubscribe request. A live user would recognize and act on this confirmation step, but an automated system would not. That makes the unsubscription process more complex than a single click.

      HTTP: method: safe methods: GETs have to be safe, just in case a machine crawls them.

    2. The mail sender MUST NOT return an HTTPS redirect, since redirected POST actions have historically not worked reliably, and many browsers have turned redirected HTTP POSTs into GETs.
    3. This document describes a method for signaling a one-click function for the List-Unsubscribe email header field. The need for this arises out of the actuality that mail software sometimes fetches URLs in mail header fields, and thereby accidentally triggers unsubscriptions in the case of the List-Unsubscribe header field.
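      A sketch of the header pair this one-click signaling method uses; the unsubscribe URL here is a made-up example:

      ```http
      List-Unsubscribe: <https://example.com/unsubscribe/opaque-token>
      List-Unsubscribe-Post: List-Unsubscribe=One-Click
      ```

      Because the action requires a POST with this body, a header-fetching anti-spam scanner that merely GETs the URL will not trigger the unsubscription.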
  6. Nov 2022
  7. Oct 2022
    1. How do REST APIs work?

      REST, or “representational state transfer,” is a type of software design that gives access to data (aka “web resources”) by using a uniform and predefined set of operations. The payload (the data to be delivered), defined in the request itself, will be formatted in a language such as HTML, JSON, or XML. The set of operations are the methods available to HTTP, which is the underlying protocol for how browsers retrieve websites from servers. These methods include GET, POST, PUT, DELETE, and others.

      What a payload is.
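      A small sketch of what the payload looks like in practice: here it is the JSON body of a hypothetical POST request (URL and fields invented for illustration):

      ```js
      const res = await fetch("https://api.example.com/books", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        // the payload: the data to be delivered, formatted as JSON
        body: JSON.stringify({ title: "HTTP Basics", year: 2022 }),
      });
      ```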

    2. REST APIs

      If you’ve heard people talk about JSON (javascript object notation), chances are they’re talking about REST APIs. Over 70% of all public APIs use REST, because of its fast performance, reliability, and ability to scale by reusing modular components without affecting the system as a whole.

      What a REST API is.

    1. Communication with this endpoint consists of JSON-encoded messages sent from client to server and vice versa.

      This is the endpoint of the Hypothesis API.

    1. To make your first API request, create a URL that points to the endpoint of the API you want to communicate with, and pass the URL to the “Get Contents of URL” action. When the shortcut runs, this action makes the API request.

      It seems that an endpoint is the communication gateway between an HTTP API server and its client.
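      A quick sketch of making the same kind of request from code rather than from Shortcuts, assuming the public Hypothesis API root endpoint mentioned above:

      ```js
      // GET the API root; the response describes the routes the API exposes
      const res = await fetch("https://api.hypothes.is/api/", {
        headers: { Accept: "application/json" },
      });
      console.log(await res.json());
      ```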

  8. Sep 2022
    1. 400 Bad Request is the status code to return when the form of the client request is not as the API expects. 401 Unauthorized is the status code to return when the client provides no credentials or invalid credentials. 403 Forbidden is the status code to return when a client has valid credentials but not enough privileges to perform an action on a resource.
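      A minimal sketch of the distinction in an Express-style handler; the route, `req.user`, and the permission flag are hypothetical:

      ```js
      app.post("/articles", (req, res) => {
        if (!req.body || typeof req.body.title !== "string") {
          return res.status(400).send("Bad Request"); // request not in the expected form
        }
        if (!req.user) {
          return res.status(401).send("Unauthorized"); // no or invalid credentials
        }
        if (!req.user.canCreateArticles) {
          return res.status(403).send("Forbidden"); // authenticated, but lacks privileges
        }
        // ...create the article...
        res.status(201).end();
      });
      ```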
    1. The server possibly can send back a 406 (Not Acceptable) error code when unable to serve content in a matching language. However, such a behavior is rarely implemented for a better user experience, and servers often ignore the Accept-Language header in such cases.
  9. Aug 2022
    1. ```js
      // Fetch and return the promise with the abort controller as controller property
      function fetchWithController(input, init) {
        // create the controller
        let controller = new AbortController();
        // use the signal to hook up the controller to the fetch request
        let signal = controller.signal;
        // extend arguments
        init = Object.assign({ signal }, init);
        // call the fetch request
        let promise = fetch(input, init);
        // attach the controller
        promise.controller = controller;
        return promise;
      }

      // and then replace a normal fetch with
      let promise = fetchWithController('/');
      promise.controller.abort();
      ```

    1. The “work around” was to detect users in an IAB and display a message on first navigation attempt to prompt them to click the “open in browser” button early.

      That's a pretty deficient workaround, given the obvious downsides. A more robust workaround would be to make the cart stateless, as far as the server is concerned, for non-logged-in users; don't depend on cookies. A page request instead amounts to a request for the form that has this and this and this pre-selected ("in the cart"). Like with paper.
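      A sketch of what "stateless" could mean here: the cart lives entirely in the request rather than in a cookie-backed session (URL and parameter names invented):

      ```js
      // Every item is named in the query string, so any browser, in-app or not,
      // gets the same pre-filled order form without needing a cookie.
      const cartUrl = new URL("https://shop.example.com/cart");
      for (const sku of ["sku-123", "sku-456"]) {
        cartUrl.searchParams.append("item", sku);
      }
      const page = await fetch(cartUrl); // returns the form with these items pre-selected
      ```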

  10. Jul 2022
    1. To synchronize BMC Helix common services container images

      I've tried to run this section while Harbor runs on HTTP. This fails because a docker login command is issued that assumes Harbor is on SSL.

      The workaround is to do the following

      1. Create 4 replication rules.

      Name: ade_part_01
      Source resource registry: https://containers.bmc.com
      Source resource filter Name: bmc/lp0lz
      Source resource filter Tag: {4.2.2-debian-10-r50,ade-authz-service-149,ade-file-service-e2830be-7,ade-identity-management-portal-12,ade-identity-management-service-15,ade-notification-service-9,adeops-util-v012,adeops-util-v013,adeops-util-v016,adeops-util-v019,adeops-util-v024,adereporting-21.3.02.02,adereporting-content-e0ab22f-251,adereporting-initdb-v001,adereporting-kafkacli-v002,adereporting-puller-7e41b3d-274,adereporting-renderer-dd91f81-216,adereporting-runner-7e41b3d-274,ade-tenant-management-automation-273,ade-tenant-management-portal-14,ade-tenant-management-service-7,ade-ui-content-service-18,aif-api-service-8150462-9,aif-clustering-ingestion-service-3a4ce1d-12,aif-clustering-query-service-3dfbda3-9,aif-clustering-service-08fa171-9,aif-core-service-fdfb78d-6,aif-incident-ingestion-service-3a0f0e2-8,aif-job-manager-service-ab85bfb-8,aif-machine-learning-utilities-8a08716-57,aif-ticket-service-d71f457-11,anomaly-detection-service-58e6996-5}

      Name: ade_part_02
      Source resource registry: https://containers.bmc.com
      Source resource filter Name: bmc/lp0lz
      Source resource filter Tag: {authproxy-RSSO_Auth_Proxy_101,authproxy-RSSO_Auth_Proxy_110,authproxy-RSSO_Auth_Proxy_112,authproxy-RSSO_Auth_Proxy_80,bitnami-kafka-2.7.0-debian-10-r124,bitnami-minio-2021.4.18-debian-10-r0,bitnami-zookeeper-3.7.0-debian-10-r25,custom-elasticsearch-1.13.3,custom-postgresql-repmgr-12.9.0,custom-sec-ade-infra-clients-1,custom-sec-redis-5.0.12-alpine,custom-sec-victoriametrics-vminsert-v1.63.0-cluster,custom-sec-victoriametrics-vmselect-v1.63.0-cluster,custom-sec-victoriametrics-vmstorage-v1.63.0-cluster,es-proxy-nginx-service-6d2eb81-6,es-proxy-service-6d2eb81-6,event-ingestion-service-4c0353c-4,event-mgmt-service-fc008be-6,event-processor-service-199851c-10,event-service-a21ce51-7,haproxy-2.0.4,justwatch-elasticsearch_exporter-1.1.0,kibana-proxy-service-c4f46f6-6,kibana-service-c4f46f6-6,kubectl-latest,log-ingestion-service-ff04217-99,log-mgmt-service-ceb53d1-4,log-processing-service-726afae-6,logs-portal-eb0d3a5-8}

      Name: ade_part_03
      Source resource registry: https://containers.bmc.com
      Source resource filter Name: bmc/lp0lz
      Source resource filter Tag: {metric-aggregation-service-6c4b171-9,metric-configuration-service-2b5ba78-7,metric-gateway-service-4a6caae-8,metricservice-6b50628-8,prometheus-ingestion-service-8659793-7,RSSO_21.3.00-DRRS0-3893,smart-graph-api-r841442-642-daas_ship-tkn_ship,smart-graph-controller-api-r841442-642-daas_ship-tkn_ship,smart-graph-controller-efsinit-r841442-642-daas_ship-tkn_ship,smart-graph-controller-security-r841442-642-daas_ship-tkn_ship,smart-graph-environment-controller-r841442-642-daas_ship-tkn_ship,smart-graph-instance-controller-r841442-642-daas_ship-tkn_ship,tctlrest-14,thirdparty-ingestion-service-6add794-5,truesight-credential-service-267,truesight-featureflag-service-272,0.9.0-debian-10-r35,bitnami-shell-10,bitnami-bitnami-shell-10-debian-10-r61,custom-sec-busybox-1.27.2,webhook-2102_20210218,elasticsearch-7.16.2-debian-10-r0,bitnami-elasticsearch-curator-5.8.4,kibana-7.16.2-debian-10-r0,fluentd-1.12.3-debian-10-r4}

      Name: ade_part_04
      Source resource registry: https://containers.bmc.com
      Source resource filter Name: bmc/lp0lz
      Source resource filter Tag: {ade-ims-webhook-114,ade-itsm-identity-sync-199}

      2. Then you can synchronize them at will.
    2. Install Harbor by using self-signed SSL certificates.

      This step is not mandatory, as even Harbor's documentation mentions:

      However, using HTTP is acceptable only in air-gapped test or development environments that do not have a connection to the external internet.

    3. wget https://github.com/goharbor/harbor/releases/download/v2.1.4/harbor-offline-installer-v2.1.4.tgz

      I've used `curl -L https://storage.googleapis.com/harbor-releases/release-1.8.0/harbor-offline-installer-v1.8.0.tgz > harbor-offline-installer-v1.8.0.tgz`, which fetches the minimal required version.

    4. harbor.yml.tmpl harbor.yml

      The minimal required version doesn't need this step; only one harbor.yml file is available.

      The file needs one variable changed: hostname.

      The https port is not enabled.

      You may also change the Harbor admin password.
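      A sketch of the harbor.yml fragment those notes imply; the values are examples, not the documented ones:

      ```yaml
      # the one variable that must change
      hostname: harbor.example.com

      # https stays disabled, so this whole section remains commented out
      # https:
      #   port: 443
      #   certificate: /your/certificate/path
      #   private_key: /your/private/key/path

      # optionally change the admin password
      harbor_admin_password: Harbor12345
      ```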

    1. ```http
      POST /news/comments/5 HTTP/1.1
      Content-Type: text/xml

      <item>
        <title>Foo Bar</title>
        <author>joe@bitworking.org</author>
        <link>http://www.bar.com/</link>
        <description>My Excerpt</description>
      </item>
      ```

  11. Jun 2022
  12. May 2022
    1. Signposting is an approach to make the scholarly web more friendly to machines. It uses Typed Links as a means to clarify patterns that occur repeatedly in scholarly portals. For resources of any media type, these typed links are provided in HTTP Link headers. For HTML resources, they may additionally be provided in HTML link elements. Throughout this site, examples use the former approach.

      ```http
      HTTP/1.1 302 Found
      Server: Apache-Coyote/1.1
      Vary: Accept
      Location: http://www.dlib.org/dlib/november15/vandesompel/11vandesompel.html
      Link: <http://orcid.org/0000-0002-0715-6126> ; rel="author",
            <http://orcid.org/0000-0003-3749-8116> ; rel="author"
      Expires: Tue, 31 May 2016 17:18:50 GMT
      Content-Type: text/html;charset=utf-8
      Content-Length: 217
      Date: Tue, 31 May 2016 16:38:15 GMT
      Connection: keep-alive
      ```

  13. datatracker.ietf.org
    1. 4. Link Relations for Web Services

      In order to allow Web services to represent the relation of individual resources to service documentation/description and metadata, this specification introduces and registers three new link relation types.

      4.1. The service-doc Link Relation Type

      The "service-doc" link relation type is used to represent the fact that a resource or a set of resources is documented at a specific URI. The target resource is expected to provide documentation that is primarily intended for human consumption.

      4.2. The service-desc Link Relation Type

      The "service-desc" link relation type is used to represent the fact that a resource or a set of resources is described at a specific URI. The target resource is expected to provide a service description that is primarily intended for machine consumption. In many cases, it is provided in a representation that is consumed by tools, code libraries, or similar components.

      4.3. The service-meta Link Relation Type

      The "service-meta" link relation type is used to link to available metadata for the service context of a resource. Service metadata is any kind of data that may be of interest to existing or potential service users, with documentation/description being only two possible facets of service metadata. The target resource is expected to provide a representation that is primarily intended for machine consumption. In many cases, it is provided in a representation that is consumed by tools, code libraries, or similar components.

      Since service metadata can have many different purposes and use many different representations, it may make sense for representations using the "service-meta" link relation to offer additional hints about the specific kind or format of metadata that is being linked.

      This definition of the "service-meta" link relation makes no specific assumptions about how these link hints will be represented, and the specific mechanism will depend on the context where the "service-meta" link relation is being used.

      One example is that a "service-desc" link may identify an OpenAPI description, which is supposed to be the machine-readable description of a Web API. A "service-meta" link may identify a resource that contains additional metadata about the Web API, such as labels that classify the API according to a labeling scheme and a privacy policy that makes statements about how the Web API manages personally identifiable information.
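      A sketch of how an API response might advertise all three relations in HTTP Link headers; the api.example.com URIs are invented:

      ```http
      HTTP/1.1 200 OK
      Content-Type: application/json
      Link: <https://api.example.com/openapi.json>; rel="service-desc",
            <https://api.example.com/docs>; rel="service-doc",
            <https://api.example.com/metadata>; rel="service-meta"
      ```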

  14. Apr 2022
    1. Cache using fetch

      Determine how to cache a resource by setting TTLs, custom cache keys, and cache headers in a fetch request.

      ```js
      async function handleRequest(request) {
        const url = new URL(request.url);

        // Only use the path for the cache key, removing query strings
        // and always store using HTTPS, for example, https://www.example.com/file-uri-here
        const someCustomKey = `https://${url.hostname}${url.pathname}`;

        let response = await fetch(request, {
          cf: {
            // Always cache this fetch regardless of content type
            // for a max of 5 seconds before revalidating the resource
            cacheTtl: 5,
            cacheEverything: true,
            // Enterprise only feature, see Cache API for other plans
            cacheKey: someCustomKey,
          },
        });
        // Reconstruct the Response object to make its headers mutable.
        response = new Response(response.body, response);

        // Set cache control headers to cache on browser for 25 minutes
        response.headers.set('Cache-Control', 'max-age=1500');
        return response;
      }

      addEventListener('fetch', event => {
        return event.respondWith(handleRequest(event.request));
      });
      ```


      Caching HTML resources

      Setting the cache level to Cache Everything will override the default cacheability of the asset. For time-to-live (TTL), Cloudflare will still rely on headers set by the origin.

      ```js
      // Force Cloudflare to cache an asset
      fetch(event.request, { cf: { cacheEverything: true } });
      ```