Compare commits


No commits in common. "f34c892dbe53baa9a74966d4e149b9da0cb0fa9f" and "6d1a835972414f03b536e566b26dfa97d0984c12" have entirely different histories.

188 changed files with 15628 additions and 11379 deletions

.env (Normal file, 1 changed line)

@ -0,0 +1 @@
VITE_API_URL=https://api.nicco.io/graphql

.gitattributes (vendored, 1 changed line)

@ -1,3 +1,2 @@
**/images/* filter=lfs diff=lfs merge=lfs -text
*.afphoto filter=lfs diff=lfs merge=lfs -text
*.afdesign filter=lfs diff=lfs merge=lfs -text

.gitignore (vendored, 32 changed lines)

@ -1,26 +1,8 @@
# build output
dist/
# generated types
.astro/
# dependencies
node_modules/
# logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
# environment variables
.env
.env.production
# macOS-specific files
.DS_Store
# jetbrains setting folder
.idea/
output
export.xml
node_modules
/.svelte-kit
/package
/build
.vercel_build_output
.vercel
*.wpress

.graphqlrc.yml (Normal file, 1 changed line)

@ -0,0 +1 @@
schema: "https://api.nicco.io/graphql"

.npmrc (Normal file, 1 changed line)

@ -0,0 +1 @@
engine-strict=true


@ -1,4 +0,0 @@
{
"recommendations": ["astro-build.astro-vscode", "unifiedjs.vscode-mdx"],
"unwantedRecommendations": []
}

.vscode/launch.json (vendored, 11 changed lines)

@ -1,11 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"command": "./node_modules/.bin/astro dev",
"name": "Development server",
"request": "launch",
"type": "node-terminal"
}
]
}


@ -1,3 +1,38 @@
# nicco.io
# create-svelte
Personal blog and website. With astro and a bit of svelte
Everything you need to build a Svelte project, powered by [`create-svelte`](https://github.com/sveltejs/kit/tree/master/packages/create-svelte);
## Creating a project
If you're seeing this, you've probably already done this step. Congrats!
```bash
# create a new project in the current directory
npm init svelte@next
# create a new project in my-app
npm init svelte@next my-app
```
> Note: the `@next` is temporary
## Developing
Once you've created a project and installed dependencies with `npm install` (or `pnpm install` or `yarn`), start a development server:
```bash
npm run dev
# or start the server and open the app in a new browser tab
npm run dev -- --open
```
## Building
Before creating a production version of your app, install an [adapter](https://kit.svelte.dev/docs#adapters) for your target environment. Then:
```bash
npm run build
```
> You can preview the built app with `npm run preview`, regardless of whether you installed an adapter. This should _not_ be used to serve your app in production.


@ -1 +0,0 @@
- Tag count


@ -1,22 +0,0 @@
// @ts-check
import { rehypeHeadingIds } from '@astrojs/markdown-remark'
import mdx from '@astrojs/mdx'
import sitemap from '@astrojs/sitemap'
import svelte from '@astrojs/svelte'
import { defineConfig } from 'astro/config'
import rehypeAutolinkHeadings from 'rehype-autolink-headings'
import Icons from 'unplugin-icons/vite'
import { remarkReadingTime } from './readingTime'
// https://astro.build/config
export default defineConfig({
site: 'https://example.com',
integrations: [mdx(), sitemap(), svelte()],
markdown: {
rehypePlugins: [rehypeHeadingIds, [rehypeAutolinkHeadings, { behavior: 'wrap' }]],
remarkPlugins: [remarkReadingTime],
},
vite: {
plugins: [Icons({ compiler: 'astro' })],
},
})

codegen.yaml (Normal file, 14 changed lines)

@ -0,0 +1,14 @@
schema: https://api.nicco.io/graphql
documents: "src/**/*.graphql"
generates:
./src/lib/gql/gen.ts:
plugins:
- "@graphql-codegen/typescript"
- "@graphql-codegen/typescript-operations"
- "@graphql-codegen/typescript-graphql-request"
config:
maybeValue: "T"
typesPrefix: GQL
immutableTypes: true
useTypeImports: true
avoidOptionals: true


@ -1,34 +1,37 @@
{
"private": true,
"type": "module",
"scripts": {
"astro": "astro",
"build": "astro check && astro build",
"dev": "astro dev",
"preview": "astro preview",
"start": "astro dev"
"dev": "svelte-kit dev",
"build": "svelte-kit build",
"preview": "svelte-kit preview",
"check": "svelte-check --tsconfig ./tsconfig.json",
"check:watch": "svelte-check --tsconfig ./tsconfig.json --watch",
"generate": "graphql-codegen",
"ci": "pnpm run generate && pnpm run check && pnpm run build"
},
"type": "module",
"devDependencies": {
"@astrojs/check": "^0.9.4",
"@astrojs/markdown-remark": "^6.0.0",
"@astrojs/mdx": "^4.0.1",
"@astrojs/rss": "^4.0.9",
"@astrojs/sitemap": "^3.2.1",
"@astrojs/svelte": "^7.0.1",
"@fontsource-variable/jost": "^5.1.1",
"@fontsource-variable/playfair-display": "^5.1.0",
"@iconify-json/ion": "^1.2.1",
"astro": "^5.0.2",
"fuse.js": "^7.0.0",
"mdast-util-to-string": "^4.0.0",
"reading-time": "^1.5.0",
"rehype-autolink-headings": "^7.1.0",
"remark-toc": "^9.0.0",
"sass": "^1.81.1",
"sharp": "^0.33.5",
"svelte": "^5.5.2",
"typescript": "^5.7.2",
"unplugin-icons": "^0.21.0"
"@graphql-codegen/cli": "^2.6.2",
"@graphql-codegen/typescript": "^2.4.7",
"@graphql-codegen/typescript-graphql-request": "^4.4.2",
"@graphql-codegen/typescript-operations": "^2.3.4",
"@sveltejs/adapter-static": "^1.0.0-next.29",
"@sveltejs/kit": "^1.0.0-next.295",
"@types/lunr": "^2.3.4",
"graphql": "^15.8.0",
"graphql-request": "^3.7.0",
"graphql-tag": "^2.12.6",
"svelte": "^3.46.4",
"svelte-check": "^2.4.5",
"svelte-preprocess": "^4.10.4",
"tslib": "^2.3.1",
"typescript": "^4.6.2"
},
"packageManager": "pnpm@9.14.4"
"dependencies": {
"dayjs": "^1.10.8",
"highlight.js": "^11.5.0",
"lodash": "^4.17.21",
"lunr": "^2.3.9",
"svelte-cloudinary": "^0.2.5"
}
}

pnpm-lock.yaml (generated, 8899 changed lines; diff suppressed because it is too large)


@ -1,9 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 128 128">
<path d="M50.4 78.5a75.1 75.1 0 0 0-28.5 6.9l24.2-65.7c.7-2 1.9-3.2 3.4-3.2h29c1.5 0 2.7 1.2 3.4 3.2l24.2 65.7s-11.6-7-28.5-7L67 45.5c-.4-1.7-1.6-2.8-2.9-2.8-1.3 0-2.5 1.1-2.9 2.7L50.4 78.5Zm-1.1 28.2Zm-4.2-20.2c-2 6.6-.6 15.8 4.2 20.2a17.5 17.5 0 0 1 .2-.7 5.5 5.5 0 0 1 5.7-4.5c2.8.1 4.3 1.5 4.7 4.7.2 1.1.2 2.3.2 3.5v.4c0 2.7.7 5.2 2.2 7.4a13 13 0 0 0 5.7 4.9v-.3l-.2-.3c-1.8-5.6-.5-9.5 4.4-12.8l1.5-1a73 73 0 0 0 3.2-2.2 16 16 0 0 0 6.8-11.4c.3-2 .1-4-.6-6l-.8.6-1.6 1a37 37 0 0 1-22.4 2.7c-5-.7-9.7-2-13.2-6.2Z" />
<style>
path { fill: #000; }
@media (prefers-color-scheme: dark) {
path { fill: #FFF; }
}
</style>
</svg>



@ -1,11 +0,0 @@
import getReadingTime from 'reading-time'
import { toString } from 'mdast-util-to-string'
export function remarkReadingTime() {
return function (tree, { data }) {
const textOnPage = toString(tree)
const readingTime = getReadingTime(textOnPage)
data.astro.frontmatter.text = textOnPage
data.astro.frontmatter.readingTime = readingTime
}
}


@ -1,3 +1,5 @@
@import url('https://fonts.googleapis.com/css2?family=Jost:wght@300;400&family=Playfair+Display&display=swap');
* {
box-sizing: border-box;
-webkit-overflow-scrolling: touch;
@ -5,8 +7,9 @@
}
:root {
--ff: 'Jost Variable', sans-serif;
--ff-alt: 'Playfair Display Variable', serif;
--ff: 'Jost', Roboto, -apple-system, BlinkMacSystemFont, Segoe UI, Oxygen, Ubuntu, Cantarell, Fira Sans, Droid Sans,
Helvetica Neue, sans-serif;
--ff-alt: 'Playfair Display', serif;
--clr-light: #ffffff;
--clr-dark: #010101;
--clr-primary: hsl(219, 90%, 80%);
@ -27,12 +30,13 @@
body {
margin: 0;
overflow: hidden;
background-color: var(--clr-light);
font-family: var(--ff);
font-size: 16px;
font-weight: 300;
font-weight: lighter;
color: var(--clr-dark);
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
@ -58,13 +62,6 @@ h6 {
line-height: 1.2;
}
h2 {
font-size: 1.5rem;
}
h3 {
font-size: 1.25rem;
}
p {
text-align: justify;
line-height: 1.5;
@ -79,27 +76,39 @@ a {
text-decoration: none;
}
p a,
.toc a {
border-bottom: 0.125em solid var(--clr-primary);
ul {
margin: 0;
padding: 0;
padding-left: 0px;
padding-left: 1em;
list-style: square;
}
.progress {
box-sizing: border-box;
width: 100%;
position: relative;
background: var(--clr-light);
margin: 0.5em 0;
padding: 0.1em 0.5em;
border: 1px solid var(--clr-dark);
}
.progress > span {
position: relative;
z-index: 1;
}
.progress > div {
height: 100%;
background: var(--clr-primary);
position: absolute;
top: 0;
left: 0;
z-index: 0;
}
svg {
fill: currentColor;
height: auto;
}
pre {
padding: 1rem;
margin: 1rem 0;
display: block;
}
ul {
list-style: square;
margin-left: 1rem;
}
ol {
list-style: auto;
margin-left: 1rem;
}

src/app.html (Normal file, 23 changed lines)

@ -0,0 +1,23 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="description" content="Designer & Developer" />
<meta name="keywords" content="Web Agency Blog Articles" />
<meta name="viewport" content="width=device-width,initial-scale=1.0,viewport-fit=cover" />
<link rel="icon" type="image/png" href="/images/monogramm.png" />
%svelte.head%
</head>
<body>
<div id="svelte">%svelte.body%</div>
<script
async
defer
data-website-id="2c23f7af-230c-4ea3-a40a-87aa2939fef3"
src="https://spectare.nicco.io/unicorn.js"
></script>
</body>
</html>


@ -1,30 +0,0 @@
---
import { Image } from 'astro:assets'
import aboutImage from '../content/images/about.webp'
---
<Image src={aboutImage} alt={'tiny me'} />
<style>
img {
position: absolute;
z-index: -1;
object-fit: contain;
width: 24vw;
height: 30vw;
left: 40em;
top: 12em;
max-width: 25em;
}
@media (max-width: 60em) {
img {
position: initial;
width: 100%;
height: 100%;
object-position: right;
max-height: 20em;
margin-top: 4em;
}
}
</style>


@ -1,58 +0,0 @@
---
import '@fontsource-variable/jost'
import '@fontsource-variable/playfair-display'
import '../styles/preflight.css'
import '../styles/global.scss'
interface Props {
title?: string
image?: string
}
const canonicalURL = new URL(Astro.url.pathname, Astro.site)
// const { image = '/blog-placeholder-1.jpg' } = Astro.props
let title = 'Niccolo Borgioli'
const description = 'Personal blog'
if (Astro.props.title) {
title = Astro.props.title + ' ' + title
}
---
<!-- Global Metadata -->
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width,initial-scale=1" />
<link rel="icon" type="image/png" href="/monogram.png" />
<meta name="generator" content={Astro.generator} />
<!-- Canonical URL -->
<link rel="canonical" href={canonicalURL} />
<!-- Primary Meta Tags -->
<title>{title}</title>
<meta name="title" content={title} />
<meta name="description" content={description} />
<!-- Open Graph / Facebook -->
<meta property="og:type" content="website" />
<meta property="og:url" content={Astro.url} />
<meta property="og:title" content={title} />
<meta property="og:description" content={description} />
<!-- <meta property="og:image" content={new URL(image, Astro.url)} /> -->
<!-- Twitter -->
<meta property="twitter:card" content="summary_large_image" />
<meta property="twitter:url" content={Astro.url} />
<meta property="twitter:title" content={title} />
<meta property="twitter:description" content={description} />
<!-- <meta property="twitter:image" content={new URL(image, Astro.url)} /> -->
<script
is:inline
async
defer
src="https://spectare.nicco.io/unicorn.js"
data-website-id="bc7525c5-6928-49e1-9255-aca296947def"></script>


@ -1,53 +0,0 @@
---
import FingerPrint from '~icons/ion/finger-print'
import ChatBubbles from '~icons/ion/chatbubbles-outline'
import Mail from '~icons/ion/mail-outline'
import Heart from '~icons/ion/heart'
import Github from '~icons/ion/logo-github'
import Trending from '~icons/ion/trending-up'
const links: { label: string; url: string; icon: astroHTML.JSX.Element }[] = [
{ label: 'Say hi@nicco.io', url: 'mailto:hi@nicco.io', icon: Mail },
{ label: 'Chat on discord', url: 'https://discord.gg/wS7RpYTYd2', icon: ChatBubbles },
{ label: 'GitHub', url: 'https://github.com/cupcakearmy', icon: Github },
{ label: 'Support my work', url: 'https://github.com/sponsors/cupcakearmy', icon: Heart },
{ label: 'Traffic', url: 'https://spectare.nicco.io/share/HYgOcrlfHxGB9RAR/nicco.io', icon: Trending },
{ label: 'Privacy', url: '/privacy', icon: FingerPrint },
]
---
<ul>
{
links.map(({ label, url, icon: Icon }) => {
return (
<li>
<a href={url} target="_blank" rel="noopener noreferrer">
{Icon && <Icon />}
{label}
</a>
</li>
)
})
}
</ul>
<style lang="scss">
ul {
display: flex;
flex-direction: column;
gap: 1rem;
list-style: none;
margin: 0;
}
a {
display: flex;
gap: 0.5rem;
padding: 0.5rem 0;
transition: var(--animation);
&:hover {
transform: translateX(5%) scale(1.1);
}
}
</style>


@ -1,17 +0,0 @@
---
interface Props {
date: Date
}
const { date } = Astro.props
---
<time datetime={date.toISOString()}>
{
date.toLocaleDateString(undefined, {
year: 'numeric',
month: 'short',
day: 'numeric',
})
}
</time>


@ -1,66 +0,0 @@
<script lang="ts">
import Fuse from 'fuse.js'
const { entries } = $props()
const fuse = new Fuse(entries, {
keys: ['text', 'url', 'extra'],
includeScore: true,
includeMatches: false,
minMatchCharLength: 2,
threshold: 0.5,
})
let needle = $state('')
const results = $derived(fuse.search(needle))
</script>
<input bind:value={needle} />
<ol>
{#each results as result}
<li>
<a href={result.item.url}>
<span class="meta">
{result.item.type}
</span>
<span class="meta">
{(1 - result.score).toFixed(2)}
</span>
{result.item.title}
</a>
</li>
{/each}
</ol>
<style lang="scss">
input {
background: white;
border: 2px solid var(--clr-primary);
border-bottom-width: 4px;
padding: 1rem;
&:focus,
&:hover {
outline: none;
border-color: var(--clr-secondary);
}
}
a {
display: flex;
flex-direction: row;
align-items: start;
gap: 0.5rem;
}
li {
margin-bottom: 1rem;
}
.meta {
background: var(--clr-secondary);
width: fit-content;
padding: 0 0.25rem;
}
</style>


@ -1,44 +0,0 @@
---
import type { CollectionEntry } from 'astro:content'
import type { ReadTimeResults } from 'reading-time'
import FormattedDate from './FormattedDate.astro'
import ReadingTime from './ReadingTime.astro'
type Props = {
readingTime: ReadTimeResults
} & Pick<CollectionEntry<'blog'>['data'], 'date' | 'updatedDate'>
const { updatedDate, date, readingTime } = Astro.props
---
<div class="attributes">
<div class="estimation">
<FormattedDate date={date} />
{
updatedDate && date !== updatedDate && (
<div class="updated">
Updated: <FormattedDate date={updatedDate} />
</div>
)
}
</div>
<ReadingTime readingTime={readingTime} />
</div>
<style>
.attributes {
display: flex;
justify-content: space-between;
font-weight: 400;
margin-top: -0.125em;
}
.estimation {
display: flex;
align-items: baseline;
gap: 0.5em;
}
.updated {
font-size: 0.75em;
font-style: italic;
}
</style>


@ -1,32 +0,0 @@
---
import type { CollectionEntry } from 'astro:content'
import PostPreview from './PostPreview.astro'
export type Props = {
posts: CollectionEntry<'blog'>[]
}
const { posts } = Astro.props
---
<ul>
{
posts.map((post) => (
<li>
<a href={`/blog/${post.slug}`}>
<PostPreview {post} />
</a>
</li>
))
}
</ul>
<style>
ul {
max-width: 40rem;
display: flex;
flex-direction: column;
gap: 6rem;
list-style: none;
}
</style>


@ -1,77 +0,0 @@
---
import { Picture } from 'astro:assets'
import type { CollectionEntry } from 'astro:content'
import PostAttributes from './PostAttributes.astro'
import Tags from './Tags.astro'
export type Props = {
post: CollectionEntry<'blog'>
}
const { post } = Astro.props
const { remarkPluginFrontmatter } = await post.render()
---
<section class:list={{ without: !post.data.coverImage }}>
{post.data.coverImage && <Picture src={post.data.coverImage} alt={'foo'} />}
<PostAttributes
date={post.data.date}
updatedDate={post.data.updatedDate}
readingTime={remarkPluginFrontmatter.readingTime}
/>
<h2>
{post.data.title}
</h2>
<Tags tags={post.data.tags.map((tag) => ({ name: tag, href: `/tag/${tag}` }))} />
</section>
<style lang="scss">
section {
display: block;
transition: var(--animation);
// &.without {
// border: 2px solid var(--clr-primary);
// padding: 5%;
// width: calc(100% + 10%);
// transform: translateX(-5%);
// }
&:hover {
transform: scale(1.05);
& :global(img) {
}
// & > :global(div) {
// opacity: 0;
// }
}
}
h2 {
margin-top: 0.25rem;
position: relative;
top: 0;
transition: var(--animation);
font-size: 2rem;
}
section > :global(div) {
opacity: 1;
transition: var(--animation);
}
img {
width: calc(100% - 0.25em);
height: 12rem;
object-fit: cover;
object-position: center;
border: 0.125rem solid var(--clr-primary);
transition: var(--animation);
transform: scale(1);
margin: 0;
margin-bottom: 0.5rem;
top: 0;
position: relative;
}
</style>


@ -1,9 +0,0 @@
---
import type { ReadTimeResults } from 'reading-time'
type Props = { readingTime: ReadTimeResults }
const { readingTime } = Astro.props
---
<span>~ {readingTime.minutes.toFixed(0)} min</span>


@ -1,39 +0,0 @@
---
export type Props = {
progress: number
title: string
}
const { progress, title } = Astro.props
---
<div class="progress">
<span>{title}</span>
<div style={`width: ${progress * 100}%`}></div>
</div>
<style>
.progress {
box-sizing: border-box;
width: 100%;
position: relative;
background: var(--clr-light);
margin: 0.5em 0;
padding: 0.1em 0.5em;
border: 1px solid var(--clr-dark);
}
.progress span {
position: relative;
z-index: 1;
}
.progress div {
height: 100%;
background: var(--clr-primary);
position: absolute;
top: 0;
left: 0;
z-index: 0;
}
</style>


@ -1,36 +0,0 @@
---
import type { ComponentProps } from 'astro/types'
import Tag from './Tag.astro'
export type Props = {
tags: ComponentProps<typeof Tag>[]
rows?: number
}
const { tags, rows = 1 } = Astro.props
const height = rows * 2
---
<div style={`height: ${height}em;`}>
{tags.map((tag) => <Tag {...tag} />)}
</div>
<style>
div {
display: flex;
flex-direction: row;
overflow: auto;
align-items: center;
justify-content: flex-start;
margin: 0 -0.25rem;
}
div::-webkit-scrollbar {
display: none;
}
div {
-ms-overflow-style: none;
scrollbar-width: none;
}
</style>


@ -1,47 +0,0 @@
---
import type { MarkdownHeading } from 'astro'
type Props = { headings: MarkdownHeading[] }
const { headings } = Astro.props
---
<div class="toc">
<b>Outline</b>
{
headings.map(({ slug, text, depth }) => (
<div style={`margin-left: ${depth - 2}rem;`}>
<span>▶</span>
<a href={`#${slug}`}>{text}</a>
</div>
))
}
</div>
<style>
.toc {
margin: 2rem 0;
@media (min-width: 70rem) {
margin: 0;
position: absolute;
left: 45rem;
width: calc(100vw - 50rem);
}
}
span {
font-size: 0.5em;
transform: translateY(-0.8em);
display: inline-block;
}
a {
height: 1.4em;
display: inline-block;
max-width: calc(100% - 1.5rem);
overflow: hidden;
white-space: nowrap;
text-overflow: ellipsis;
}
</style>


@ -1,5 +0,0 @@
// Place any global data in this file.
// You can import this data from anywhere in your site by using the `import` keyword.
export const SITE_TITLE = 'Astro Blog';
export const SITE_DESCRIPTION = 'Welcome to my website!';


@ -1,232 +0,0 @@
---
title: '5 useful Typescript tricks'
date: '2019-10-06'
categories:
- 'coding'
tags:
- 'tips-and-tricks'
- 'typescript'
coverImage: './images/amador-loureiro-BVyNlchWqzs-unsplash-scaled.jpg'
---
Typescript is a godsend. It is very easy to get started with and for most developers there is no way back once they get the hang of it. Sometimes it can get pretty advanced and intimidating though.
This is why I decided to share 5 of my favourite TypeScript tips and tricks you might have needed in the past. Some are super basic, some are a bit more advanced.
**Update** _07 Oct 2019 @ 07:53_
Reddit user [jakeboone02](https://www.reddit.com/r/typescript/comments/de17xs/5_useful_typescript_tricks_small_tricks_you_might/f2t9prk?utm_source=share&utm_medium=web2x) found an error in the ternary code.
**Update** _06 Oct 2019 @ 15:06_
Reddit user [smeijer87](https://www.reddit.com/r/typescript/comments/de17xs/5_useful_typescript_tricks_small_tricks_you_might/f2qveub?utm_source=share&utm_medium=web2x) found an error in the code for null coalescing.
**Update** _06 Oct 2019 @ 14:47_
A friendly reader pointed out that these kinds of excluding interface types are called discriminated unions.
1. [react higher-order components](#hoc)
2. [smarter constructor](#constructors)
3. [type checking functions](#type-checking-function)
4. [discriminated unions](#excluding)
5. [optional chaining & null coalescing](#future)
<figure>
![](images/amador-loureiro-BVyNlchWqzs-unsplash-scaled.jpg)
<figcaption>
Photo by [Amador Loureiro](https://unsplash.com/@amadorloureiroblanco?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/type?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)
</figcaption>
</figure>
## Higher-order Components
In React, [higher order components (HOC)](https://reactjs.org/docs/higher-order-components.html) are very useful tools. Generally they are used to wrap some layout or functionality around another component. They are simply functions that return another component: basically the same pattern as decorators.
In TypeScript it can be confusing how to write them while _maintaining the right props_ after wrapping the original component. Here is how:
```
import React from 'react'
function withLayout<P extends object>(WrappedComponent: React.ComponentType<P>) {
return (props: P) => (
<div id='app'>
<Header/>
<WrappedComponent {...props}/>
<Footer/>
</div>
);
}
```
Note also that when using `withLayout` you don't need to specify the generic type explicitly, as TypeScript will infer it from the function parameter. Super handy!
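To make that inference concrete, here is a small illustrative usage sketch. The `Home` and `App` components are hypothetical and only serve to show that the wrapped component keeps its props:
```
// Hypothetical page component, used only for illustration.
type HomeProps = { name: string }
const Home = ({ name }: HomeProps) => <h1>Hello {name}</h1>

// No explicit generic needed: TypeScript infers P = HomeProps.
const HomeWithLayout = withLayout(Home)

// Props stay type-checked, so `name` is still required here.
const App = () => <HomeWithLayout name="Nicco" />
```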
## Smarter constructors
Let's start with the building block this is based on. It's a basic JavaScript trick, not a TypeScript exclusive.
```
class Pizza {
slices: number
name: string
constructor(init) {
Object.assign(this, init)
}
}
const pizza = new Pizza({
slices: 8,
name: 'Margherita',
})
```
What is happening here? The super handy `Object.assign` simply assigns the init object's properties to the class instance. This is great when classes have many constructor parameters. But it is NOT type safe, as your IDE/editor will tell you. How do we fix this?
```
import { NonFunctionKeys } from 'utility-types'
class Pizza {
slices!: number
name?: string
constructor(init: Pick<Pizza, NonFunctionKeys<Pizza>>) {
Object.assign(this, init)
}
eat() {
this.slices = 0
}
}
const pizza = new Pizza({
slices: 8,
name: 'Margherita',
})
```
Let me explain what happens:
This leverages the awesome [utility-types](https://github.com/piotrwitek/utility-types) package. We first take all the keys that are not functions, so we don't overwrite the `eat` method of the class. Then we pick those from the general `Pizza` type.
This means that `slices` will be required, while `name` will be optional, as they are defined.
## Type-checking Functions
Did you know you can write functions to tell typescript what type something is? This is awesome!
Suppose we have the following interfaces
```
interface Food {
name: string
}
interface Pasta extends Food {
type: 'Spaghetti' | 'Fusilli'
}
interface Pizza extends Food {
slices: number
}
```
Now we could write a `cook` function that accepts both Pasta and Pizza. Typescript itself cannot differentiate between the two.
```
function cook(what: Food) {
if(what === Pizza) ????
}
```
Fortunately there is a nice solution built into typescript.
```
function isPizza(x: Food | Pizza): x is Pizza {
return x.hasOwnProperty('slices')
}
function isPasta(x: Food | Pasta): x is Pasta {
return x.hasOwnProperty('type')
}
function cook(plate: Food) {
if (isPizza(plate)) {
// Plate is now of type Pizza
putInTheOven(plate)
}
if (isPasta(plate)) {
// Plate is now of type Pasta
putInThePan(plate)
}
}
```
Here we define two functions whose return type is `x is SomeType` and that return a boolean value based on the input. It's up to you of course to define them properly, but this can be very useful in various situations.
## Discriminated unions
```
type Sqlite = {
type: 'sqlite',
database: string,
}
type PostgreSQL = {
type: 'postgresql',
database: string,
host: string,
post?: number
}
type PossibleConfigs = Sqlite | PostgreSQL
function initialize(config: PossibleConfigs) {}
```
This might look like a simple one, but I often see people putting those sorts of types all into the same interface. By separating the different types of objects you make sure that they are type safe. Also the autocomplete will thank you.
## Optional Chaining & Null Coalescing
These are upcoming features that will be introduced in TypeScript 3.7. They are very useful, and you will not want to live without them after the release in early November 2019.
Optional chaining is an obvious shorthand. Every time you need to check if a property (especially if nested) exists, you need to do lots of repetitive checking. No more!
```
a && a.b && a.b.c // 🤬
a?.b?.c // 🚀
```
Null coalescing is also a very useful shorthand. You all know the `||` shorthand, often used to initialise a variable if no value is given.
```
const option = something || 'default'
// Sugar for
const option = !!something ? something : 'default'
```
The problem is with values that are actual values but evaluate as falsy.
```
false || 'default' // => 'default'
0 || 'default' // => 'default'
```
This is where the Null Coalescing comes in.
```
const option = something ?? 'default' // 🚀
// Sugar for
const option = (something === null || something === undefined)
? 'default'
: something
0 ?? 'default' // => 0
false ?? 'default' // => false
```
Basically it only assigns the default value if the provided one is `null` or `undefined`, so that values like `false` or `0` don't get overwritten.


@ -1,64 +0,0 @@
---
title: "5 JetBrains tips'n'tricks I wish I'd known sooner"
date: '2019-07-03'
categories:
- 'coding'
tags:
- 'ide'
- 'jetbrains'
coverImage: './images/cards-scaled.jpg'
---
Here are some small features that may not be apparent to newer devs using the JetBrains IDEs. Most of them I discovered simply by using them over time.
1. Double Shift for navigating your codebase
2. cmd/ctrl + shift + f for text search
3. Remote Interpreters
4. Syncing settings
5. Reformatting
## Double Shift
For many including myself this is the primary way to navigate code and files. Simply press shift two times, type in the file, class or function you are searching for and press enter. This is by far the quickest and most accurate way to navigate code in any Editor or IDE I've tried so far.
Do it once, and you will not go back.
## CMD + Shift + F
**Windows & Linux**: ctrl + shift + f
This is somewhat similar to 2x Shift. The main difference is that double shift searches mainly for filenames and symbols (function names, class names, etc.), while cmd + shift + f works more like a full-text search.
What makes this really powerful is that you can [regex search](https://www.jetbrains.com/help/idea/tutorial-finding-and-replacing-text-using-regular-expressions.html#Tutorial_Finding_and_Replacing_Text_Using_Regular_Expressions.xml), [mask files by extension](https://www.jetbrains.com/help/idea/finding-and-replacing-text-in-project.html#exclude_type), [exclude folders](https://www.jetbrains.com/help/webstorm/configuring-project-structure.html#022f3834) (e.g. build folders) and search only in specific directories.
If you ever _lose something in your code_, or _maybe you are new to the project_ and don't know where certain parts are located, **this is the way to find it.**
## Remote Interpreters
This is a huge one for me! JetBrains allows you to run the code on remote machines. This also extends to all the packages you install, and the shell in the terminal is automatically opened on the host.
Remote hosts can be either a machine you ssh into or a local docker container running a different version of the language that you need.
_You might ask why?_
1. Use a docker container with a specific version of node/python/php/etc. instead of installing it locally on your machine. Basically a virtual environment for every language. Amazing!
2. Maybe you want to run the code on a Raspberry Pi, which has a different architecture. All the packages you install will be installed on the Raspberry, and when you hit _command+r_ the code will execute on the Pi instead of your machine, while you still get the logs locally. Incredible!
To configure **simply go to the** _**run**_ **menu and add a new remote interpreter**.
## Sync Settings across devices
This is very simple. You can sync all your settings, including plugins, to either your JetBrains account or your own git settings repository. When you open the IDE somewhere else, everything is back to how it was.
Enable by going to: File -> Sync IDE Settings
**Note:** The synchronisation happens on a per-IDE basis, so your WebStorm settings are not synced with your PyCharm settings, of course.
## Reformatting
Yet again one of the reasons why I can't go back to VSCode. For each language there is a TON of customisation possible when reformatting. You can decide how your spaces, commas, imports and semicolons should look; everything is completely up to you.
You can tinker around with it in the Settings under: Editor -> Code Style -> <your language>
**Bonus:** If you select a folder in the project view, you can reformat all files inside it, quick and easy. This is especially useful if one has imported some external sources for example.
That's it, I hope you found some of it useful and that you can enjoy the JetBrains cosmos even more 😉


@ -1,255 +0,0 @@
---
title: 'A guide to Directus for Gatsby or Sapper as CMS'
date: '2020-04-11'
categories:
- 'coding'
tags:
- 'cms'
- 'directus'
- 'gatsby'
- 'sapper'
- 'static-generated'
coverImage: './images/noah-silliman-doBrZnp_wqA-unsplash.jpg'
---
For those who don't know what [Directus](https://directus.io/) is: an open-source, database-first CMS that generates an API. Lots of buzzwords there, but it's truly a cool project that deserves much more attention IMO.
Recently I've used it to deliver some statically generated websites that needed some sort of CMS. Think of a blog, or small landing pages. For that kind of site you can combine it with Gatsby, or in this case Sapper, to generate static HTML from the API.
The article will focus on Sapper, but the parts related to Directus are identical for Gatsby, just the frontend will change.
#### What will we do today?
1. [Install Directus](#1)
2. [Create some data and make it publicly available](#2)
3. [Create a super small frontend](#3)
4. [Write a custom hook for Directus that automatically triggers the build whenever content changes in the DB.](#4)
## Installing Directus
This should be straightforward. These instructions are adapted from the [official docker guide](https://docs.directus.io/installation/docker.html). I will use Docker for this.
```yaml
# docker-compose.yml
version: '3.7'
services:
mysql:
image: mysql:5.7
volumes:
- ./data/db:/var/lib/mysql
env_file: .env
directus:
image: directus/directus:v8-apache
ports:
- '8000:80'
env_file: .env
volumes:
- ./data/config:/var/directus/config
- ./data/uploads:/var/directus/public/uploads
```
Then we run `docker-compose up -d`. After a few seconds we need to initialise Directus.
```bash
docker-compose run directus install --email some@email.com --password 1337
```
Now you can go to [localhost:8000](http://localhost:8000) and sign in with the credentials you just specified
## Create some data
Now I'm going to create some data to test our blog. First go to the [settings](http://localhost:8000/admin/#/_/settings/collections) and create a new collection. I'm going to call it `posts`.
Then we are going to add a `title` text field and a simple markdown editor with a `body` field.
Lastly we add a simple post with random data.
<figure>
![](images/data.gif)
<figcaption>
Insert collection and data
</figcaption>
</figure>
## Giving permissions
Now we need to give permission to the `public` role so that we don't need an API key. For most sites this is perfectly fine, since we only expose the data that gets displayed on the website anyway.
Go to the [roles settings](http://localhost:8000/admin/#/_/settings/roles) and click on `public`. There, select the tables you want/need for the website.
Gotcha: If you have files (like photos) you also need to enable them for public viewing. Do this by clicking "Show Directus System Collections" and enabling view access to `Files`.
<figure>
![](images/permissions.gif)
<figcaption>
Give permissions to the public user
</figcaption>
</figure>
## Building a minimal frontend with sapper
I will not explain how [Sapper](https://sapper.svelte.dev/) works as this is not the focus today. If you don't know Sapper: It's very similar to Nuxt or Next.js with the additional option to even export as static html, so the end result is similar to a Gatsby website. Very powerful and easy to use and code.
```bash
# Setup
npx degit "sveltejs/sapper-template#rollup" my-blog
cd my-blog
yarn
yarn run dev
# open http://localhost:3000
```
### Load data from Directus
Directus has a [JS SDK](https://docs.directus.io/guides/js-sdk.html) and since we have made data public we don't even need a token or authentication. Awesome 🚀
```bash
yarn add @directus/sdk-js
```
First we are going to initialise the SDK. The default project name is simply `directus`
```ts
// ./src/lib/api.js
import DirectusSDK from '@directus/sdk-js'
export const client = new DirectusSDK({
url: 'http://localhost:8000',
project: 'directus',
})
```
Then let's make a server-side JSON loader so that the exported site will not even contact the server afterwards. Completely static HTML.
```ts
// ./src/routes/posts.json.js
import { client } from '../lib/api'
export async function get(req, res, next) {
try {
const { data } = await client.getItems('posts')
res.writeHead(200, {
'Content-Type': 'application/json',
})
res.end(JSON.stringify(data))
} catch (e) {
res.writeHead(404, {
'Content-Type': 'application/json',
})
res.end(
JSON.stringify({
message: 'Not found',
})
)
}
}
```
Finally the svelte component.
```svelte
// ./src/routes/index.svelte
<script context="module">
export async function preload ({ params }) {
const res = await this.fetch('posts.json')
const data = await res.json()
if (res.status === 200) return { data }
else this.error(res.status, 'Not found')
}
</script>
<script>
import Post from '../components/Post.svelte'
export let data
</script>
<div class="wrapper">
{#each data as post}
<Post {post} />
{/each}
</div>
```
## Write a custom hook to trigger a build every time the data changes
When it comes to statically generated sites, often the easiest way to do things is to simply regenerate the site every "x" amount of time. That kind of works, however there will be many builds that don't contain any change, and you still need to wait for a cron job to see changes. That sucks.
Fortunately Directus supports writing custom hooks! 🎉
I will illustrate the case for [Drone](https://drone.io/), but the approach can be used for any CI/CD server out there.
For that we create a new php file and give it a name. In my case: `drone-hook.php`
```php
# ./hooks/drone-hook.php
<?php
function process ($collection, $data) {
$collectionsToWatch = ['posts'];
if(!in_array($collection, $collectionsToWatch)) {
return;
}
$ch = curl_init();
curl_setopt($ch, CURLOPT_URL, 'https://my.domain.com/api/repos/my-username/my-repo/builds');
curl_setopt($ch, CURLOPT_POST, 1);
curl_setopt($ch, CURLOPT_HTTPHEADER, [ 'Authorization: Bearer '.$_ENV['DRONE_TOKEN'] ]);
curl_setopt($ch, CURLOPT_RETURNTRANSFER,true);
curl_exec($ch);
curl_close($ch);
}
return [
'actions' => [
'item.create' => 'process',
'item.update' => 'process',
'item.delete' => 'process',
]
];
```
I've also put the token inside of the `.env` file so that I can safely check my code into a repo without having to worry about a token lying around in the codebase.
```bash
# .env
...
DIRECTUS_DATABASE_PASSWORD=directus
DRONE_TOKEN=my-drone-token
```
The last thing to do is actually load the code into Directus. You can simply mount the `./hooks` folder we just created into the container and reload.
```yaml
# docker-compose.yml
version: "3.7"
...
directus:
...
volumes:
...
- ./hooks:/var/directus/public/extensions/custom/hooks
```
This will trigger a curl post request every time items in a collection listed inside of `$collectionsToWatch` get either created, updated, or deleted.
You will probably need to make some adaptations if you are not using Drone, but in the end it boils down to making an HTTP request to your build server that triggers a new build.
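Purely as an illustration, and without assuming anything about your specific CI server, such a trigger could look roughly like this (the URL and token variable are placeholders, not part of this setup):
```bash
# Hypothetical webhook trigger for a generic build server.
# Replace the URL and $CI_TOKEN with whatever your CI expects.
curl -X POST \
  -H "Authorization: Bearer $CI_TOKEN" \
  https://ci.example.com/api/trigger-build
```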


@ -1,290 +0,0 @@
---
title: 'A practical introduction to React Hooks'
date: '2019-05-03'
categories:
- 'coding'
tags:
- 'hooks'
- 'javascript'
- 'react'
coverImage: './images/matt-artz-353210-unsplash-scaled.jpg'
---
Since [React](https://reactjs.org/) 16.8 was published in February, Hooks are now officially supported and the API is finalised and stable. They arose around the idea of functional programming. In short: they allow us to have state in functional components, and with custom hooks (we'll have a look at those later) they let us reuse and share state logic between multiple components. This article assumes a basic understanding of React.
All the code shown can be found here: [https://git.nicco.io/cupcakearmy/guide-react-hooks](https://git.nicco.io/cupcakearmy/guide-react-hooks)
#### What we will look at today
1. Class Components vs Functional Components
2. Native React hooks
- `useState`
- `useEffect`
- `useRef`
3. Custom hooks
- `useWindow`
- `useApi` (The real power)
## 1\. Class vs Functional
Let's first have a look at the 'hello world' of react: A simple counter which we can increment or decrement.
###### Class
```
import React from 'react'
class SimpleClass extends React.Component {
constructor(props) {
super(props)
this.state = {
counter: 0,
}
}
componentDidMount() {
console.log('Lets goo 🚀')
setTimeout(() => this.setState({ counter: 5 }), 2000)
}
componentDidUpdate() {
console.log(this.state.counter)
}
render() {
return <div>
<div>{this.state.counter}</div>
<br/>
<button onClick={() => this.setState({ counter: this.state.counter - 1 })}>Decrease</button>
<button onClick={() => this.setState({ counter: this.state.counter + 1 })}>Increase</button>
</div>
}
}
```
Easy! Now we will convert the snippet above to the functional equivalent with the help of hooks.
###### Hooks
```
import React, { useEffect, useState } from 'react'
const SimpleFC = () => {
const [counter, setCounter] = useState(0)
return <div>
<div>{counter}</div>
<br/>
<button onClick={() => setCounter(counter - 1)}>Decrease</button>
<button onClick={() => setCounter(counter + 1)}>Increase</button>
</div>
}
```
Awesome 🚀 Simple enough right?
## 2\. Native React hooks
### useState
Our constructor with `state` is gone and we have a simple `const [counter, setCounter] = useState(0)`.
How does this work? `useState` returns an array which, destructured, gives us a getter and a setter. The parameter we pass to it is the initial value. That is all. Simple and useful.
### useEffect
How about the timeout and the `console.log`? Welcome `useEffect`!
`useEffect` takes a function and executes it every time the component updates. So it is basically `componentDidMount` and `componentDidUpdate` together.
The second parameter determines when the function will be triggered. It expects an array and checks whether the variables inside it change.
If no array is passed it will trigger every time the component gets updated and or mounted.
This means that you can pass props into the array and it will effect only when those change. Also, if you pass an empty array it will trigger only once and is equivalent to `componentDidMount`.
```
useEffect(myFN) // triggered every time the component gets updated
useEffect(myFN, []) // Only triggered once, when the component mounts
useEffect(myFN, [prop1, prop2]) // Triggered when either of the props changes
```
In our example from above we would use it as follows:
```
import React, { useEffect, useState } from 'react'
const SimpleFC = () => {
const [counter, setCounter] = useState(0)
useEffect(() => {
setTimeout(() => {
setCounter(5)
}, 1000)
}, [])
useEffect(() => {
console.log(counter)
})
return <div>
<div>{counter}</div>
<br/>
<button onClick={() => setCounter(counter - 1)}>Decrease</button>
<button onClick={() => setCounter(counter + 1)}>Increase</button>
</div>
}
```
### useRef
Now let's have a look at `useRef`. We will have a normal class based component and the equivalent functional one with the help of hooks.
###### Class
```
class RefClass extends React.Component {
constructor(props) {
super(props)
this.myRef = React.createRef()
this.change = this.change.bind(this)
}
change() {
this.myRef.current.style.backgroundColor = '#6ba7ee'
}
render() {
return <div>
<button onClick={this.change}>Change Me</button>
<br/><br/>
<div ref={this.myRef} style={{ width: 50, height: 50, backgroundColor: '#000000' }}/>
</div>
}
}
```
###### Hooks
```
const RefFN = () => {
const rect = useRef()
const change = () => rect.current.style.backgroundColor = '#6ba7ee'
return <div>
<button onClick={change}>Change Me</button>
<br/><br/>
<div ref={rect} style={{ width: 50, height: 50, backgroundColor: '#000000' }}/>
</div>
}
```
That is a huge improvement in terms of the amount of code and, most importantly, readability. `rect.current` points to the DOM element, which we can then modify at will.
As a side note: look how much cleaner the functions are. Instead of needing to bind the function to `this`, in functional components we just need to define them.
## 3\. Custom hooks
This is where the real power lies. With custom hooks react allows you to reuse stateful logic and share it between components. Very powerful.
We will cover two examples:
1. Window size
2. Consume an API
### Window size
Assume you want to make a component dependent on the window size of the browser. With react hooks this is quick, easy and reusable.
###### hooks.js
```
export const useWindowSize = () => {
const getCurrentSize = () => ({ height: window.innerHeight, width: window.innerWidth })
const [size, setSize] = useState(getCurrentSize())
useEffect(() => {
const handle = () => setSize(getCurrentSize())
window.addEventListener('resize', handle)
return () => window.removeEventListener('resize', handle)
})
return size
}
```
###### component.jsx
```
import { useWindowSize } from '../Hooks'
const Custom = ()=> {
const size = useWindowSize()
return <div>
Width: {size.width}
<br/>
Height: {size.height}
</div>
}
```
As we can see we created a custom hook called `useWindowSize`. We now can use our own hook inside of other components.
Custom hooks are just arrow functions that use the native `useState` and `useEffect` plus some custom logic you add.
Note the `return () => window.removeEventListener('resize', handle)` inside the effect function. You can return a function from the effect function that will get called whenever the hook is unmounted. This allows us to do cleanup. In this case we stop listening for window size changes. Neat 💪
### API Hook
Last but definitely not least: API calls. I personally think this is where hooks really show their power. I'll show you the code first and then explain.
###### hooks.js
```
export const useCallApi = (url) => {
const [data, setData] = useState()
const update = () => {
fetch(url)
.then(response => response.json())
.then(json => setData(json))
}
useEffect(() => {
update()
}, [])
return [data, update]
}
```
###### posts.jsx
```
import { useCallApi } from '../Hooks'
const Posts = () => {
const [posts] = useCallApi(`https://jsonplaceholder.typicode.com/posts`)
const [users] = useCallApi(`https://jsonplaceholder.typicode.com/users`)
// ...
if (!posts) return <div>Loading 🕰</div>
return <div>
{posts.map((post, i) => <div key={i}>
<h3>{post.title}</h3>
<p>{post.body}</p>
</div>)}
</div>
}
```
What is happening? We created a custom hook that queries an API and returns the result. How? We pass a url to the hook and we get the data back.
Internally the hook uses `useState` to save the results. It executes the update function once (because the `useEffect` has an empty array as its second parameter).
Now we can use the `useCallApi` hook in multiple components or many times inside the same component. Options are endless.
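As a small usage sketch, the second value returned by the hook (the `update` function) can also be wired to a manual refresh. The refresh button below is an illustrative addition, not part of the original example:
```
import { useCallApi } from '../Hooks'

const Posts = () => {
    // Destructure both the data and the update function returned by the hook.
    const [posts, refreshPosts] = useCallApi(`https://jsonplaceholder.typicode.com/posts`)

    if (!posts) return <div>Loading 🕰</div>
    return <div>
        <button onClick={refreshPosts}>Refresh</button>
        {posts.map((post, i) => <div key={i}>
            <h3>{post.title}</h3>
            <p>{post.body}</p>
        </div>)}
    </div>
}
```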


@ -1,187 +0,0 @@
---
title: 'A sane and efficient guide for consuming GraphQL endpoints in Typescript'
date: '2021-12-31'
categories:
- 'coding'
tags:
- 'code-generation'
- 'graphql'
- 'typescript'
coverImage: './images/clayton-robbins-Ru09fQONJWo-unsplash-scaled.jpg'
---
GraphQL is becoming common practice in the wild, yet I feel the workflow with TypeScript is still not straightforward. I want to propose one way to go about it and hopefully make your next TypeScript GraphQL project a joy to work with!
Lets dive deeper 🤿.
I created a tiny [companion repository](https://github.com/cupcakearmy/blog-typescript-graphql) if you want to check out the code and try it out.
Or check out the [finished demo](https://blog-typescript-graphql.vercel.app/).
## Intro
First we need to decide what we want (and probably need):
- Editor support for syntax highlighting `gql` and `.graphql` files.
- Strict type safety for our client.
- Easy tooling & workflow
So our workflow will look something like this:
```
GraphQL API -> Schema -> Queries & Mutations -> TypeScript -> Client
```
For this article we'll build a minuscule one pager using the [SpaceX Land GraphQL API](https://api.spacex.land/graphql/) to display some space travel data.
## Editor setup
The setup will be for VSCode. For that we first install the [GraphQL extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql). This will enable us to have warnings and autocompletion inside of `gql` tags and `.graphql` files.
We need to add a `.graphqlrc.yml` file at the root with the following content:
```yaml
schema: https://api.spacex.land/graphql/
```
## Writing Queries & Mutations
Now onto the real stuff.
We want to take our endpoint, generate types and queries from it that can then be used by Typescript safely. To do that we will:
1. Setup generators for Schema, Queries, Mutations & SDK.
2. Write some Queries & Mutations
3. Generate the SDK
4. Consume the SDK
### Setup
There is this amazing project called `@graphql-codegen`, which is a collection of tools that help you generate various things from GraphQL. Let's install:
```bash
# Generators
pnpm i -D @graphql-codegen/cli @graphql-codegen/typescript @graphql-codegen/typescript-operations @graphql-codegen/typescript-graphql-request
# For the SDK
pnpm i -D graphql graphql-request graphql-tag
```
I will assume my GraphQL stuff will live under `./src/lib/gql`
We will create a top-level configuration file called `codegen.yaml` to handle all of our generation steps. Ignore the `config` option for now; I will explain it later.
```yaml
schema: https://api.spacex.land/graphql/
documents: 'src/**/*.graphql'
generates:
./src/lib/gql/gen.ts:
plugins:
- '@graphql-codegen/typescript'
- '@graphql-codegen/typescript-operations'
- '@graphql-codegen/typescript-graphql-request'
config:
maybeValue: 'T'
typesPrefix: GQL
immutableTypes: true
useTypeImports: true
avoidOptionals: true
```
The property `schema` does not need an explanation.
`generates` has 3 plugins enabled: one for the general types, another for queries and mutations, and the last one to generate a ready-to-use SDK, which will be saved under `./src/lib/gql/gen.ts`.
`documents` is a glob that will find all our GraphQL files we write and generate the according code.
### Creating Queries
Now let's create a `src/lib/gql/root.graphql` file and write some queries, all autocompleted of course!
```gql
query LaunchpadsMany {
launchpads(limit: 10) {
id
name
location {
name
}
successful_launches
status
}
}
query LaunchByYear($year: String!) {
launches(find: { launch_year: $year }) {
mission_id
mission_name
launch_date_utc
rocket {
rocket_name
}
}
}
```
### Let magic do its thing
```bash
pnpm exec graphql-codegen
```
This will look at all our custom queries and mutations and generate a ready-to-consume SDK that is completely typed. Amazing!
### Leverage the new SDK
```ts
// src/lib/gql/index.ts
import { GraphQLClient } from 'graphql-request'
import { getSdk } from './gen'
const client = new GraphQLClient('https://api.spacex.land/graphql/')
export const SDK = getSdk(client)
```
```ts
import { SDK } from '$lib/gql'
const data = await SDK.LaunchByYear({ year: '2021' })
```
You can also use the generated types to type the result explicitly:
```ts
import { SDK } from '$lib/gql'
import type { GQLLaunchByYearQuery } from '$lib/gql/gen'
const data: GQLLaunchByYearQuery = await SDK.LaunchByYear({ year: '2021' })
```
Everything is typed now: I can't pass a number to the `year` variable or use return data that does not exist; TypeScript will error on me. This not only gives us autocompletion but also confidence in what we are doing.
### Configuration options
I promised I would come back to it at some point.
```yaml
schema: ...
generates:
...
config:
maybeValue: "T"
typesPrefix: GQL
immutableTypes: true
useTypeImports: true
avoidOptionals: true
```
There are [many options](https://www.graphql-code-generator.com/plugins/typescript#config-api-reference) for the generators, but I think these are quite sensible defaults.
`maybeValue` is `T | null` by default, but since we only use our own queries, which are type safe, we can remove the uncertainty and use the correct type straight away.
`avoidOptionals` is the same idea as `maybeValue`, just for optional properties (`prop?:`). We don't want those either.
`typesPrefix` is useful if you have your own type definitions that you don't want to clash with. I like to prefix all my generated GraphQL stuff with `GQL` to keep it tidy.
`immutableTypes`: I prefer using immutable types, which basically adds a `readonly` to every property. This way we are sure we are not editing data on the client.
`useTypeImports` uses `import type` whenever possible.
## Final thoughts
I hope this made your GraphQL life a bit easier; it definitely did for me, and it's way more fun to consume GraphQL APIs this way. Also worth mentioning: you can use the `@graphql-codegen/typescript-generic-sdk` package instead of `@graphql-codegen/typescript-graphql-request` if you want to do the network requests yourself. It's easy to use, but if you don't really have a reason, I'd say just stick with the `graphql-request` one.


@ -1,123 +0,0 @@
---
title: 'Automate Github releases with Drone.'
date: '2020-01-29'
categories:
- 'coding'
tags:
- 'cd'
- 'drone'
coverImage: './images/franck-v-U3sOwViXhkY-unsplash-scaled-1.jpg'
---
If you have a project on GitHub that has releases, for code or binaries for example, it might be a good idea to automate them. Not only does this save a lot of clicks and time, it also makes releases predictable and therefore less prone to errors in the process.
For this article I will take my own [project](https://github.com/cupcakearmy/autorestic) as the example here, but of course this can be applied to any project, written in whatever language and/or framework.
Also, I will base this guide on [Drone](https://drone.io/), but I'm sure an equivalent workflow exists for Jenkins/Circle/whatever CI/CD system you are using.
This means I'm assuming you have a repository already running with Drone.
The first thing we will need is an access token for the Github API.
You can get one here: [https://github.com/settings/tokens](https://github.com/settings/tokens). I called mine `Drone`, and you need to check the permissions for the repos as follows.
<figure>
![](images/Screenshot-2020-01-29-at-14.57.05.png)
<figcaption>
How to create a new token in Github
</figcaption>
</figure>
Copy the token and save it somewhere **safe**. You will see it only once.
We will add this token to our Drone repository settings. For that navigate to your drone instance and open the settings for the repository in question.
<figure>
![](images/Screenshot-2020-01-29-at-14.55.28.png)
<figcaption>
Add the token to Drone secrets
</figcaption>
</figure>
I've called my secret `github` and I have not allowed it in PRs. Otherwise a PR made by some random user could trigger a release. We don't want that.
Now it's time to edit our drone file and make everything automatic. The flow at the end will be as follows.
1. Code, commit and develop
2. When you are ready for the next release we create a tag
3. Once a tag is created and pushed, Drone will automatically build and release the code attached to that tag.
Simple, right? Let's see how!
```yaml
# .drone.yml
---
kind: pipeline
name: default
steps:
- name: build
image: node
pull: always
commands:
- yarn
- yarn run bin
when:
event: tag
- name: publish
image: plugins/github-release
pull: always
settings:
api_key:
from_secret: github
files: bin/*
checksum:
- sha512
note: CHANGELOG.md
when:
event: tag
---
kind: signature
hmac: 3b1f235f6a6f0ee1aa3f572d0833c4f0eec931dbe0378f31b9efa336a7462912
```
Let's understand what is happening here:
First I'm building my project. In this case it is a standalone TypeScript executable built by [pkg](https://github.com/zeit/pkg). The built binaries will be emitted into the `./bin` folder. But it really does not matter; it could be anything.
Secondly we tell the [Github release plugin](http://plugins.drone.io/drone-plugins/drone-github-release/) what files we want to include in the release. In my case this was everything inside the `bin` folder. This can also be an array.
```
files:
- dist/*
- bin/binary.exe
```
The `api_key` holds the token, which we load from a secret so that we don't simply put it in the `.drone.yml` file, which would be a huge security issue!
The `checksum` setting is also amazing because, as the name suggests, the plugin automatically generates checksums for all the files. That is amazingly practical and there is no reason not to do it. You can choose among a few hash functions, but I would suggest simply going with `sha512`.
## So how do I trigger a release now?
Simple! First tag your code with the following command
```bash
git tag 1.2.3
```
Now push the tag and drone will be on its way
```bash
git push --tags
```
That's it! Hope it made your release journey easier 🙂


@ -1,55 +0,0 @@
---
title: 'Backup MongoDB inside of Docker the easy way'
date: '2019-08-15'
categories:
- 'coding'
tags:
- 'cli'
- 'docker'
coverImage: './images/tobias-fischer-PkbZahEG2Ng-unsplash-scaled.jpg'
---
Backing up a mongo instance is more confusing than it should be. Maybe you have run into a `the input device is not a TTY` error, or you simply don't know how to do it? Here are two one-liners to back up and restore a running mongo instance.
## Setup
First we define our mongo instance like below. Notice that instead of mapping the data directory onto our filesystem we have a native volume.
###### docker-compose.yml
```
version: '3.7'
volumes:
db:
services:
db:
image: mongo:3-xenial
restart: always
volumes:
- db:/data/db
ports:
- 27017:27017
```
Then start with `docker-compose up -d`.
## Backup
First we will do a backup of our running instance.
```
docker-compose exec -T db mongodump --archive --gzip --db mydb > dump.gz
```
The `-T` option disables pseudo-TTY allocation, which allows piping the output to our own machine. We also tell mongo to use the `--gzip` option to compress the file significantly.
Lastly we specify the `--db <database>` that we want to back up.
## Restore
Whenever we want to restore a db, or maybe seed it we can run the following:
```
docker-compose exec -T db mongorestore --archive --gzip < dump.gz
```


@ -1,87 +0,0 @@
---
title: 'Be your own (tiny) image CDN'
date: '2023-04-28'
coverImage: './images/meagan-carsience-QGnm_F_nd1E-unsplash1.jpg'
---
Today, I want to share how to create and host your own image transformation service, much like the well-known [Imgix](https://imgix.com/) and [Cloudinary](https://cloudinary.com/). The aim is to have a powerful transformation server for images that caches, so images only need to be computed once.
The building blocks will be [imgproxy](https://github.com/imgproxy/imgproxy) and [nginx](https://nginx.org/). The former is a battle tested and fast image server with support for most image operations, while nginx should not need an introduction.
While imgproxy is the core of this operation, it does not support caching. This is intentional, as it's intended to be run behind a proxy. For that, nginx is the tool of choice, as it enables us to easily set up caching rules to avoid generating the same image twice in a given cache interval. Everything will be done in docker containers, but the concept, of course, extends to bare metal too.
## Setup
Imgproxy fortunately is very customisable and options can be passed by env variables, which is wonderful.
It's generally advised to use signed URLs if possible. In my case, there was no backend that could sign them, so it was avoided. Whenever you omit signing, it is critical to limit the allowed sources to the minimum with `IMGPROXY_ALLOWED_SOURCES` so that the service cannot be abused by other websites.
Below is the Compose file used. Only `IMGPROXY_BIND` is required, as otherwise nginx cannot connect to our image container. The other options are up to you and are just here for a quick setup.
```yaml
# docker-compose.yaml
version: '3.8'
volumes:
cache:
services:
img:
image: darthsim/imgproxy
environment:
# Required for nginx
IMGPROXY_BIND: 0.0.0.0:80
# Security
IMGPROXY_MAX_SRC_RESOLUTION: 100
IMGPROXY_ALLOWED_SOURCES: https://images.example.org/
# Transforms
IMGPROXY_ENFORCE_WEBP: true
IMGPROXY_ENFORCE_AVIF: true
IMGPROXY_ONLY_PRESETS: true
IMGPROXY_PRESETS: default=resizing_type:fit,sm=size:250:250,md=size:500:500,lg=size:1000:1000
proxy:
image: nginx
ports:
- 80:80
volumes:
- ./proxy.conf:/etc/nginx/conf.d/default.conf:ro
- cache:/tmp
```
The more interesting part is the nginx configuration file below. In this case, we target 30 days as a cache TTL. This could be easily increased if we are only talking about static images.
```nginx
# Set cache to 30 days, 1GB.
# Only use the uri as the cache key, as it's the only input for imageproxy.
proxy_cache_path /tmp levels=1:2 keys_zone=images:8m max_size=1g inactive=30d;
proxy_cache_key "$uri";
proxy_cache_valid 200 30d;
server
{
listen 80;
server_name _;
location /
{
proxy_pass_request_headers off;
proxy_set_header HOST $host;
proxy_set_header Accept $http_accept;
proxy_pass http://img;
proxy_cache images;
}
}
```
Here we are configuring a few things, so let's elaborate:
First, a cache is configured at the location `/tmp`, with the name `images`, a maximum size of 1 gigabyte and the `inactive` parameter set to 30 days.
For the cache key, we use only the `$uri` variable, as all the parameters that affect image generation are included in the path, which therefore makes each image transformation unique.
Lastly, we tell nginx to cache all responses with code `200` for 30 days.
Another important trick is to strip all headers that reach the proxy. This is done by setting `proxy_pass_request_headers off` and only passing the `Accept` header, as it's required for automatically determining the image format.
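To verify that caching actually kicks in, one option (not part of the config above, just an optional tweak) is to add `add_header X-Cache-Status $upstream_cache_status;` inside the `location` block and then request the same image twice. The URL below is purely hypothetical; adapt it to your imgproxy path layout.
```bash
# first request should report MISS, the second one HIT
URL='http://localhost/<processing-options>/plain/https://images.example.org/photo.jpg'
curl -s -o /dev/null -D - -H 'Accept: image/webp' "$URL" | grep -i x-cache-status
curl -s -o /dev/null -D - -H 'Accept: image/webp' "$URL" | grep -i x-cache-status
```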

View File

@ -1,67 +0,0 @@
---
title: 'Cleanup downloaded Google Photos Takeout archives'
date: '2019-05-04'
categories:
- 'general'
tags:
- 'google-photos'
- 'google-takeout'
- 'icloud-photos'
- 'migration'
coverImage: './images/rayan-almuslem-1302778-unsplash-scaled.jpg'
---
Recently I've been taking my tin foil hat a bit more seriously, and since I mostly live in the Apple ecosystem (yes, you can judge me) iCloud Photos felt like a pretty good alternative. Yes, it's still a cloud, but the content [is encrypted](https://support.apple.com/en-us/HT202303) and, most importantly, Apple has no real economic incentive to data-mine your data. They are far ahead in terms of privacy. With that out of the way let's go! 🚀
TLDR: I wrote this [cleaning script](https://gist.github.com/CupCakeArmy/51070b311e6fd0a3f2d793bee3350ede) (tested only on macOS) to remove all duplicates from the [Google Takeout](https://takeout.google.com/) folders.
The process seemed easy at first. Google offers an awesome tool for exporting data out of their servers. It's called [Takeout](https://takeout.google.com/). So basically you select the Google Photos service and let them create the archives. Then after a few hours you can download them.
Now, at the time I had ~40gb worth of pictures and videos saved in Google's cloud, however the archives I downloaded were about ~90gb. I started looking into it: a lot of photos were duplicates and edited versions that Google was keeping. In addition, the folders were full of JSON metadata.
**time for cleanup 🧹**
Fortunately for us there is the awesome `find` command that will save our lives. Removing metadata and duplicates from more than ~50k files is impossible by hand.
First we need to remove all `.json` files:
```
find ./my_takeout_folder -name "*.json" -type f -delete
```
Then all the duplicates that contain a `(1)` at the end of the file name:
```
# macOS
find -E ./my_takeout_folder -regex ".*\([0-9]+\).*" -type f -delete
# Unix (Thanks to Stravos F. for pointing that out ❤️)
find ./my_takeout_folder -regextype posix-extended -regex ".*\([0-9]+\).*" -type f -delete
```
Then all the photos edited by Google:
```
find ./my_takeout_folder -name "*edited*" -type f -delete
```
And lastly, remove all the empty folders:
```
find ./my_takeout_folder -type d -empty -delete
```
You probably will have multiple folders because you will have to download multiple archives. Simply unpack them all into one folder and run the commands on that folder.
If you are too lazy to run them manually, just get this script I wrote:
<script src="https://gist.github.com/CupCakeArmy/51070b311e6fd0a3f2d793bee3350ede.js"></script>
Then...
```
chmod +x ./clean
./clean my_folder_with_all_the_google_takouts
```
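Before dragging everything into Photos, it can be reassuring to verify that the cleanup actually worked; these are just generic `find` checks, not part of the script:
```bash
# count the files that are left to import
find ./my_takeout_folder -type f | wc -l
# both of these should print nothing if the cleanup worked
find ./my_takeout_folder -name "*.json" -type f
find ./my_takeout_folder -name "*edited*" -type f
```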
Finally just drag and drop into the Photos app.

View File

@ -1,84 +0,0 @@
---
title: 'Going beyond NPM: meet Yarn & pnpm'
date: '2019-08-27'
categories:
- 'coding'
tags:
- 'javascript'
- 'node'
- 'npm'
- 'pnpm'
- 'yarn'
coverImage: './images/ruchindra-gunasekara-GK8x_XCcDZg-unsplash.jpg'
---
If you are a JS developer you probably use NPM multiple times a day without thinking about it. It's the default package manager which ships with node.
But have you wondered what if there was another way of managing your (probably too many 😉) packages? We will look at [yarn](https://yarnpkg.com/en/) and [pnpm](https://pnpm.js.org/) as worthy rivals.
**Update** _27 Aug 2019 @ 21:23_
As [this user](https://www.reddit.com/r/javascript/comments/cw64xt/going_beyond_npm_meet_yarn_pnpm/ey92a0i?utm_source=share&utm_medium=web2x) on reddit pointed out, npm now supports offline installs too, so that part is the same for all three package managers. Also apparently the checksums, but I could not verify it.
**Update** _27 Aug 2019 @ 22:51_
If you are having trouble with pnpm, try using `pnpm i --shamefully-flatten`. Thanks to [this reddit user](https://www.reddit.com/r/node/comments/cw64qq/going_beyond_npm_meet_yarn_pnpm/ey9aa1v?utm_source=share&utm_medium=web2x).
For the lazy readers: [Jump to the conclusion here](#conclusion).
You might wonder now: why? _Why should I bother reading this when NPM works perfectly_? Is this just another rush to the newest shiny tool? Don't worry: there are actual reasons you might want to switch.
#### Speed!!... or the lack of it?
The biggest issue that plagues npm is speed. Unfortunately even with the latest version (6) npm is slow. If you ever had to delete the node_modules folder to do a clean install on a bigger project you will know what I mean. Fans start to spin, laptop gets warm and you can go read an article while npm chews on the dependencies.
## Yarn to the rescue
Yarn came along in the early days and you have definitely heard about it wandering across Github. Back in the day (before npm 5 with the `package-lock.json`) Yarn addressed the issues with consistency by being the first to generate a lockfile (`yarn.lock`). This file could be checked in and the devs would have consistent dependencies across multiple machines.
#### Speed
Yarn is often twice as fast as npm. It's truly impressive and you need to see it for yourself to believe it. The CLI output is also way more human-friendly.
#### Offline
Every package version is only downloaded once, so if you happen to lose connection or need to download the same package again you will gain a substantial speed boost since they are cached locally.
_See update at the top_.
#### yarn upgrade-interactive
This is incredible 😍. If you run `yarn upgrade-interactive` you get an interactive CLI where you can choose which packages to upgrade and which not. It's a simple thing, but one you cannot live without anymore once you have tried it.
#### yarn why
Similar to the previous command, this is a very handy CLI goodie. Simply run `yarn why some-package` and yarn will tell you why it was installed, which dependency it came from, etc.
#### Lack of npx
Unfortunately Yarn lacks the `npx` equivalent of npm, which is the only drawback I encountered while using yarn. Other than that yarn is a very fast and a solid alternative to npm.
## PNPM: The underdog
I truly love this project so I might be biased. They basically implemented a thought I had a while back: **reuse the same packages across your computer**. Confused? Let me explain:
Have you ever measured the size of your node_modules?
```
du -sh node_modules
# --> 816M node_modules
```
What?! 0.8Gb for a react-native project?!
Unfortunately that is a pretty common reality and **pnpm** aims to solve that.
PNPM links your packages with symlinks. This means that **the same version of a package only exists once** on your computer. If you ever install the same package twice, it will simply be symlinked into your node_modules. 🚀
[On top of that it's even faster than yarn.](https://github.com/pnpm/benchmarks-of-javascript-package-managers)
#### So perfection is achieved? Let's all switch to pnpm?
Unfortunately it's not that easy. If you start a new project you can probably go with pnpm, but with existing projects I had some problems with building my apps. So it's definitely experimental at best and should not be used without rigorous testing as it might break your app. pnpm also supports npx with `pnpx`.
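If you want to try it on an existing project, the switch is quick. A minimal sketch, assuming you have committed or backed up your work first:
```bash
# install pnpm globally and do a clean install with it
npm install -g pnpm
rm -rf node_modules
pnpm install
```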
## Conclusion Time
<table><tbody><tr><td></td><td><strong>Speed</strong></td><td><strong>NPX</strong></td><td><strong>Offline</strong></td><td><strong>Well supported</strong></td></tr><tr><td>npm</td><td>🐌</td><td></td><td></td><td></td></tr><tr><td>yarn</td><td>🚄</td><td></td><td></td><td></td></tr><tr><td>pnpm</td><td>🚀</td><td></td><td></td><td></td></tr></tbody></table>
As you can see above there is no clear winner. NPM is the most compatible of course, but really falls behind in terms of speed. **Yarn in my opinion is currently your best bet**; fall back to `npx your-command` when npx is needed.
pnpm is an incredibly cool tool but is not ready yet for production. With react-native it can cause problems, but with the "normal" stacks it works very well. I will use pnpm for my personal projects from now on.

View File

@ -1,171 +0,0 @@
---
title: 'How to avoid killing your MacBook / Laptop battery'
date: '2019-07-23'
categories:
- 'general'
tags:
- 'battery'
- 'laptop'
- 'macbook'
coverImage: './images/israel-palacio-ImcUkZ72oUs-unsplash-scaled.jpg'
---
As of May 2020 this is no longer relevant! macOS 10.15.5 finally addressed this issue by not charging the battery to 100% depending on battery temperature, health and so on 🚀🚀🚀
There is a lot of misleading wisdom out there about batteries (e.g. that it is ok to leave your laptop plugged in). The reasons behind it are pretty interesting and not at all trivial. If you want to know why: keep reading.
**TLDR;** [Jump to the solution](#solution)
<figure>
![](images/israel-palacio-ImcUkZ72oUs-unsplash-scaled.jpg)
<figcaption>
Photo by [israel palacio](https://unsplash.com/@othentikisra?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/electricity?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)
</figcaption>
</figure>
> The worst situation is keeping a fully charged battery at elevated temperatures.
>
> batteryuniversity.com
Batteries are consumable items. This means they degrade over time and lose their ability to store energy. We will see how and why this happens and how to combat it.
## Why do batteries degrade?
In theoretical chemistry your battery should be able to last forever. But in the real world of course that's not possible. There are 2 main killers for batteries:
1. Heat
2. Cycles (Especially above ~80%)
### Killer #1: Heat
With heat the internal materials of the battery start to lose their chemical form and therefore their capacity. Below is a table that illustrates how batteries react to temperature exposure.
_Estimated recoverable capacity when storing Li-ion for one year at various temperatures._
<table><tbody><tr><td><strong>Temperature</strong></td><td><strong>40% charge</strong></td><td><strong>100% charge</strong></td></tr><tr><td>0°C</td><td>98%&nbsp;(after 1 year)</td><td>94%&nbsp;(after 1 year)</td></tr><tr><td>25°C</td><td>96%&nbsp;(after 1 year)</td><td>80%&nbsp;(after 1 year)</td></tr><tr><td>40°C</td><td>85%&nbsp;(after 1 year)</td><td>65%&nbsp;(after 1 year)</td></tr><tr><td>60°C</td><td>75%&nbsp;(after 1 year)</td><td>60%<br>(after 3 months)</td></tr></tbody></table>
As we can see, a heated battery basically is a lost cause, especially when fully charged.
Unfortunately **there is little we can do for the heat issue** since the thermal design of our Laptop is fixed and the manufacturer will choose where to place the battery and if and how to cool it.
### Killer #2: Cycles
Over this one we have much more control. A cycle varies in definition, but basically it is a discharge, followed by a charge.
Why are cycles bad? Well whenever we move electrons around our battery either by using it or while charging, the chemical material is subjected to wear. Why exactly this happens is beyond my understanding of chemistry, so I won't try to explain it since I would probably do it wrong.
What I can tell you is how to charge and use your battery in the correct manner:
1. Ideally leave your battery between 30% and 80%
2. Don't charge over 80% if not strictly required for a long trip or so.
### Don't believe me, trust the data
<figure>
![](images/DST-cycles-web2.jpg)
<figcaption>
Capacity loss as a function of charge and discharge bandwidth. [Source](https://batteryuniversity.com/index.php/learn/article/how_to_prolong_lithium_based_batteries)
</figcaption>
</figure>
As we can observe above, all the tested bandwidth that regularly charged to a full 100% degraded the fastest. Don't do that.
Part of the problem, as you can see in the graphic below, is that while **the first 80% of the capacity is charged easily and quickly, the last 80% to 100% follow an exponential curve**. Those last percentages are really taxing on the battery because you are trying to stuff the last electrons inside an almost full battery. Imagine stuffing a last bag into an almost full garbage bin. You will need to push it.
Making matters worse, the battery will heat up during the last steps of charging because of the strain it is undergoing. This only adds to the problem since, as we learned above, heat is incredibly bad for capacity.
<figure>
![](images/Battery-Charge-Voltage-vs-Time.png)
<figcaption>
Charging graph for Lithium Batteries.
Graph by [batteryuniversity.com](https://batteryuniversity.com/index.php/learn/article/charging_lithium_ion_batteries) - Colorised by [Android Authority](https://www.androidauthority.com/maximize-battery-life-882395/)
</figcaption>
</figure>
This is also the reason why electric vehicles only charge up to 80%. It increases the lifespan of the battery significantly. On the other hand, consumer **products like laptops and phones are more about selling you maximum battery life.** What they don't tell you is **how long that advertised battery life is going to last after 6 months** of usage.
### Myth: It's ok to keep your laptop plugged in
This is a misconception that arose in recent years. While it is not completely wrong, it overlooks some important aspects.
What is true? Modern laptops and phones don't overcharge the battery and will switch to using only the power cable as the source. **However** at some point the battery will dip below 97% and the laptop will start charging it again to 100%. Assuming you use your laptop for work the whole day, this will happen multiple times daily. **It will break it**.
## Solution
### For MacBooks (magsafe):
Put a piece of paper/cloth on the middle connector of your MacBook Magsafe charger **whenever your laptop is plugged in for long periods**.
<figure>
![](images/howto.jpg)
<figcaption>
How-To protect the battery with Magsafe chargers
</figcaption>
</figure>
**Update:** I tried new methods, the one that seems the most practical is to use a little piece of tape that you can bend in front of the connector when needed.
##### Updated method with tape
Put a piece of tape on your middle connector. For simplicity you can just cover 3 pins and leave the 2 outside pins (does not matter which side) free.
Now you can easily switch between charging the battery or just working on power **whenever your laptop is plugged in for long periods**.
<figure>
![](images/howto-1.jpg)
<figcaption>
Same method, just with tape. Much easier to use.
</figcaption>
</figure>
<figure>
![](images/status.jpg)
<figcaption>
Status of battery after modification
</figcaption>
</figure>
This will prevent your laptop from using **and** charging your battery while using it for a whole day.
**Credits for the hack**: [https://superuser.com/a/1130375](https://superuser.com/a/1130375)
### Laptops with removable batteries:
Simply remove the battery when using the laptop plugged in for long periods. This will prevent the heat of the laptop being transferred to the battery, and it won't charge over and over again to 100%.
#### Sources
- [https://batteryuniversity.com/index.php/learn/article/charging_lithium_ion_batteries](https://batteryuniversity.com/index.php/learn/article/charging_lithium_ion_batteries)
- [https://batteryuniversity.com/learn/article/bu_808b_what_causes_li_ion_to_die](https://batteryuniversity.com/learn/article/bu_808b_what_causes_li_ion_to_die)
- [https://batteryuniversity.com/index.php/learn/article/how_to_prolong_lithium_based_batteries](https://batteryuniversity.com/index.php/learn/article/how_to_prolong_lithium_based_batteries)
- [https://batteryuniversity.com/index.php/learn/article/do_and_dont_battery_table](https://batteryuniversity.com/index.php/learn/article/do_and_dont_battery_table)
- [https://www.electricbike.com/how-to-make-lithium-battery-last/](https://www.electricbike.com/how-to-make-lithium-battery-last/)
- [https://www.androidauthority.com/maximize-battery-life-882395/](https://www.androidauthority.com/maximize-battery-life-882395/)

View File

@ -1,343 +0,0 @@
---
title: 'How to bring your neural network to the web'
date: '2020-02-10'
categories:
- 'coding'
tags:
- 'ai'
- 'keras'
- 'machine-learning'
- 'tensorflow'
coverImage: './images/natasha-connell-byp5TTxUbL0-unsplash-scaled-1.jpg'
---
Artificial intelligence, neural networks, machine learning. I don't know which of them is the bigger buzzword. If we look past the hype there are some actually very interesting use cases for machine learning in the browser.
**For the lazy that simply want to jump to the source code**
[Here is the git repo](https://github.com/cupcakearmy/mnist) for you :)
**Or simply go to the [finished website](https://mnist.nicco.io/)**
Today we will look at how to train a simple MNIST digit recogniser and then export it into a website where we can see it in action. Therefore this article is split into three parts:
1. Training
2. Export & import the pre-trained model into a website
3. Build a simple website where we can use the model.
Also I am not going to explain what machine learning is, as there are enough guides, videos, podcasts, ... that already do a much better job than I could, and it would be outside the scope of this article.
The first thing we need to understand is that we will not train the model in the browser. That is a job for GPUs; the goal here is only to use a pre-trained model inside the browser. Training is a much more resource intensive task than simply using the net.
## Training the model
So, the first step is to actually have a model. I will do this in tensorflow 2.0 using the now included keras api. This means Python 🎉
The code below is basically an adapted version of the [keras hello world example](https://keras.io/examples/mnist_cnn/).
If you want to run the code yourself (which you should!) simply head over to [Google Colab](https://colab.research.google.com), create a new file and just paste the code. There you can run it for free on GPUs which is pretty dope!
```py
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Reshaping for channels_last (tensorflow) with one channel
size = 28
print(x_train.shape, x_test.shape)
x_train = x_train.reshape(len(x_train), size, size, 1).astype('float32')
x_test = x_test.reshape(len(x_test), size, size, 1).astype('float32')
print(x_train.shape, x_test.shape)
# Normalize
upper = max(x_train.max(), x_test.max())
lower = min(x_train.min(), x_test.min())
print(f'Max: {upper} Min: {lower}')
x_train /= upper
x_test /= upper
total_classes = 10
y_train = to_categorical(y_train, total_classes)
y_test = to_categorical(y_test, total_classes)
# Make the model
model = Sequential()
model.add(Conv2D(64, (3, 3), activation='relu', input_shape=(size,size, 1), data_format='channels_last'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(total_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train
model.fit(x_train, y_train,
batch_size=32,
epochs=12,
verbose=True)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
We can run this and we will get a pretty good accuracy. The MNIST dataset is not very hard to train on.
## Export the model
Now the conventional way to save a model is to use the `model.save("model.h5")` method provided by keras. This uses the h5 file format.
Unfortunately this is not compatible with tensorflow-js. So we need another way.
There is a package called tensorflowjs for Python (confusing, right? 😅) that provides the functionality we need:
```py
import tensorflowjs as tfjs

tfjs.converters.save_keras_model(model, './js')
```
```
It save the model data inside the `./js` folder ready to be used.
Inside there you will find a `model.json` that basically describes the structure of the model and something like `group1-shard1of1.bin` that contains the fitted weights.
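As an aside: the same `tensorflowjs` pip package also ships a command line converter, which is handy if you already saved a `model.h5`. Assuming the flags haven't changed, it looks roughly like this:
```bash
pip install tensorflowjs
# convert a saved Keras .h5 model into the web format inside ./js
tensorflowjs_converter --input_format keras model.h5 ./js
```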
## Import the model
Now we are ready to import that. First we need to install the `@tensorflow/tfjs` package.
```ts
import * as tf from '@tensorflow/tfjs'
let model
tf.loadLayersModel('/model.json').then((m) => {
model = m
})
```
```
Ok how do I use that now?
```ts
const tensor = tf.tensor(new Uint8Array(ourData), [1, 28, 28, 1])
const prediction = model.predict(tensor)
```
```
**What is happening here?**
In order to predict a value we first need a tensor (vector) of the same shape as the original input we trained the model with. In our case that is 1x28x28x1.
Also we will convert our pixel data into a `Uint8Array`.
## Using the canvas element to draw and predict numbers
I'm not gonna talk about what bundler, etc. I'm using. If you're interested, simply have a look at the [git repo](https://github.com/cupcakearmy/mnist).
First let's write some basic HTML for the skeleton of our page.
```html
<html>
<head>
<style>
* {
box-sizing: border-box;
font-family: monospace;
}
html,
body {
padding: 0;
margin: 0;
height: 100vh;
width: 100vw;
display: flex;
justify-content: center;
align-items: center;
}
body > div {
text-align: center;
}
div canvas {
display: inline-block;
border: 1px solid;
}
div input {
display: inline-block;
margin-top: 0.5em;
padding: 0.5em 2em;
background: white;
outline: none;
border: 1px solid;
font-weight: bold;
}
</style>
</head>
<body>
<div>
<h1>MNIST (Pretrained)</h1>
<canvas id="can" width="28" height="28"></canvas>
<br />
<input id="clear" type="button" value="clear" />
<br />
<input id="test" type="button" value="test" />
<br />
<h2 id="result"></h2>
<a href="https://github.com/cupcakearmy/mnist">
<h3>source code</h3>
</a>
</div>
<script src="./tf.js"></script>
<script src="./canvas.js"></script>
</body>
</html>
```
Next we need some short code for drawing on a canvas.
The code is adapted from [this stackoverflow answer](https://stackoverflow.com/a/8398189) and reduced to the only the basics we need.
In essence it's a canvas that listens on our mouse events and fills the pixels with black. Nothing more.
```ts
/* jslint esversion: 6, asi: true */
var canvas,
ctx,
flag = false,
prevX = 0,
currX = 0,
prevY = 0,
currY = 0,
dot_flag = false
var x = 'black',
y = 2
function init() {
canvas = document.getElementById('can')
ctx = canvas.getContext('2d')
w = canvas.width
h = canvas.height
canvas.addEventListener(
'mousemove',
function (e) {
findxy('move', e)
},
false
)
canvas.addEventListener(
'mousedown',
function (e) {
findxy('down', e)
},
false
)
canvas.addEventListener(
'mouseup',
function (e) {
findxy('up', e)
},
false
)
canvas.addEventListener(
'mouseout',
function (e) {
findxy('out', e)
},
false
)
window.document.getElementById('clear').addEventListener('click', erase)
}
function draw() {
ctx.beginPath()
ctx.moveTo(prevX, prevY)
ctx.lineTo(currX, currY)
ctx.strokeStyle = x
ctx.lineWidth = y
ctx.stroke()
ctx.closePath()
}
function erase() {
ctx.clearRect(0, 0, w, h)
}
function findxy(res, e) {
if (res == 'down') {
prevX = currX
prevY = currY
currX = e.clientX - canvas.offsetLeft
currY = e.clientY - canvas.offsetTop
flag = true
dot_flag = true
if (dot_flag) {
ctx.beginPath()
ctx.fillStyle = x
ctx.fillRect(currX, currY, 2, 2)
ctx.closePath()
dot_flag = false
}
}
if (res == 'up' || res == 'out') {
flag = false
}
if (res == 'move') {
if (flag) {
prevX = currX
prevY = currY
currX = e.clientX - canvas.offsetLeft
currY = e.clientY - canvas.offsetTop
draw()
}
}
}
init()
```
And now the glue to put this all together: the piece of code that listens on the "test" button.
```ts
import * as tf from '@tensorflow/tfjs'
let model
tf.loadLayersModel('/model.json').then((m) => {
model = m
})
window.document.getElementById('test').addEventListener('click', async () => {
const canvas = window.document.querySelector('canvas')
const { data, width, height } = canvas.getContext('2d').getImageData(0, 0, 28, 28)
const tensor = tf.tensor(new Uint8Array(data.filter((_, i) => i % 4 === 3)), [1, 28, 28, 1])
const prediction = model.predict(tensor)
const result = await prediction.data()
const guessed = result.indexOf(1)
console.log(guessed)
window.document.querySelector('#result').innerText = guessed
})
```
Here we need to explain a few things.
`canvas.getContext('2d').getImageData(0, 0, 28, 28)` simply returns a flattened array of the pixels from the point (0,0) to (28,28).
Then, instead of simply passing the data to the tensor, we need to do some magic with `data.filter` in order to keep only every 4th value, the alpha channel. This is because our canvas has 3 colour channels + 1 alpha, but we only need to know whether the pixel was drawn on or not. We do this by simply filtering for the index mod 4:
```ts
data.filter((_, i) => i % 4 === 3)
```
Lastly, we need to interpret the result. `prediction.data()` returns an array with 10 items, because we trained the model with exactly 10 possible outcomes. 10 digits, right?
Well, in that case we simply search in which position of the array we have a 1, and that index is our solution.
We search for a 1 because we only have floats from 0 to 1. So 1 is the maximum.
I hope this helped you understand the process better. It was pretty confusing at first for me too 😬

View File

@ -1,211 +0,0 @@
---
title: 'How to search in the JAM'
date: '2020-12-06'
coverImage: './images/uriel-soberanes-gCeH4z9m7bg-unsplash.jpg'
categories:
- 'coding'
tags:
- 'jam-stack'
- 'lunr'
- 'search'
- 'svelte'
---
So a lot (me included) now are building JAM stack landing pages, shops, full-stack apps, etc. and while you can have a backend of course not all of them have. For those who don't: **How do we search?**
So there is the obvious [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/), but that looks bad and is not really customizable. The results are very good, it's Google after all. However, for those who want something more custom: here's one way how.
A working example can be found right here [nicco.io/search](https://nicco.io/search) 😉
We will look at the following:
1. How to implement the search
2. Search Accuracy & Precision
3. Performance & Size
We can't rely on a backend as discussed above, so the magic will happen at build time, like everything in the JAM-verse.
I've decided to go with the free and open source [lunr.js](https://lunrjs.com/) which is a simple but still quite powerful search engine that can run in the client.
```
const idx = lunr(function () {
this.ref('name')
this.field('text')
this.add({ name: 'A', text: 'Lorem...' })
})
const results = idx.search('Lor*')
```
The first question that will probably pop up in your head is: "How will lunr be able to know what is on our website?" Here is where our work begins.
## Roadmap
1. Aggregate all the data of your site
2. Prebuild the index and make it available as static JSON
3. Load `lunr.js` into your site and start searching
## Preparing the Index
So I'm using [Sapper](https://sapper.svelte.dev/) for this blog so the examples will be based on it, but the same principle applies to all JAM tech.
First we need to aggregate all our data. In my case this means all the single pages, blog entries, projects and works. So I created a `/src/routes/search.json.js` file and got to work.
```ts
import lunr from 'lunr'
import { getAll } from '../lib/wp'
function removeHTML(s) {
return s.replace(/<.*?>|\s+|&#\d+;/g, ' ').trim()
}
async function convertForIdx(type, fields = []) {
// Load the data from Wordpress
const items = await getAll(type)
// Map only the fields we need and are relevant
const defaults = ['title', 'content', 'slug']
return items.map((item) => ({
url: `${item.type}/${item.slug}`,
data: [...defaults, ...fields].map((field) => removeHTML(item[field])).join(' '),
}))
}
export async function get(req, res) {
const all = await Promise.all([
convertForIdx('projects', ['description']),
convertForIdx('pages'),
convertForIdx('posts'),
convertForIdx('works', ['role']),
])
const idx = lunr(function () {
this.ref('url')
this.field('data')
all.flat().forEach((doc) => this.add(doc))
})
res.setHeader('Content-Type', 'application/json')
res.end(JSON.stringify(idx))
}
```
First I get all the data from the Wordpress backend and for each item I select at least the `title` and `content` as I want them to be searchable. Then we remove any html tags with a dirty regexp and finally we build the index.
When we call `JSON.stringify(idx)` the precomputed index will be serialized to JSON. Otherwise every client would have had to compute that on their own CPU, wasting cycles and possibly battery. We don't want that.
Now I have the "search model" ready. You can have a look: [nicco.io/search.json](https://nicco.io/search.json)
## Integrating the search
It's time to integrate the search into the actual website 🚀
```html
<script context="module">
export async function preload() {
const prebuilt = await this.fetch(`/search.json`).then((res) => res.json())
return { prebuilt }
}
</script>
<script>
import lunr from 'lunr'
import SearchResult from '../components/SearchResult.svelte'
export let prebuilt
let needle
let results = []
async function search(needle) {
if (!needle || !idx) {
results = []
} else {
let found = idx.search(needle + '~1')
if (!found.length) found = idx.search(needle + '*')
results = found.slice(0, 20)
}
}
$: idx = lunr.Index.load(prebuilt)
$: search(needle)
</script>
<input bind:value="{needle}" placeholder="needle" />
<ul>
{#each results as result (result.ref)}
<SearchResult {result} />
{/each}
</ul>
```
The first thing we do is load our prebuilt `/search.json` and feed it into an instance of `lunr`. This only needs to happen once; once the index is loaded we are ready to go.
```ts
const idx = lunr.Index.load(prebuilt)
```
For the searching itself `lunr` has quite a [few options](https://lunrjs.com/guides/searching.html). The most relevant for me were the wildcard and fuzzy search. While wildcard is good for when we haven't completed a word yet, fuzzy helps us with typos.
```ts
const fuzzy = idx.search(needle + '~1') // foo~1
```
While not explicitly said in the docs I'm guessing they use the [Levenshtein Distance](https://en.wikipedia.org/wiki/Levenshtein_distance), which means `~1` will replace at most 1 char.
```ts
const wildcard = idx.search(needle + '*') // fo*
```
Wildcards are straightforward. `lunr` supports any kind: `*oo`, `f*o` and `fo*`.
The result is an array with the `ref` field so you can find the related item and a `score`. They are already sorted by score, so basically you just need to write a for loop.
## Search Quality
Now, the accuracy and precision are of course not on par with Google, but more than good enough for a blog or a smaller site. And in about an hour you can add search to your JAM site without much work and stay Google free.
Also this approach gives you all the artistic liberties over the design.
## Performance & Size
Since we are prebuilding and packaging the whole site into one big `JSON` file it's worth taking a look at the size of the index.
For this I took the [Iliad by Homer](https://gutenberg.org/ebooks/6130) and split it up into a varying number of pieces to simulate the number of pages. At the same time, the more pieces, the smaller the content of a single "page".
Please note that it's ~1MB of plain text, so it's quite a lot.
You can get the source code for the "test" [here](https://gist.github.com/cupcakearmy/242b54ee6b1a914896390c91846aa4d4).
### Variable size documents
<figure>
![](https://api.nicco.io/wp-content/uploads/2020/12/Lunr-Index-Size-Compresion.svg)
<figcaption>
Graph of Lunr Index size
</figcaption>
</figure>
As you can see, with `1000` documents of around `1.15k` each, we end up with a compressed size of `563 KiB`, which starts to get big.
### A more real example
Here is an example where each document is around `10k` in size, roughly double the amount of text in this blog post. Then we add an ever-increasing number of documents to the index and watch it grow.
<figure>
![](https://api.nicco.io/wp-content/uploads/2020/12/Lunr.js-Index-Size-10k-Document-Size.svg)
<figcaption>
Chart of 10k sized documents building the index.
</figcaption>
</figure>
The results are very different of course. Please note that the second graph has a logarithmic scale! If we compare the compressed size at `500` documents we have `494kb @2.3k/doc` vs `1.09MiB @10k/doc`. Basically double, which is not that bad if we consider that the documents are around 5 times bigger.
Hope you enjoyed and learned something, take care ❤️

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54ef9ac3fb65009c53d55f103eadb1f6c1b9ac9cb93019f161d5c0d842576e61
size 191614

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ee60b8040150d154afd21faf3112421b9b43cacb0626a5b4c53c0590c69626a0
size 151085

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:767f3552e0f0244b006c0bcb5a8d3c39b2fb83d1eb008cd5abb6d8c8012c11da
size 182432

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f43dfc65d7f710d7cfe7f21d168c5142c450b95b363e23f340d31135676bedc4
size 33825

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e65961b86eb1febbe48eb9292c9a54eebdb61314a7b1223d7c9112423477dcf
size 175459

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e83975d7ec576ef6ca6c4fc7a007fd82d8066cdfc42ee097527f989ccf025756
size 2027604

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f1dfeb6c5d59b355b38a50a0888b178e9fa9fa93bd0d26dd94ffd87ecbc0fae3
size 136338

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a4a5b54dfff9307f3460189e6e80eedd6c80ee64eb3ae25bf5c7df3c3cccf882
size 56274

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0f241b1da4e817d415f8984ce54830276b0631042b31815a5569dd6937eb2ad
size 258865

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2b5d821660053d05743eb6439d652e77a3d4699a45884a9bce88169835c55c8a
size 564881

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5db61551895b9969afdb1356560f03c1918faca5e68169b6ac2d200ce700ce90
size 73938

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:88c62af32b444a6928c85e8f31cad2f4bef8ebed33893b0a28894d6f87593599
size 42234

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eb058e043414fe65d83876644ed84a7e2e336ad8b1b4d771062db8494c036e79
size 101970

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b70f8cacc7eaa9ec3127c96cf3d3fecddda61a35313b8fd6f42622d1f2983858
size 50254

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:641d3e8a3529e6e026773d16bc105b71a0ae27d8361faada37d52551ef198a0f
size 688323

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ef1c569cdcf9851539f06176d0c9e0debc52875976ff330b1cc6971572fe1404
size 665199

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:641e531b6383c8552a05586e2ac44c53718c0b6130d72f23dcb8edf74ee4b3dd
size 794403

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2f1402d168936c5ef8b97bc4f30afe3d4bae64136feb8d85b8e85bc15d3810d3
size 287558

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:40588829e34a45f71777c36e1dfdc9078bdf88d3196051034d20249d3a0ea539
size 104152

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e7b72e16d3f5bcd368eb3b39078e32d0ef904023b7984f32fa2d0e364109479
size 43477

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:48603b5e68e25cce1593bf8578f5cc882c1612c213d0c3041312f018087a9a78
size 191775

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ebc09fc3389fdd05f9d559692c3d24eb7bd99adba36cabfdcf707975670c2474
size 483287

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7856b748cc26c928c9fd5d43af23d43f72a6f11bf277685ef4b63ad8ada0528e
size 495251

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:366666945890876b458c89ddfb94d4bab8e02f686a08b1d3bc04f0ed0e03e1cc
size 272668

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a3f3295dd8580c51c510f34b5c5252c56d77be75d1a93ce83583c6a4308e636f
size 4596995

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a326b48b5b9355bdef2a43fe5298559d45ad645f911519b9e398f981a1390e80
size 318810

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:418f648ed44631401d9b4887c951b1db562cb02d981258f410768284dccd4945
size 160637

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:394cb4b320efe0eaafc527569f2e04e57f25f6ca7ad70a6cc951af981012be2a
size 587956

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7b41d27d824c56267c2cb666d906c1bede2fdaa1de74f9957e26d502c50cbc2b
size 1115156

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:39057dd4352501b6a8ad5aa62f28662cc0b1310fbbf03c5ad3ee67d0ca3abb9a
size 143009

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8aaebdf6ec81ea65d61c9db30a93e0e6da65c06b874a614393d260ef135c3ee9
size 170032

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:728917127520026b290af0348ef70dee6d36dbd3ec3f72b76d3c2b1fca3bdfdb
size 1205666

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:24f5c8a7e2fce080c7f9090f9db4f8ae5d2644f08eba8671da802bd76880e679
size 523148

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3a7c6f461682a6a5d127655c2d67b90e1a1293c3fbe9bca763db8ab855e1c534
size 907098

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82a2dfcd574fef01451a2e62c5efa1975445682c70588bca4e06f1421be4c24d
size 225187

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:16f73c9118460e939f16070572407482c45a885a5f28163d3fe21a1ae63efc4f
size 547290

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bdd3909cf6516e603c072d23f1ba8a9394dc29cfdaff787c0a6ecd3ce0083c99
size 351535

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:012f5cffade95b8952c6da835c5ec9badacce4ab222b8cf6a59ce2d62fb2fd4a
size 645501

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:500b324d99022e3410afa467743fff186a85700462e66487c11f5b7c3722e9d1
size 244788

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:08eb7d31540d64f49de77452565c48015bc4da3e5753ba9ed93ad19e70eaef48
size 190710

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4def0c84923defe516c9d1c7e4be767ff6a7ecfdccf55eb674abf3b5144dbc06
size 92471

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7302bfcb09c4fcaa7cbc1b51254697333f3a0b46090809779bd4a7c49442454a
size 445129

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0160fa88ffe90a0f9a1846e178b4554db761855aea838973bc32eb700cd28af9
size 247279

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8ff024e32cdb7fa6fe8624cc7c42f16a3b7c2981646ab0f01c6e81c6b56a02f3
size 393856

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd058a3a94865f064a2cc5300267657d449d7cb787662b2ceb945ebe80dfafc7
size 555132

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dc7bc17cdc583accf94e27eadb4bdb49c91e58c6add60074d24e6b71b1d35e19
size 485939

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0f1258ab1f0583e4cb7225f97c777685f0b35021e71ab9ce3ac838fbcaad714d
size 299888

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4aa44481d72090565e8664bdfbb506f833cabc9a0e1ac988807912d86dc61c71
size 97464

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9a2b0d913d11a177032bf5c8148c43e64e97dcff8c0224645f82a8e5e7280c9
size 4819801

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dd832991f433a79e9794b29d80c170486d55187b565e1234f69e2cc141291042
size 57217

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6a2d827c72bad2602c6f1f9fa29fcb2471de06dd725feb338b247b034480da05
size 773178

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2e195c9ed36de6e74ff814f663d80387de82245685784e9c5b52e2831d860f09
size 615029

View File

@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:06f7370a7d558d0abef5b4b0213e574ae8de04416135fe37a4ca949d9c587b82
size 905920

View File

@ -1,150 +0,0 @@
---
title: 'Leaving Nextcloud: From heaven to the depths of Seafile'
date: '2021-03-23'
updatedDate: '2021-03-27'
categories:
- 'general'
tags:
- 'migration'
- 'nextcloud'
- 'seafile'
coverImage: './images/pawel-nolbert-xe-ss5Tg2mo-unsplash.jpg'
---
Today I'll share and explain the motivations that led me to leave Nextcloud and choose Seafile, while hopefully making the transition smooth for anyone who wants to follow along. Spoiler: performance and stuff constantly breaking means a Seafile beta is way more stable and reliable than a Nextcloud "Production" release.
Nextcloud is the de-facto way to go for most self hosted Cloud / Dropbox / Google Drive setups. So a few years ago, in my move to a more ownership driven approach to my data, I set up a Nextcloud instance and was quite happy. They have an iOS app and a macOS sync client.
There are numerous plug-ins that can accomplish anything from contact syncing with WebDav to GSuite-like online collaboration with the Collabora integration, a mail client and real time video conferences with Nextcloud Talk. For many (people & businesses) this means a convenient place where all the tools are combined under one platform.
## Where the issues started
Trying to be everything at the same time comes at a cost. And that cost is, at least in my experience, a product that never feels polished or finished. While the Nextcloud plug-ins are incredibly versatile and powerful, they also leave room for fragmentation and you will notice it.
### The permanent alpha
That's what using Nextcloud feels like 75% of the time. I have no insight into the company behind the project, but it feels like they are chasing a release cycle for the sake of paper launching unfinished features that compromise on stability and polish. The thing that bothers me the most is that they are constantly marketed as "production ready" when they clearly have not had nearly enough QA.
2 years ago I tried to install Collabora for an organisation I'm involved with and the setup was everything but straightforward. Docs were limited and the answers buried either in a forum thread or a github issue. After many web searches I got it to work, but the performance (at least when I tested) was not really usable. We ended up using Google Docs and Etherpad.
Then there was the story with end to end encryption (E2EE) for files. This was a feature that was promoted quite extensively by Nextcloud and [released as stable](https://nextcloud.com/blog/production-ready-end-to-end-encryption-and-new-user-interface-arrive-with-nextcloud-desktop-client-3-0/) after many delays. So I followed the instructions on installing it and... [the sync clients broke down](https://github.com/nextcloud/desktop/issues/2593). What happened? Well, Nextcloud released software as stable and production ready, but the most basic functionality was simply not ready and a lot of clients stopped syncing, mine included.
A few weeks ago [Nextcloud 21](https://nextcloud.com/blog/nextcloud-hub-21-out-with-up-to-10x-better-performance-whiteboard-and-more-collaboration-features/) was released and boasted a "10x speed improvement", a claim that of course cannot hold its ground. But I was excited, as even a "1.5x-2x" improvement would have been really substantial. I installed the new version and well... performance seemed untouched, but as a new feature my UI was stuck in a permanent dark mode with unreadable text (see the file sizes). I don't run any custom theme or anything. Is this a complete deal breaker? Of course not, but it goes a long way to show how untested the whole software is if a stable release has unreadable text on the start page of your product.
<figure>
![](images/Screenshot-2021-03-23-at-10.58.31.png)
<figcaption>
Screenshot of Nextcloud 21 with colour bugs
</figcaption>
</figure>
In addition to the weird CSS bugs it also introduced a new bug where I have to frequently reload the web UI as it "cannot connect" to the server. Definitely production ready.
### Performance
The final issue is the performance as a whole. The web interface regularly takes around 5-10s to load for each action you perform. The only thing that is quite responsive is navigating through folders. It's a drag when you just quickly want to get stuff done and the actual work takes less time than waiting for Nextcloud to serve you the website. It's, simply put, not fun to use.
There is also the issue that you need to spin up a second container for cron jobs like it's 2003, and that every update or so you manually have to go into the console to rebuild some indexes. I'll leave those unjudged, as maybe they are "Enterprise Features" which I don't understand.
## Seafile to the rescue
The last update (Nextcloud 21) was the point where I decided to jump ship as explained above. The question was: What options do I have?
- Pydio
- Seafile
Pydio reinvented itself with the launch of its Cells product. However, at the time of writing the macOS client did not seem ready and therefore it was excluded.
Seafile on the other hand just had the release of its 8th version (still in beta afaik) and supports iOS and macOS.
### Migration
I had to migrate 2 things: Cal/CardDav for Calendar and Contacts and the files drive itself.
Spinning up a Seafile instance was a breeze as I host every single service with docker.
```bash
# .env
MYSQL_ROOT_PASSWORD=random
DB_HOST=db
DB_ROOT_PASSWD=random
SEAFILE_ADMIN_EMAIL=me@example.com
SEAFILE_ADMIN_PASSWORD=a_very_secret_password
```
```yaml
version: '2.0'
services:
db:
image: mariadb:10.5
env_file: .env
volumes:
- ./data/db:/var/lib/mysql
memcached:
image: memcached:1.5.6
entrypoint: memcached -m 256
app:
image: seafileltd/seafile-mc:latest
env_file: .env
volumes:
- ./data/app:/shared
depends_on:
- db
- memcached
```
I then installed the macOS client and simply copied all the files over. Before actually copying the files I added a `seafile-ignore.txt` file in the root to exclude files from being uploaded to the server. Read more [here](https://help.seafile.com/syncing_client/excluding_files/).
**Gotcha**
The ignore file can be tricky if you treat it like a `.gitignore` file. `.git/` would not exclude all those directories, only the root one. To exclude all `.git` directories you actually need to insert `*/.git/` into the `seafile-ignore.txt` file.
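As an illustration only, here is a sketch of what such an ignore file could look like. Apart from `*/.git/`, which is discussed above, the other patterns are just assumptions about what you might want to exclude; check the Seafile docs for the exact pattern semantics.
```bash
# write a seafile-ignore.txt into the library root (patterns are illustrative)
cat > seafile-ignore.txt << 'EOF'
*/.git/
*/node_modules/
*.tmp
EOF
```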
The UI and the sync is incredibly fast, especially when compared to Nextcloud and is delightful to use. It has all the features you would expect: 2FA, user groups, quotas, link sharing (with support for expiration, password and upload only), files sharing between users, etc. It also features collaboration features like Nextcloud but I haven't tested them yet. Also it features an actual REST API with tokens that you can generate (as read-write or read only tokens). Another issue I had to battle with in the past with Nextcloud.
The mobile app on iOS does everything you would expect it to do, including integrating with the native Files API.
For Notability I use the backup feature that uploads backups of my notes as PDFs to a WebDav server. However, Seafile disables the usage of WebDav for users with 2FA, as it would be a vulnerability. As a solution I simply created a "Notability" user without 2FA and shared the folder I want to use as target with that user. Awesome! Now I have a scoped user that only has access to the Notability backup folder without having access to the rest of my files.
## CardDav/CalDav
Since Seafile focuses only on the "Drive" component I had to migrate my contacts and calendars elsewhere. The way-to-go solution is Radicale, and I was surprised to find that there is no official docker image. After 2 minutes of research I found that the most popular docker image did not support authentication, so I had to create my own.
You can find my [Radicale docker image here](https://github.com/cupcakearmy/docker-radicale), maybe you find it useful. It supports bcrypt passwords and can be deployed with just the env variables for `USER` and `PASSWORD`. It has been tested with the iOS and macOS native clients.
```bash
# .env
USER=foo
PASSWORD=secret
```
```yaml
# docker-compose.yml
version: '3.7'
services:
app:
image: cupcakearmy/radicale
env_file: .env
volumes:
- ./data:/data
ports:
- 80:5232
```
The "migration" was done by exporting the calendars I had in Nextcloud with the native macOS calendar app and simply reimporting them into the new server, again with the native client on macOS. Same procedure with the contacts. Sync works like a charm and I'll never go back.
### The downsides
As with any project, Seafile has some drawbacks compared to Nextcloud. Besides the obvious fact that Nextcloud has tons of plug-ins and Seafile does not, Seafile stores data, and therefore files, in blobs, so they are not visible to the host machine. That means that you cannot access, let's say, a `hello.txt` directly from the filesystem of the server you are hosting the service on. This might be a deal breaker for some people.
Another thing that could bother some is that in the free version of Seafile there is no automatic garbage collection, so from time to time you should run the script to clean up old data.
## Conclusion
To conclude the journey: it took me an evening to move everything and create the docker image for Radicale, and I could not be happier. Seafile feels so much more robust in comparison, to the point that Nextcloud feels like a toy product. Of course this is not a fair comparison, as Seafile only does file sync and not the other 10-20 big features Nextcloud brings to the table. However, if you only use Nextcloud to sync files to your own cloud, the comparison is not even close IMO.

View File

@ -1,178 +0,0 @@
---
title: 'Matomo vs uBlock Origin'
date: '2021-01-28'
categories:
- 'general'
tags:
- 'blocker'
- 'matomo'
- 'stats'
- 'tracking'
coverImage: './images/luke-chesser-JKUTrJ4vK00-unsplash-scaled.jpg'
---
After [Ackee](https://github.com/electerious/Ackee) got an update and stopped working I wanted to search for an alternative to get some stats on my statically rendered site. As no server is used, I need some 3rd party service.
I don't want to spy on people, nor set cookies and annoy people with consent banners if they only want to read a damn blog post. The goal is just to get a feel for the traffic on the site.
This is important to mention as the next steps could sound a bit nefarious otherwise.
Data collected on this site is 100% anonymous and [GDPR](https://gdpr.eu/) compliant.
Since Matomo is the de facto way to go, I spun up the Matomo server with my trusted docker-traefik setup and was up and running in no time.
( I'll share the config files if anyone is interested at the bottom. )
Then I quickly copied the JS tracker code in my main html template and thought that was it. **Wrong**.
## The problem defaults.
So it turns out that Matomo, being widely used, is of course included in many ad-blocker lists, and therefore my stats did not work. Let's see why:
Basically all ad blockers work with lists. Those lists include patterns that, if matched, will be filtered out. Let's take a look at the default Matomo tracking code:
```html
<script type="text/javascript">
var _paq = (window._paq = window._paq || [])
/* tracker methods like "setCustomDimension" should be called before "trackPageView" */
_paq.push(['trackPageView'])
_paq.push(['enableLinkTracking'])
;(function () {
var u = '//stats.nicco.io/'
_paq.push(['setTrackerUrl', u + 'matomo.php'])
_paq.push(['setSiteId', '1'])
var d = document,
g = d.createElement('script'),
s = d.getElementsByTagName('script')[0]
g.type = 'text/javascript'
g.async = true
g.src = u + 'matomo.js'
s.parentNode.insertBefore(g, s)
})()
</script>
```
We can see that my stats server is `stats.nicco.io`. And we also can see that the tracking script is loaded by `matomo.js`, which then sends the details to `matomo.php`. Well that is of course incredibly easy to block, and it is as you can see below:
<figure>
![](images/Screenshot-2021-01-28-at-12.12.59.png)
<figcaption>
Part of the EasyList Filter
</figcaption>
</figure>
<figure>
![](images/Screenshot-2021-01-28-at-12.14.03.png)
<figcaption>
Part of the EasyList Filter
</figcaption>
</figure>
That won't work, since most of the people that visit this site are probably developers, who most likely have some kind of ad blocker installed.
## Solution time
So after a short Ecosia search I landed on the blog of [Christian Mochow](https://christianmochow.de/author/christian-mochow/), who wrote a [blog post](https://christianmochow.de/beitraege/tools/catch-me-if-you-can-adblocker-umgehen-mit-matomo/) on this issue. I got the solution from his article.
Luckily Apache has the famous Rewrite module, which will solve all our problems. I bet most of you already know where this is headed.
We can create a `.htaccess` file in the root of our Matomo installation folder, to cloak our requests.
```apache
# .htaccess
RewriteEngine On
RewriteRule ^unicorn matomo.js
RewriteRule ^rainbow matomo.php
```
Now if we request `https://stats.nicco.io/unicorn` we actually get the response for `https://stats.nicco.io/matomo.js` and the same for `rainbow` and `matomo.php`.
```js
// Replace in the client
_paq.push(['setTrackerUrl', u + 'matomo.php']) // Before
_paq.push(['setTrackerUrl', u + 'rainbow']) // After
g.src = u + 'matomo.js' // Before
g.src = u + 'unicorn' // After
```
**Awesome!**
I had to create a minuscule `Dockerfile` as the `Rewrite` module is not enabled per default in the standard Matomo docker image.
```docker
# Dockerfile
FROM matomo
RUN a2enmod rewrite
```
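A quick way to check that the rewrite works is a couple of curl requests; this is just a sanity check, nothing Matomo specific. Both paths should answer with a 200 and serve the same JavaScript payload.
```bash
# the cloaked path should behave exactly like the original one
curl -sI https://stats.nicco.io/matomo.js | head -n 1
curl -sI https://stats.nicco.io/unicorn | head -n 1
```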
## Responsible Usage
Now as you can see it's incredibly easy to mask tracking stuff, and I bet there are a lot of people doing this in the wild. It is important to respect the privacy of your users and you should never store more data than you need and in the best case don't store data at all.
**Anonymize as much as possible!** Matomo makes this easy. You can effortlessly delete 2 bytes of each ip address (half of the info), enforce strict no cookie tracking and automatically delete data after `x` days. Please do ❤️
### Config Files
The `Dockerfile` and the `.htaccess` files are shown above.
```yaml
# docker-compose.yml
version: '3.7'
networks:
traefik:
external: true
services:
db:
image: mariadb
command: --max-allowed-packet=64MB
restart: unless-stopped
volumes:
- ./data/db:/var/lib/mysql
env_file: .env
app:
build: .
restart: unless-stopped
links:
- db
volumes:
- ./data/matomo:/var/www/html
- ./.htaccess:/var/www/html/.htaccess
env_file: .env
labels:
- traefik.enable=true
- traefik.docker.network=traefik
- traefik.port=80
- traefik.backend=matomo
- 'traefik.frontend.rule=Host:stats.nicco.io;'
networks:
- traefik
- default
```
```bash
# .env
MYSQL_DATABASE=matomo
MYSQL_USER=matomo
MYSQL_PASSWORD=<random bytes>
MYSQL_RANDOM_ROOT_PASSWORD=yes
MATOMO_DATABASE_HOST=db
MATOMO_DATABASE_ADAPTER=mysql
MATOMO_DATABASE_DBNAME=matomo
MATOMO_DATABASE_USERNAME=matomo
MATOMO_DATABASE_PASSWORD=<random bytes>
```
See the [code for this website](https://github.com/cupcakearmy/nicco.io/blob/220643770385bebb05094b440c28441b49184556/src/template.html#L37-L64).
View File
@ -1,158 +0,0 @@
---
title: 'Monitor your self hosted services for free'
date: '2022-07-07'
coverImage: './images/daniele-franchi-g2fJ7d7eKSM-unsplash.jpg'
tags:
- self-host
- monitoring
---
Monitoring services requires external resources, as monitoring your server(s) from the server itself does not make sense. Renting a whole server just for monitoring is a bit of a waste of resources (and money).
## Getting a free VM
Luckily we can leverage the free tiers of many cloud providers. This gives us a free option with good stability, as they tend to be very reliable. Below is a list of the free tiers available from the big 3 players. All of them offer a free VM, which is more than sufficient for our needs.
- [Google Cloud Project](https://cloud.google.com/free/docs/gcp-free-tier/#compute)
- [Microsoft Azure](https://azure.microsoft.com/en-in/pricing/free-services/)
- [Amazon AWS](https://aws.amazon.com/free/?all-free-tier.sort-by=item.additionalFields.SortRank&all-free-tier.sort-order=asc&awsf.Free%20Tier%20Types=*all&awsf.Free%20Tier%20Categories=categories%23compute)
Choose your preferred cloud; it does not really matter. I went with Google, as I find the interface the nicest to use. For the OS of the VM I went with Ubuntu, but any Linux distribution will do.
## Setup
For monitoring we will use [Uptime Kuma](https://github.com/louislam/uptime-kuma). It's an amazing free, open source monitoring tool, very similar to [UptimeRobot](https://uptimerobot.com/). For simplicity we will run it with Docker and Traefik.
First we need to [install Docker](https://docs.docker.com/engine/install/debian/#install-using-the-repository) (the commands below use the Debian repository, adjust them if you picked a different distribution):
```bash
# add Docker's official GPG key (the keyrings directory may not exist yet)
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

# add the Docker apt repository
echo \
  "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

# install Docker Engine and the compose plugin
apt update
apt install docker-ce docker-ce-cli containerd.io docker-compose-plugin
```
We also want a basic firewall:
```bash
apt install ufw
ufw allow 80
ufw allow 443
ufw allow 22
ufw enable
```
Don't forget to point your DNS to the server, for example with a subdomain like `status.example.org`.
## Deploy Uptime Kuma
We only need a `docker-compose.yaml` file now and we should be up and running. I'll share the folder structure below. We could put everything in one compose file, but I like to keep things tidy.
```
.
├── kuma
│ └── docker-compose.yaml
└── traefik
├── docker-compose.yaml
└── traefik.yaml
```
### Traefik
Let's start with Traefik. It will handle all our routing and TLS certificates. Remember to change the ACME email in `traefik.yaml`.
```yaml
# traefik/docker-compose.yaml
version: '3.8'
networks:
default:
external: true
name: proxy
services:
traefik:
image: traefik:2.8
restart: unless-stopped
ports:
- '80:80'
- '443:443'
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./traefik.yaml:/etc/traefik/traefik.yaml:ro
- ./data:/data
labels:
- 'traefik.enable=true'
# HTTP to HTTPS redirection
- 'traefik.http.routers.http_catchall.rule=HostRegexp(`{any:.+}`)'
- 'traefik.http.routers.http_catchall.entrypoints=insecure'
- 'traefik.http.routers.http_catchall.middlewares=https_redirect'
- 'traefik.http.middlewares.https_redirect.redirectscheme.scheme=https'
- 'traefik.http.middlewares.https_redirect.redirectscheme.permanent=true'
```
```yaml
# traefik/traefik.yaml
# Define HTTP and HTTPS entrypoints
entryPoints:
insecure:
address: ':80'
secure:
address: ':443'
# Dynamic configuration will come from Docker labels
providers:
docker:
endpoint: 'unix:///var/run/docker.sock'
network: 'proxy'
exposedByDefault: false
# Enable ACME with the HTTP file challenge
certificatesResolvers:
le:
acme:
email: me@example.org
storage: /data/acme.json
httpChallenge:
# used during the challenge
entryPoint: insecure
```
To get Traefik running we just need to run the following:
```bash
docker network create proxy
docker compose up -d
```
### Kuma
The compose file for Kuma is compact. Don't forget to change the domain to yours.
```yaml
# kuma/docker-compose.yaml
version: '3.8'
networks:
default:
external: true
name: proxy
services:
kuma:
image: louislam/uptime-kuma:1
restart: unless-stopped
volumes:
- ./data:/app/data
labels:
- traefik.enable=true
- traefik.http.routers.kuma.rule=Host(`status.example.org`)
- traefik.http.routers.kuma.entrypoints=secure
- traefik.http.routers.kuma.tls.certresolver=le
```
After a `docker compose up -d` in the `kuma` folder you can navigate to your new monitoring website, create an admin account and set up monitors, alerting and so on.
Many thanks to [Louis Lam](https://github.com/louislam) for creating and maintaining Uptime Kuma! Consider donating!
View File
@ -1,120 +0,0 @@
---
title: 'React code splitting made simple. Easily reduce bundle.js'
date: '2019-07-21'
categories:
- 'coding'
tags:
- 'code-splitting'
- 'react'
coverImage: './images/jason-abdilla-jZWmw6007EY-unsplash-scaled.jpg'
---
On average, **around 200-500kb of JS is sent** down the pipe to the client right now. Way too much for my personal taste. Since a lot of websites use React, today we will look at how to reduce that in a simple and easy way, applicable to almost any app.
<figure>
![](images/jason-abdilla-jZWmw6007EY-unsplash-scaled.jpg)
<figcaption>
Photo by [Jason Abdilla](https://unsplash.com/@jabdilla_creative?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/axe?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)
</figcaption>
</figure>
Let's start with why our `bundle.js` even grows that much. In my opinion there are 2 causes for this:
1. Devs mindlessly running `npm install`.
2. Loading **all** the JS, even if it is not needed in the current screen (e.g. the homepage).
## Carefully select packages
The first is not as easily solved if the app is already built. It requires your team to carefully choose packages and realise that often you don't need everything you are bundling. Often there is a lighter alternative.
As an example: a lot of websites use [momentjs](https://github.com/moment/moment/). It's an awesome library, **but it weighs [231.7kb](https://bundlephobia.com/result?p=moment)!** There is an even cooler alternative: [DayJS](https://github.com/iamkun/dayjs). It requires only [6.5kb](https://bundlephobia.com/result?p=dayjs) and shares the same API as moment, so there is no code to rewrite. A lot of similar examples could be made. If you have a big module, just search for a smaller alternative.
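To illustrate (a small sketch, not from the original post): swapping moment for dayjs is usually a one-line change, because the chainable API is largely the same.

```js
import dayjs from 'dayjs'

// the same chainable calls you would write with moment
const nextWeek = dayjs('2019-07-21').add(7, 'day').format('YYYY-MM-DD')
console.log(nextWeek) // 2019-07-28
```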
#### TLDR;
1. Use the [webpack-bundle-analyzer](https://github.com/webpack-contrib/webpack-bundle-analyzer) to inspect the packages already in your bundle (a minimal setup sketch follows after this list).
2. Search for lighter alternatives to big packages.
3. **Before installing** check on [bundlephobia](https://bundlephobia.com/) how big your desired package is.
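Here is that minimal sketch of wiring the analyzer into an existing webpack config, assuming you build with webpack and have `webpack-bundle-analyzer` installed as a dev dependency:

```js
// webpack.config.js
const { BundleAnalyzerPlugin } = require('webpack-bundle-analyzer')

module.exports = {
  // ...your existing entry, output, loaders, etc.
  plugins: [
    // opens an interactive treemap of what ends up in the bundle after each build
    new BundleAnalyzerPlugin(),
  ],
}
```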
## Code splitting & lazy loading
Now to the real deal. The classic problem is that big websites don't split the JS that is sent to the client. This means the browser might receive the JS for the shopping section while you are only waiting for the homepage to load. This is unnecessary, and **waiting for a page to load is always a frustrating experience**. We can do better 👍
How are we going to achieve this? **Code splitting & lazy loading.**
We are going to use 2 native react functions, so no external packages.
- [lazy](https://reactjs.org/docs/code-splitting.html#reactlazy)
- [Suspense](https://reactjs.org/docs/code-splitting.html#suspense)
Lazy & Suspense require React version **16.6** or newer. Also, this does not work with server-side rendering.
**Lazy** is used to lazy load the component (...duh 🙄). This means that the code for the component is only downloaded when a component actually needs to be shown.
**Suspense** is a handy wrapper for displaying a fallback while the component is loading.
Let's see below how this is achieved:
**From**
```jsx
import MyList from './MyList'

const App = () => (
  <div>
    {/* ... */}
    <MyList />
    {/* ... */}
  </div>
)
```
**To**
```jsx
import { lazy, Suspense } from 'react'

// the code for MyList is only downloaded when the component is actually rendered
const MyList = lazy(() => import('./MyList'))

const App = () => (
  <div>
    {/* ... */}
    <Suspense fallback={<span>Loading...</span>}>
      <MyList />
    </Suspense>
    {/* ... */}
  </div>
)
```
This is in fact all you need to do. Now our component `MyList` will lazy load on necessity. Awesome!
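If you bundle with webpack, a small optional tweak (not part of the original example) is to name the generated chunk with a magic comment, which makes the split file easier to spot in the network tab:

```js
import { lazy } from 'react'

// the chunk gets a readable name (e.g. my-list.[hash].js) instead of a numeric id
const MyList = lazy(() => import(/* webpackChunkName: "my-list" */ './MyList'))
```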
#### Bonus: Little helper function
This can get repetitive though, so here is a little helper that basically wraps everything into one function:
```jsx
import { lazy, Suspense } from 'react'

export const Split = path => {
  // create the lazy component once, not on every render
  const Component = lazy(() => import(path))
  return props => (
    <Suspense fallback={<span>Loading...</span>}>
      <Component {...props} />
    </Suspense>
  )
}
```
Now we can simply do the following:
```jsx
import { Split } from './utils.jsx'

const MyListLazy = Split('./MyList')

const App = () => (
  <div>
    {/* ... */}
    <MyListLazy />
    {/* ... */}
  </div>
)
```
There is a little codesandbox below with all the code if you want to try it for yourself (you should! 😉)
<iframe src="https://codesandbox.io/embed/code-splittings3?autoresize=1&amp;fontsize=14&amp;module=%2Fsrc%2FApp.jsx" title="Code Splitting" allow="geolocation; microphone; camera; midi; vr; accelerometer; gyroscope; payment; ambient-light-sensor; encrypted-media" style="width:100%; height:500px; border:0; border-radius: 4px; overflow:hidden;" sandbox="allow-modals allow-forms allow-popups allow-scripts allow-same-origin"></iframe>
This concludes today's look at how to reduce the bundle size and use code splitting in React. Note again that this does not work with server-side rendering.
Some files were not shown because too many files have changed in this diff