First working version.

This commit is contained in:
Shibo Lyu 2023-02-18 17:17:53 +00:00
commit 2b2d0e01e9
6 changed files with 2194 additions and 0 deletions

2
.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
node_modules
index.js

7
LICENSE.md Normal file
View file

@ -0,0 +1,7 @@
# Internet Systems Consortium license
Copyright (c) 2023, Shibo Lyu
Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

70
README.md Normal file
View file

@ -0,0 +1,70 @@
# GhoS3
An AWS S3 storage adapter tested on Ghost 5.x.
This is a modernized version based on [colinmeinke/ghost-storage-adapter-s3](https://github.com/colinmeinke/ghost-storage-adapter-s3). Major changes are:
- Adopted `async`/`await`
- Rewritten in TypeScript
- Use latest Version 3 of AWS SDK
It's designed to be a drop-in replacement of colinmeinke's package, so configuration and installation method remained largely the same.
However, this port pretty much targets only Ghost 5.x and up, as the build toolchain is set to target Node 16.x. With some modifications this should work for older versions of Ghost (PRs welcomed).
On my blog [_The Base_](https://base.of.sb), I use [Cloudflare R2](https://www.cloudflare.com/zh-tw/products/r2/) with GhoS3.
## Installation
```bash
npm install ghos3
mkdir -p ./content/adapters/storage
cp -r ./node_modules/ghos3 ./content/adapters/storage/s3
```
## Configuration
Largely the same, but note `signatureVersion` and `serverSideEncryption` are removed since in AWS SDK v3 they're implemented differently than just a simple string field (PRs welcomed, of course).
```json
"storage": {
"active": "s3",
"s3": {
"accessKeyId": "YOUR_ACCESS_KEY_ID",
"secretAccessKey": "YOUR_SECRET_ACCESS_KEY",
"region": "YOUR_REGION_SLUG",
"bucket": "YOUR_BUCKET_NAME",
"assetHost": "YOUR_OPTIONAL_CDN_URL (See note 1 below)",
"pathPrefix": "YOUR_OPTIONAL_BUCKET_SUBDIRECTORY",
"endpoint": "YOUR_OPTIONAL_ENDPOINT_URL (only needed for 3rd party S3 providers)",
"forcePathStyle": true,
"acl": "YOUR_OPTIONAL_ACL (See note 3 below)",
}
}
```
### Notes
1. Be sure to include `//` or the appropriate protocol within your `assetHost` string/variable to ensure that your site's domain is not prepended to the CDN URL.
2. If your S3 provider requires path style, you can enable it with `forcePathStyle`.
3. If you use CloudFront the object ACL does not need to be set to `public-read`.
### Via environment variables
```
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
AWS_DEFAULT_REGION
GHOST_STORAGE_ADAPTER_S3_PATH_BUCKET
GHOST_STORAGE_ADAPTER_S3_ASSET_HOST // optional
GHOST_STORAGE_ADAPTER_S3_PATH_PREFIX // optional
GHOST_STORAGE_ADAPTER_S3_ENDPOINT // optional
GHOST_STORAGE_ADAPTER_S3_FORCE_PATH_STYLE // optional
GHOST_STORAGE_ADAPTER_S3_ACL // optional
```
For configuration on the AWS side, colinmeinke's original README has a detailed [tutorial](https://github.com/colinmeinke/ghost-storage-adapter-s3/tree/master#aws-configuration) to set you up.
## License
[ISC](./LICENSE.md)

1885
package-lock.json generated Normal file

File diff suppressed because it is too large Load diff

25
package.json Normal file
View file

@ -0,0 +1,25 @@
{
"name": "ghos3",
"version": "1.0.3",
"description": "S3 Storage adapter for Ghost.",
"main": "index.js",
"scripts": {
"build": "esbuild ./src/index.js --bundle --outfile=index.js --platform=node --format=cjs --target=node16 --packages=external --footer:js='module.exports = module.exports.default;'",
"prepublishOnly": "npm run build",
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "Shibo Lyu <github@of.sb>",
"license": "ISC",
"dependencies": {
"@aws-sdk/client-s3": "^3.272.0",
"ghost-storage-base": "^1.0.0"
},
"devDependencies": {
"@types/express": "^4.17.17",
"@types/ghost-storage-base": "^0.0.1",
"esbuild": "^0.17.8",
"prettier": "^2.8.4",
"tslib": "^2.5.0",
"typescript": "^4.9.5"
}
}

205
src/index.ts Normal file
View file

@ -0,0 +1,205 @@
import { S3, type S3ClientConfig } from '@aws-sdk/client-s3'
import StorageBase, { type ReadOptions, type Image } from 'ghost-storage-base'
import { join } from 'path'
import { readFile } from 'fs/promises'
import type { Readable } from 'stream'
import type { Handler } from 'express'
// Drop a single leading '/' if present; otherwise return the string untouched.
const stripLeadingSlash = (s: string) => {
  return s.startsWith('/') ? s.slice(1) : s
}
// Drop a single trailing '/' if present; otherwise return the string untouched.
// FIX: the previous check `s.indexOf('/') === s.length - 1` only matched when
// the trailing slash was the FIRST (i.e. only) slash in the string, so a
// multi-segment prefix like 'blog/images/' was never stripped and produced
// double-slash S3 keys in serve().
const stripEndingSlash = (s: string) =>
  s.endsWith('/') ? s.substring(0, s.length - 1) : s
// Adapter configuration as read from Ghost's `storage.s3` settings block.
// Every field is optional; matching GHOST_STORAGE_ADAPTER_S3_* / AWS
// environment variables take precedence over these values (see constructor).
type Config = {
  // Explicit AWS credentials; when omitted the SDK's default provider chain is used.
  accessKeyId?: string
  // Optional CDN/asset host used to build public URLs (include the protocol or `//`).
  assetHost?: string
  // Target bucket name; required (via config or env) — constructor throws otherwise.
  bucket?: string
  // Optional key prefix (subdirectory) inside the bucket.
  pathPrefix?: string
  // AWS region slug, e.g. 'us-east-1'.
  region?: string
  // Secret paired with accessKeyId.
  secretAccessKey?: string
  // Custom endpoint URL for S3-compatible providers (R2, MinIO, …).
  endpoint?: string
  // Use path-style addressing (bucket in the path rather than the hostname).
  forcePathStyle?: boolean
  // Canned ACL applied on upload; defaults to 'public-read'.
  acl?: string
}
/**
 * Ghost storage adapter backed by AWS S3 (SDK v3).
 *
 * Values come from the `storage.s3` block of Ghost's config, with
 * `GHOST_STORAGE_ADAPTER_S3_*` / AWS environment variables taking
 * precedence (exact fallback order is visible in the constructor).
 */
class S3Storage extends StorageBase {
  accessKeyId?: string
  secretAccessKey?: string
  region?: string
  bucket?: string
  host: string
  pathPrefix: string
  endpoint: string
  forcePathStyle: boolean
  acl: string
  // Cached SDK client. Configuration is immutable after construction, so a
  // single client can serve every request instead of building one per call.
  private client?: S3

  constructor(config: Config = {}) {
    super()
    const {
      accessKeyId,
      assetHost,
      bucket,
      pathPrefix,
      region,
      secretAccessKey,
      endpoint,
      forcePathStyle,
      acl,
    } = config

    // Compatible with the aws-sdk's default environment variables:
    // AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY are handled by the SDK's
    // default credential chain when no explicit keys are set (see s3()).
    this.accessKeyId = accessKeyId
    this.secretAccessKey = secretAccessKey
    this.region = process.env.AWS_DEFAULT_REGION || region

    this.bucket = process.env.GHOST_STORAGE_ADAPTER_S3_PATH_BUCKET || bucket
    if (!this.bucket) throw new Error('S3 bucket not specified')

    // Optional configurations
    this.host =
      process.env.GHOST_STORAGE_ADAPTER_S3_ASSET_HOST ||
      assetHost ||
      `https://s3${
        this.region === 'us-east-1' ? '' : `-${this.region}`
      }.amazonaws.com/${this.bucket}`
    this.pathPrefix = stripLeadingSlash(
      process.env.GHOST_STORAGE_ADAPTER_S3_PATH_PREFIX || pathPrefix || ''
    )
    this.endpoint =
      process.env.GHOST_STORAGE_ADAPTER_S3_ENDPOINT || endpoint || ''

    // FIX: `Boolean(env)` treated ANY non-empty string — including "false" —
    // as true. Parse the common textual forms, and only fall back to the
    // config value when the environment variable is absent.
    const forcePathStyleEnv =
      process.env.GHOST_STORAGE_ADAPTER_S3_FORCE_PATH_STYLE
    this.forcePathStyle =
      forcePathStyleEnv !== undefined
        ? forcePathStyleEnv === 'true' || forcePathStyleEnv === '1'
        : Boolean(forcePathStyle)

    this.acl = process.env.GHOST_STORAGE_ADAPTER_S3_ACL || acl || 'public-read'
  }

  /**
   * Delete `fileName` from `targetDir` (or the adapter's default target dir).
   * Returns true on success, false on any S3 error (best-effort semantics).
   */
  async delete(fileName: string, targetDir?: string) {
    const directory = targetDir || this.getTargetDir(this.pathPrefix)
    try {
      await this.s3().deleteObject({
        Bucket: this.bucket,
        Key: stripLeadingSlash(join(directory, fileName)),
      })
    } catch {
      return false
    }
    return true
  }

  /**
   * Check whether an object exists at the given key.
   * FIX: uses headObject instead of getObject — the previous implementation
   * downloaded the whole object body just to test existence.
   */
  async exists(fileName: string, targetDir?: string) {
    try {
      await this.s3().headObject({
        Bucket: this.bucket,
        Key: stripLeadingSlash(
          targetDir ? join(targetDir, fileName) : fileName
        ),
      })
    } catch {
      return false
    }
    return true
  }

  /**
   * Return the (memoized) S3 client built from this adapter's settings.
   * Credentials are passed explicitly only when both halves are configured;
   * otherwise the SDK's default provider chain takes over.
   */
  s3() {
    if (this.client) return this.client
    const options: S3ClientConfig = {
      region: this.region,
      forcePathStyle: this.forcePathStyle,
    }
    if (this.accessKeyId && this.secretAccessKey) {
      options.credentials = {
        accessKeyId: this.accessKeyId,
        secretAccessKey: this.secretAccessKey,
      }
    }
    if (this.endpoint !== '') {
      options.endpoint = this.endpoint
    }
    this.client = new S3(options)
    return this.client
  }

  /**
   * Upload an image and return its public URL (`host` + key).
   * Objects are cached for 30 days and stored with the configured ACL.
   */
  async save(image: Image, targetDir?: string) {
    const directory = targetDir || this.getTargetDir(this.pathPrefix)
    // Resolve the unique key and read the file off disk concurrently.
    const [fileName, file] = await Promise.all([
      this.getUniqueFileName(image, directory),
      readFile(image.path),
    ])
    await this.s3().putObject({
      ACL: this.acl,
      Body: file,
      Bucket: this.bucket,
      CacheControl: `max-age=${30 * 24 * 60 * 60}`,
      ContentType: image.type,
      Key: stripLeadingSlash(fileName),
    })
    return `${this.host}/${fileName}`
  }

  /**
   * Express handler that streams objects straight from S3, forwarding the
   * relevant response headers. Any S3 failure is surfaced as a 404.
   */
  serve(): Handler {
    return async (req, res, next) => {
      try {
        const output = await this.s3().getObject({
          Bucket: this.bucket,
          Key: stripLeadingSlash(stripEndingSlash(this.pathPrefix) + req.path),
        })
        const headers: { [key: string]: string } = {}
        if (output.AcceptRanges) headers['accept-ranges'] = output.AcceptRanges
        if (output.CacheControl) headers['cache-control'] = output.CacheControl
        if (output.ContentDisposition)
          headers['content-disposition'] = output.ContentDisposition
        if (output.ContentEncoding)
          headers['content-encoding'] = output.ContentEncoding
        if (output.ContentLanguage)
          headers['content-language'] = output.ContentLanguage
        // FIX: a truthiness check dropped the header for zero-length objects.
        if (output.ContentLength !== undefined)
          headers['content-length'] = `${output.ContentLength}`
        if (output.ContentRange) headers['content-range'] = output.ContentRange
        if (output.ContentType) headers['content-type'] = output.ContentType
        if (output.ETag) headers['etag'] = output.ETag
        res.set(headers)
        const stream = output.Body as Readable
        stream.pipe(res)
      } catch (err) {
        res.status(404)
        next(err)
      }
    }
  }

  /**
   * Read an object previously saved by this adapter and return its bytes.
   * The URL must start with this adapter's `host`; the remainder is used as
   * the S3 key. NOTE(review): a protocol-relative `assetHost` ("//cdn…")
   * will not match absolute URLs here — confirm against callers.
   * @throws Error when `options.path` is not under `host`.
   */
  async read(options: ReadOptions = { path: '' }) {
    // Strip a single trailing slash or backslash.
    let path = (options.path || '').replace(/\/$|\\$/, '')
    // check if path is stored in s3 handled by us
    if (!path.startsWith(this.host)) {
      throw new Error(`${path} is not stored in s3`)
    }
    path = path.substring(this.host.length)
    const response = await this.s3().getObject({
      Bucket: this.bucket,
      Key: stripLeadingSlash(path),
    })
    // Collect the response stream into a single Buffer.
    const stream = response.Body as Readable
    return await new Promise<Buffer>((resolve, reject) => {
      const chunks: Buffer[] = []
      stream.on('data', (chunk) => chunks.push(chunk))
      stream.once('end', () => resolve(Buffer.concat(chunks)))
      stream.once('error', reject)
    })
  }
}

export default S3Storage