// mirror of https://gitlab.com/timvisee/send.git
// send/server/routes/ws.js
const crypto = require('crypto');
const storage = require('../storage');
const config = require('../config');
const mozlog = require('../log');
const Limiter = require('../limiter');
2018-06-25 20:26:48 +02:00
const Parser = require('../streamparser');
2018-06-21 02:05:33 +02:00
const wsStream = require('websocket-stream/stream');
2018-08-31 23:20:15 +02:00
const fxa = require('./fxa');
2018-06-21 02:05:33 +02:00
const log = mozlog('send.upload');
2018-08-08 00:40:17 +02:00
module.exports = function(ws, req) {
2018-06-21 02:05:33 +02:00
let fileStream;
2018-06-21 22:57:53 +02:00
ws.on('close', e => {
2018-06-22 22:17:23 +02:00
if (e !== 1000 && fileStream !== undefined) {
fileStream.destroy();
2018-06-21 22:57:53 +02:00
}
});
2018-06-21 02:05:33 +02:00
2018-06-25 19:57:52 +02:00
ws.once('message', async function(message) {
2018-06-21 22:57:53 +02:00
try {
2018-06-25 19:57:52 +02:00
const newId = crypto.randomBytes(5).toString('hex');
const owner = crypto.randomBytes(10).toString('hex');
2018-06-21 02:05:33 +02:00
2018-06-25 19:57:52 +02:00
const fileInfo = JSON.parse(message);
2018-08-31 23:20:15 +02:00
const timeLimit = fileInfo.timeLimit || config.default_expire_seconds;
const dlimit = fileInfo.dlimit || 1;
2018-06-25 19:57:52 +02:00
const metadata = fileInfo.fileMetadata;
const auth = fileInfo.authorization;
2018-08-31 23:20:15 +02:00
const user = await fxa.verify(fileInfo.bearer);
2018-08-08 00:40:17 +02:00
const maxFileSize = user
? config.max_file_size
: config.anon_max_file_size;
const maxExpireSeconds = user
? config.max_expire_seconds
: config.anon_max_expire_seconds;
2018-08-31 23:20:15 +02:00
const maxDownloads = user
? config.max_downloads
: config.anon_max_downloads;
2018-06-21 02:05:33 +02:00
2018-08-08 20:07:09 +02:00
if (
!metadata ||
!auth ||
timeLimit <= 0 ||
2018-08-31 23:20:15 +02:00
timeLimit > maxExpireSeconds ||
dlimit > maxDownloads
2018-08-08 20:07:09 +02:00
) {
2018-06-21 02:05:33 +02:00
ws.send(
JSON.stringify({
2018-06-25 19:57:52 +02:00
error: 400
2018-06-21 02:05:33 +02:00
})
);
2018-06-25 19:57:52 +02:00
return ws.close();
2018-06-21 02:05:33 +02:00
}
2018-06-25 19:57:52 +02:00
const meta = {
owner,
metadata,
2018-08-31 23:20:15 +02:00
dlimit,
2018-06-25 19:57:52 +02:00
auth: auth.split(' ')[1],
nonce: crypto.randomBytes(16).toString('base64')
};
const protocol = config.env === 'production' ? 'https' : req.protocol;
const url = `${protocol}://${req.get('host')}/download/${newId}/`;
2018-08-31 23:20:15 +02:00
ws.send(
JSON.stringify({
url,
ownerToken: meta.owner,
id: newId
})
);
2018-08-08 00:40:17 +02:00
const limiter = new Limiter(maxFileSize);
2018-06-25 20:26:48 +02:00
const parser = new Parser();
fileStream = wsStream(ws, { binary: true })
.pipe(limiter)
.pipe(parser);
2018-08-08 20:07:09 +02:00
await storage.set(newId, fileStream, meta, timeLimit);
2018-06-25 19:57:52 +02:00
2018-06-25 23:01:08 +02:00
if (ws.readyState === 1) {
// if the socket is closed by a cancelled upload the stream
// ends without an error so we need to check the state
// before sending a reply.
2018-06-25 20:52:29 +02:00
2018-06-25 23:01:08 +02:00
// TODO: we should handle cancelled uploads differently
// in order to avoid having to check socket state and clean
// up storage, possibly with an exception that we can catch.
2018-08-31 23:20:15 +02:00
ws.send(JSON.stringify({ ok: true }));
2018-06-25 23:01:08 +02:00
}
2018-06-21 22:57:53 +02:00
} catch (e) {
log.error('upload', e);
2018-06-25 23:01:08 +02:00
if (ws.readyState === 1) {
ws.send(
JSON.stringify({
error: e === 'limit' ? 413 : 500
})
);
ws.close();
}
2018-06-21 22:57:53 +02:00
}
});
2018-06-21 02:05:33 +02:00
};