/**
 * Link to the project's GitHub page:
 * https://github.com/pickhardt/coffeescript-codemirror-mode
 */
CodeMirror.defineMode('coffeescript', function(conf) {
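  // Indentation is tracked as a stack of scopes in state.scopes, with
  // scopes[0] the innermost. A scope's type is 'coffee' for an
  // indentation-based block, or a closing bracket character (']', ')', '}')
  // for a bracketed block; its offset is the indentation column ('coffee')
  // or the column just past the opening bracket.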
  var ERRORCLASS = 'error';

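  // Build a regexp that matches any one of the given words, as a whole word.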
  function wordRegexp(words) {
    return new RegExp("^((" + words.join(")|(") + "))\\b");
  }

  var singleOperators = new RegExp("^[\\+\\-\\*/%&|\\^~<>!\\?]");
  var singleDelimiters = new RegExp("^[\\(\\)\\[\\]\\{\\},:`=;\\.]");
  var doubleOperators = new RegExp("^((->)|(=>)|(\\+\\+)|(\\+=)|(\\-\\-)|(\\-=)|(\\*\\*)|(\\*=)|(\\/\\/)|(\\/=)|(==)|(!=)|(<=)|(>=)|(<>)|(<<)|(>>)|(//))");
  var doubleDelimiters = new RegExp("^((\\.\\.)|(\\+=)|(\\-=)|(\\*=)|(%=)|(/=)|(&=)|(\\|=)|(\\^=))");
  var tripleDelimiters = new RegExp("^((\\.\\.\\.)|(//=)|(>>=)|(<<=)|(\\*\\*=))");
  var identifiers = new RegExp("^[_A-Za-z$][_A-Za-z$0-9]*");
  var properties = new RegExp("^(@|this\\.)[_A-Za-z$][_A-Za-z$0-9]*");

  var wordOperators = wordRegexp(['and', 'or', 'not',
                                  'is', 'isnt', 'in',
                                  'instanceof', 'typeof']);
  var indentKeywords = ['for', 'while', 'loop', 'if', 'unless', 'else',
                        'switch', 'try', 'catch', 'finally', 'class'];
  var commonKeywords = ['break', 'by', 'continue', 'debugger', 'delete',
                        'do', 'in', 'of', 'new', 'return', 'then',
                        'this', 'throw', 'when', 'until'];

  var keywords = wordRegexp(indentKeywords.concat(commonKeywords));
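
  // From here on, indentKeywords is reused as a regexp matching only the
  // block-opening keywords; tokenLexer uses it to open a new scope.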
  indentKeywords = wordRegexp(indentKeywords);

  var stringPrefixes = new RegExp("^('{3}|\"{3}|['\"])");
  var regexPrefixes = new RegExp("^(/{3}|/)");
  var commonConstants = ['Infinity', 'NaN', 'undefined', 'null', 'true', 'false', 'on', 'off', 'yes', 'no'];
  var constants = wordRegexp(commonConstants);

  // Tokenizers
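  // tokenBase is the default tokenizer: it classifies single tokens and, at
  // the start of a line, reports 'indent'/'dedent' pseudo-styles so that
  // tokenLexer can adjust the scope stack.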
  function tokenBase(stream, state) {
    // Handle scope changes
    if (stream.sol()) {
      var scopeOffset = state.scopes[0].offset;
      if (stream.eatSpace()) {
        var lineOffset = stream.indentation();
        if (lineOffset > scopeOffset) {
          return 'indent';
        } else if (lineOffset < scopeOffset) {
          return 'dedent';
        }
        return null;
      } else {
        if (scopeOffset > 0) {
          dedent(stream, state);
        }
      }
    }
    if (stream.eatSpace()) {
      return null;
    }

    var ch = stream.peek();

    // Handle docco title comment (single line)
    if (stream.match("####")) {
      stream.skipToEnd();
      return 'comment';
    }

    // Handle multi line comments
    if (stream.match("###")) {
      state.tokenize = longComment;
      return state.tokenize(stream, state);
    }

    // Single line comment
    if (ch === '#') {
      stream.skipToEnd();
      return 'comment';
    }

    // Handle number literals
    if (stream.match(/^-?[0-9\.]/, false)) {
      var floatLiteral = false;
      // Floats
      if (stream.match(/^-?\d*\.\d+(e[\+\-]?\d+)?/i)) {
        floatLiteral = true;
      }
      if (stream.match(/^-?\d+\.\d*/)) {
        floatLiteral = true;
      }
      if (stream.match(/^-?\.\d+/)) {
        floatLiteral = true;
      }

      if (floatLiteral) {
        // If a '.' follows, the dot we consumed belongs to a range operator
        // (e.g. 1..5); back it up so only the integer part is the number.
        if (stream.peek() == ".") {
          stream.backUp(1);
        }
        return 'number';
      }
      // Integers
      var intLiteral = false;
      // Hex
      if (stream.match(/^-?0x[0-9a-f]+/i)) {
        intLiteral = true;
      }
      // Decimal
      if (stream.match(/^-?[1-9]\d*(e[\+\-]?\d+)?/)) {
        intLiteral = true;
      }
      // Zero by itself with no other piece of number.
      if (stream.match(/^-?0(?![\dx])/i)) {
        intLiteral = true;
      }
      if (intLiteral) {
        return 'number';
      }
    }

    // Handle strings
    if (stream.match(stringPrefixes)) {
      state.tokenize = tokenFactory(stream.current(), 'string');
      return state.tokenize(stream, state);
    }
    // Handle regex literals
    if (stream.match(regexPrefixes)) {
      // A lone '/' with no closing '/' on the line is division, not a regex
      if (stream.current() != '/' || stream.match(/^.*\//, false)) {
        state.tokenize = tokenFactory(stream.current(), 'string-2');
        return state.tokenize(stream, state);
      } else {
        stream.backUp(1);
      }
    }

    // Handle operators and delimiters
    if (stream.match(tripleDelimiters) || stream.match(doubleDelimiters)) {
      return 'punctuation';
    }
    if (stream.match(doubleOperators)
        || stream.match(singleOperators)
        || stream.match(wordOperators)) {
      return 'operator';
    }
    if (stream.match(singleDelimiters)) {
      return 'punctuation';
    }

    if (stream.match(constants)) {
      return 'atom';
    }

    if (stream.match(keywords)) {
      return 'keyword';
    }

    if (stream.match(identifiers)) {
      return 'variable';
    }

    if (stream.match(properties)) {
      return 'property';
    }

    // Handle non-detected items
    stream.next();
    return ERRORCLASS;
  }
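
  // tokenFactory returns a tokenizer for a string or regex body: it consumes
  // input (honoring backslash escapes) until the given delimiter closes, then
  // hands control back to tokenBase and yields the given style.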
  function tokenFactory(delimiter, outclass) {
    var singleline = delimiter.length == 1;
    return function tokenString(stream, state) {
      while (!stream.eol()) {
        stream.eatWhile(/[^'"\/\\]/);
        if (stream.eat('\\')) {
          stream.next();
          if (singleline && stream.eol()) {
            return outclass;
          }
        } else if (stream.match(delimiter)) {
          state.tokenize = tokenBase;
          return outclass;
        } else {
          stream.eat(/['"\/]/);
        }
      }
      if (singleline) {
        if (conf.mode.singleLineStringErrors) {
          outclass = ERRORCLASS;
        } else {
          state.tokenize = tokenBase;
        }
      }
      return outclass;
    };
  }
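
  // Consume a "###" block comment; returns to tokenBase once the closing
  // "###" is found.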
  function longComment(stream, state) {
    while (!stream.eol()) {
      stream.eatWhile(/[^#]/);
      if (stream.match("###")) {
        state.tokenize = tokenBase;
        break;
      }
      stream.eatWhile("#");
    }
    return "comment";
  }
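
  // Push a new scope: either an indentation scope ('coffee'), offset one
  // indentUnit past the innermost 'coffee' scope, or a bracket scope keyed
  // by its closing character and aligned just past the opening bracket.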
  function indent(stream, state, type) {
    type = type || 'coffee';
    var indentUnit = 0;
    if (type === 'coffee') {
      for (var i = 0; i < state.scopes.length; i++) {
        if (state.scopes[i].type === 'coffee') {
          indentUnit = state.scopes[i].offset + conf.indentUnit;
          break;
        }
      }
    } else {
      indentUnit = stream.column() + stream.current().length;
    }
    state.scopes.unshift({
      offset: indentUnit,
      type: type
    });
  }
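
  // Pop scopes until the top matches the current indentation. Returns true
  // when the line's indentation matches no enclosing scope (an error for the
  // caller to flag), and a falsy value otherwise.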
  function dedent(stream, state) {
    if (state.scopes.length == 1) return;
    if (state.scopes[0].type === 'coffee') {
      var _indent = stream.indentation();
      var _indent_index = -1;
      for (var i = 0; i < state.scopes.length; ++i) {
        if (_indent === state.scopes[i].offset) {
          _indent_index = i;
          break;
        }
      }
      if (_indent_index === -1) {
        return true;
      }
      while (state.scopes[0].offset !== _indent) {
        state.scopes.shift();
      }
      return false;
    } else {
      state.scopes.shift();
      return false;
    }
  }
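
  // tokenLexer wraps state.tokenize with the scope bookkeeping: it opens
  // scopes after '->'/'=>', indent keywords, and opening brackets, and
  // closes them on dedents, closing brackets, 'then', and pending 'return's.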
  function tokenLexer(stream, state) {
    var style = state.tokenize(stream, state);
    var current = stream.current();

    // Handle '.' connected identifiers
    if (current === '.') {
      style = state.tokenize(stream, state);
      current = stream.current();
      if (style === 'variable') {
        return 'variable';
      } else {
        return ERRORCLASS;
      }
    }

    // Handle scope changes.
    if (current === 'return') {
      state.dedent += 1;
    }
    if (((current === '->' || current === '=>') &&
         !state.lambda &&
         state.scopes[0].type == 'coffee' &&
         stream.peek() === '')
        || style === 'indent') {
      indent(stream, state);
    }
    var delimiter_index = '[({'.indexOf(current);
    if (delimiter_index !== -1) {
      indent(stream, state, '])}'.slice(delimiter_index, delimiter_index + 1));
    }
    if (indentKeywords.exec(current)) {
      indent(stream, state);
    }
    if (current == 'then') {
      dedent(stream, state);
    }

    if (style === 'dedent') {
      if (dedent(stream, state)) {
        return ERRORCLASS;
      }
    }
    delimiter_index = '])}'.indexOf(current);
    if (delimiter_index !== -1) {
      if (dedent(stream, state)) {
        return ERRORCLASS;
      }
    }
    if (state.dedent > 0 && stream.eol() && state.scopes[0].type == 'coffee') {
      if (state.scopes.length > 1) state.scopes.shift();
      state.dedent -= 1;
    }

    return style;
  }

  var external = {
    startState: function(basecolumn) {
      return {
        tokenize: tokenBase,
        scopes: [{offset: basecolumn || 0, type: 'coffee'}],
        lastToken: null,
        lambda: false,
        dedent: 0
      };
    },

    token: function(stream, state) {
      var style = tokenLexer(stream, state);

      state.lastToken = {style: style, content: stream.current()};

      if (stream.eol() && state.lambda) {
        state.lambda = false;
      }

      return style;
    },

    indent: function(state, textAfter) {
      if (state.tokenize != tokenBase) {
        return 0;
      }

      return state.scopes[0].offset;
    }
  };

  return external;
});

CodeMirror.defineMIME('text/x-coffeescript', 'coffeescript');
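
// Usage sketch (illustrative; the element id and editor options below are
// assumptions, not part of this file). With CodeMirror and this mode loaded:
//
//   var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
//     mode: "coffeescript",
//     lineNumbers: true
//   });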