Node's event emitter for all engines.

events = require("events")
emitter = events.EventEmitter

JavaScript's functional programming helper library --
See http://documentcloud.github.com/underscore for more info

_ = require "underscore"

Console colors for Node --
See https://github.com/Marak/colors.js for more info

colors = require "colors"

Filesystem API

fs = require "fs"

Recursive mkdir for Node (think mkdir -p) --
See https://github.com/substack/node-mkdirp for more info

mkdir = require( "mkdirp" ).mkdirp

Node's path helper library

path = require "path"

A Sinatra inspired web development framework for Node --
See http://expressjs.com for more info

express = require "express"
class Log

onEvent

Logs events in default console color

Args:

  • x {String}: message to log
	# Logs an event message, indented, in the default console color.
	# Suppressed when the module-level `quiet` flag is set.
	onEvent: (x) ->
		unless quiet
			console.log "   #{x}"

onStep

Logs steps in blue

Args:

  • x {String}: message to log
	# Logs a step/progress message in blue.
	# Suppressed when the module-level `quiet` flag is set.
	onStep: (x) ->
		unless quiet
			console.log "#{x}".blue

onComplete

Logs successful process completions in green

Args:

  • x {String}: message to log
	# Logs a success message in green - always printed, even in quiet mode.
	onComplete: (x) ->
		console.log "#{x}".green

onError

Logs errors in red

Args:

  • x {String}: message to log
	# Logs an error in red, wrapped in "!!!" markers - always printed.
	onError: (x) ->
		console.log "!!! #{x} !!!".red

log = new Log()

exports.log = log
_ = require "underscore"
path = require "path"
Commander = require( "commander" ).Command

Configuration container

config = { }

Configuration defaults

siteConfig =
	"source": "src"
	"style": "style"
	"markup": "markup"
	"output": 
		{
			"source": [ "lib", "site/js" ],
			"style": [ "css", "site/css" ],
			"markup": "site/"
		}
	"spec": "spec"
	"ext": "ext"
	"lint": {}
	"uglify": {}
	"cssmin": {}
	"hosts": {
	  "/": "site"
	}

libConfig = 
	"source": "src"
	"output": "lib"
	"spec": "spec"
	"ext": "ext"
	"lint": {}
	"uglify": {}
	"hosts": {
	  "/": "spec"
	}

defaultMocha =
	growl: true
	ignoreLeaks: true
	reporter: "spec"
	ui: "bdd"
	colors: true

defaultDoc =
	generator: "docco"
	output: "docs"

continuous = test = inProcess = quiet = debug = false

ext =
	gzip: "gz"
	uglify: "min"
	cssmin: "min"

extensionLookup = 
	".css": "style"
	".scss": "style"
	".sass": "style"
	".less": "style"
	".stylus": "style"
	".js": "source"
	".coffee": "source"
	".markdown": "markup"
	".md": "markup"
	".html": "markup"

Configuration

Do all the things!
Calling anvil from the command line runs this.

class Configuration 

	constructor: ( @fp, @scheduler, @log ) ->

configure

this call will return a configuration object that will
inform the rest of the process
* onConfig {Function}: the callback to invoke with a configuration object

	configure: ( argList, onConfig ) ->
		self = this
		command = new Commander()
		command
			.version("0.7.7")
			.option( "-b, --build [build file]", "Use a custom build file", "./build.json" )
			.option( "--ci", "Run a continuous integration build" )
			.option( "--host", "Setup a static HTTP host" )
			.option( "--lib [project]", "Create a lib project at the folder [project]" )
			.option( "--libfile [file name]", "Create a new lib build file named [file name]" )
			.option( "--site [project]", "Create a site project at the folder [project]" )
			.option( "--sitefile [file name]", "Create a new site build file named [file name]" )
			.option( "--mocha", "Run specifications using Mocha" )

.option( "--docco", "Create annotated source using docco" )

			.option( "--ape", "Create annotated source using ape" )
			.option( "-q, --quiet", "Only print completion and error messages" )

		command.parse( argList );

		if command.libfile or command.sitefile

Generate all the directories and the config file

			name = command.libfile or= command.sitefile
			type = if command.sitefile then 'site' else 'lib'
			@writeConfig type, "#{name}.json", () ->
				self.log.onComplete "Created #{ type } build file - #{ name }"
				onConfig config, true
		else if command.site or command.lib

Generate all the directories and the config file

			type = if command.site then 'site' else 'lib'
			scaffold = command.site or= command.lib
			config = if type == 'site' then siteConfig else libConfig
			@log.onStep "Creating scaffolding for new #{ type } project"

Create all the directories

			self.ensurePaths( () ->
				self.writeConfig( type, scaffold + "/build.json", () ->
					self.log.onComplete "Scaffold ( #{ scaffold } ) created."
					onConfig config, true
				)
			, scaffold )
		else
			buildFile = command.build
			@log.onStep "Checking for #{ buildFile }"
			exists = @fp.pathExists buildFile
			@prepConfig exists, buildFile, () ->
				if command.host
					config.host = true

				if command.ci
					config.continuous = true

				if command.mocha
					config.mocha = defaultMocha

				if command.ape
					config.docs = defaultDoc
					config.docs.generator = "ape"

				if command.docco
					config.docs = defaultDoc

Run transforms and generate output

				self.ensurePaths () ->
					onConfig config		

createLibBuild

This creates a file containing the default lib build convention

	# NOTE(review): buildLibTemplate and the unscoped writeConfig are not defined
	# anywhere in this class - this looks like dead legacy code superseded by the
	# --libfile handling in configure; confirm before relying on it
	createLibBuild: () ->

build lib template?

		if buildLibTemplate
			output = if buildLibTemplate == true then "build.json" else buildLibTemplate
			writeConfig "lib", output
			global.process.exit(0)
			config

createSiteBuild

This creates a file containing the default site build convention

	# NOTE(review): buildSiteTemplate and the unscoped writeConfig are not defined
	# anywhere in this class - this looks like dead legacy code superseded by the
	# --sitefile handling in configure; confirm before relying on it
	createSiteBuild: () ->

build site template?

		if buildSiteTemplate
			output = if buildSiteTemplate == true then "build.json" else buildSiteTemplate
			writeConfig "site", output
			global.process.exit(0)
			config

ensurePaths

Make sure that all expected paths exist

Args:

  • onComplete {Function}: what to call when work is complete
  • prefix {String}: the prefix to prepend to all paths
	ensurePaths: ( onComplete, prefix ) ->
		self = this
		prefix = prefix or= ""
		config.working = config.working || "./tmp"
		fp = @fp
		paths = [
			config[ "source" ]
			config[ "style" ]
			config[ "markup" ]
			config[ "spec" ]
			config[ "ext" ]
			config[ "working" ]
		]

if documenting

		if config.docs
			paths.push config.docs.output
		
		outputList = []

if the output is an object

		if _.isObject config.output
			outputList = _.flatten config.output
		else

if output is a single path

			outputList = [ config.output ]
		paths = paths.concat outputList

if names

		name = config.name
		if name
			for output in outputList
				if _.isString name
					nestedPath = path.dirname name
					if nestedPath 
						paths.push path.join output, nestedPath
				else
					nestedPaths = _.map _.flatten( name ), ( x ) -> path.join output, path.dirname( x )
					paths = paths.concat nestedPaths

		worker = ( p, done ) -> 
			try 
				fp.ensurePath [ prefix, p ], () ->
					done()
			catch err
				done()

		@log.onStep "Ensuring project directory structure"
		@scheduler.parallel paths, worker, onComplete

prepConfig

Fallback to default config, if specified config doesn't exist

Args:

  • exists {Boolean}: does the specified config file exist?
  • file {String}: config file name
  • onComplete {Function}: what to do after config is prepped
	# Load the build file when it exists, otherwise fall back to convention;
	# either way, normalize the resulting config before handing it back.
	prepConfig: ( exists, file, onComplete ) ->
		self = this
		normalized = () -> self.normalizeConfig onComplete
		if exists
			@loadConfig file, normalized
		else
			@loadConvention normalized

loadConfig

Setup full configuration using specified config file
For example, anvil -b custom.json

Args:

  • file {String}: config file name
  • onComplete {Function}: what to do after config is loaded
	# Reads and parses the JSON build file, honoring any custom gzip/uglify
	# extension overrides before continuing.
	loadConfig: ( file, onComplete ) ->
		@log.onStep "Loading config..."
		fp = @fp
		fp.read file, ( content ) ->
			# NOTE(review): JSON.parse throws on a malformed build file - confirm
			# whether a friendlier error is wanted here
			config = JSON.parse( content )
			if config.extensions
				ext.gzip = config.extensions.gzip || ext.gzip
				ext.uglify = config.extensions.uglify || ext.uglify
				# NOTE(review): extensions.cssmin is never read even though
				# ext.cssmin exists - confirm whether it should be honored too

Carry on!

			onComplete()

loadConvention

Sets up default config if no config file is found

Args:

  • onComplete {Function}: what to do after config is setup
	# No build file on disk: choose site vs. lib defaults based on whether a
	# ./site directory exists, install them as the active config, and move on.
	loadConvention: ( onComplete ) ->
		isSite = @fp.pathExists "./site"
		@log.onStep "No build file found, using #{ if isSite then 'site' else 'lib' } conventions"
		config = if isSite then siteConfig else libConfig
		onComplete()

normalizeConfig

Tries to normalize differences in configuration formats
between options and site vs. lib configurations

Args:

  • onComplete {Function}: what to call when work is complete
	# Normalizes site vs. lib config shapes: expands a string output into the
	# { style, source, markup } form, resolves finalize/wrap blocks, and applies
	# mocha/docs defaults before invoking onComplete.
	normalizeConfig: ( onComplete ) ->
		self = this
		fp = @fp
		config.output = config.output || "lib"
		if _.isString config.output
			outputPath = config.output
			config.output =
				style: outputPath
				source: outputPath
				markup: outputPath

		calls = []

finalization?

		finalize = config.finalize
		if finalize
			calls.push ( done ) ->
				self.getFinalization finalize, ( result ) ->
					config.finalize = result
					done()

wrapping?

		wrap = config.wrap
		if wrap
			calls.push ( done ) ->
				self.getWrap wrap, ( result ) ->
					config.wrap = result
					done()

		# extend into a fresh object: _.extend mutates its first argument, so the
		# previous code permanently altered the shared defaultMocha/defaultDoc
		# objects, leaking one build's overrides into every later build
		if config.mocha
			config.mocha = _.extend {}, defaultMocha, config.mocha

		if config.docs
			config.docs = _.extend {}, defaultDoc, config.docs

any calls?

		if calls.length > 0
			@scheduler.parallel calls,
				( call, done ) ->
					call( done )
				, () -> onComplete()
		else
			onComplete()

getFinalization

Build up a custom state machine to address how
finalization should happen for this project

Args:

  • original {Object}: the existing finalization block
  • onComplete {Function}: what to call when work is complete
	getFinalization: ( original, onComplete ) ->
		self = this
		finalization = {}
		result = {}
		aggregation = {}
		aggregate = @scheduler.aggregate
		

if there's no finalization

		if not original or _.isEqual original, {}
			onComplete finalization

if there's only one section

		else if original.header or 
				original["header-file"] or 
				original.footer or 
				original["footer-file"]

build out aggregation for resolving header and footer

			@getContentBlock original, "header", aggregation
			@getContentBlock original, "footer", aggregation

make sure we don't try to aggregate on empty

			if _.isEqual aggregation, {}
				onComplete finalization
			else
				aggregate aggregation, ( constructed ) ->
					finalization.source = constructed
					onComplete finalization

there are multiple sections

		else
			sources = {}
			blocks = { 
				"source": original[ "source" ], 
				"style": original[ "style" ], 
				"markup": original[ "markup" ] 
			}
			_.each( blocks, ( block, name ) -> 
				subAggregate = {}
				self.getContentBlock block, "header", subAggregate
				self.getContentBlock block, "footer", subAggregate
				sources[ name ] = ( done ) -> 
					aggregate subAggregate, done
			)
			aggregate sources, onComplete

getWrap

Build up a custom state machine to address how
wrapping should happen for this project

Args:

  • original {Object}: the existing wrap block
  • onComplete {Function}: what to call when work is complete
	getWrap: ( original, onComplete ) ->
		self = this
		wrap = {}
		result = {}
		aggregation = {}
		aggregate = @scheduler.aggregate

if there's no wrap

		if not original or _.isEqual original, {}
			onComplete wrap

if there's only one section

		else if original.prefix or 
				original["prefix-file"] or 
				original.suffix or 
				original["suffix-file"]

build out aggregation for resolving prefix and suffix

			@getContentBlock original, "prefix", aggregation
			@getContentBlock original, "suffix", aggregation

make sure we don't try to aggregate on empty

			if _.isEqual aggregation, {}
				onComplete wrap
			else
				aggregate aggregation, ( constructed ) ->
					wrap.source = constructed
					onComplete wrap

there are multiple sections

		else
			sources = {}
			blocks = { 
				"source": original[ "source" ], 
				"style": original[ "style" ], 
				"markup": original[ "markup" ] 
			}
			_.each( blocks, ( block, name ) -> 
				subAggregate = {}
				self.getContentBlock block, "prefix", subAggregate
				self.getContentBlock block, "suffix", subAggregate
				sources[ name ] = ( done ) -> aggregate subAggregate, done
			)
			aggregate sources, onComplete

getContentBlock

Normalizes a wrapper or finalizer segment

Args:

  • _property {string}: the property name to check for
  • source {Object}: the configuration block
  • onComplete {Function}: what to call when work is complete
	# Installs a resolver function for `property` on the aggregation map.
	# Defaults to the empty string; a "<property>-file" entry that exists on
	# disk wins over an inline "<property>" value.
	getContentBlock: ( source, property, aggregation ) ->
		# default resolver: no content
		aggregation[ property ] = ( done ) -> done ""
		fp = @fp
		if source
			propertyPath = source["#{ property }-file"]
			propertyValue = source[ property ]
			if propertyPath and @fp.pathExists propertyPath
				aggregation[ property ] = ( done ) -> 
					fp.read propertyPath, ( content ) ->
						done content
			else if propertyValue
				aggregation[ property ] = ( done ) -> done propertyValue

writeConfig

Creates new default config file

Args:

  • name {String}: the config file name
  • onComplete {Function}: what to call when work is complete
	# Serializes the default config for `type` ("lib" or "site") to `name`
	# as tab-indented JSON, then reports completion.
	writeConfig: ( type, name, onComplete ) ->
		config = if type == "lib" then libConfig else siteConfig
		log = @log
		json = JSON.stringify( config, null, "\t" )
		@fp.write name, json, () ->
			log.onComplete "#{name} created successfully!"
			onComplete()

exports.configuration = Configuration

_ = require "underscore"

Scheduler

Provides flow control abstractions
aggregate and parallel are essentially fork/join variations and
pipeline is an asynchronous way to pass an input through a series
of transforms.

class Scheduler

	constructor: () ->

parallel

This takes a list of items and a single asynchronous
function with the signature ( item, done ) and
calls the worker for each item only invoking onComplete
once all calls have completed.
* items {Array}: a list of items to process
* worker {Function}: the worker that processes all the items
* onComplete {Function}: the function to call once all workers have completed

	parallel: ( items, worker, onComplete ) ->

Fail fast if list is empty

		if not items or items.length == 0
			onComplete []
			# return here - previously execution fell through, so a null or
			# undefined items argument threw on items.length after completing
			return
		count = items.length
		results = []

Pushes result (if truthy) onto the results list and, if there are no more
items, calls onComplete with results

		done = ( result ) ->
			count = count - 1

Is result truthy?

			if result

Append to results!

				results.push result

Is iteration complete?

			if count == 0

Call onComplete!

				onComplete( results )

Iteration occurs here

		worker( item, done ) for item in items

pipeline

This takes an item and mutates it by calling a series
of asynchronous workers with the signature ( item, done ) and
only invokes onComplete after the last function in the pipeline completes.
* item {Object}: the initial item to pass to the first call
* workers {Array}: the ordered list of functions that compose the pipeline
* onComplete {Function}: the function to call once the last function has completed

	pipeline: ( item, workers, onComplete ) ->

Fail fast if list is empty

		if item == undefined or not workers or workers.length == 0
			onComplete item || {}
			# return here - previously the pipeline kept running, shifted an
			# empty worker list, and invoked undefined as a function
			return

take the next worker in the list
and pass item (in its current state) to it

		iterate = ( done ) ->
			worker = workers.shift()
			worker item, done
		done = ( product ) ->

store the mutated product of the worker

			item = product

Any workers remaining?

			if workers.length == 0

Call onComplete!

				onComplete( product )
			else
				iterate done

kick off the pipeline

		iterate done

aggregate

Takes a hash map of calls and returns a corresponding hash map of
the results once all calls have completed. It's a weird fork/join
with named results vs. a randomly ordered list of results
* calls {Object}: the hash map of named asynchronous functions to call
* onComplete {Function}: the resulting hash map of corresponding values

	aggregate: ( calls, onComplete ) ->
		results = {}

checks to see if all results have been collected

		isDone = () -> 
			_.chain( calls ).keys().all( ( x ) -> results[ x ] != undefined ).value()

an empty call map completes immediately - previously onComplete was never
invoked in that case, hanging the caller

		if not calls or _.keys( calls ).length == 0
			onComplete results
			return

build a callback for the specific named function

		getCallback = ( name ) ->
			( result ) ->
				# NOTE(review): a call that yields undefined stalls the whole
				# aggregate, since isDone treats undefined as "not finished"
				results[ name ] = result

have all the other calls completed?

				if isDone()
					onComplete results

iterate through the call list and invoke each one

		_.each( calls, ( call, name ) ->
			callback = getCallback name
			call callback
		)

exports.scheduler = Scheduler

fs = require "fs"
path = require "path"
_ = require "underscore"

FSCrawler

Wrote a custom 'dive' replacement after
the API changed significantly. The needs of Anvil are
pretty unique - always crawl the whole directory structure
from the start point and don't start work until we know all the files.
This 'crawls' a directory and returns all the files in the
structure recursively.

class FSCrawler

	constructor: ( @scheduler ) ->
		_.bindAll( this )

crawl

Crawls the whole directory structure starting with directory
and returns the full file listing.
* directory {String/Array}: a string or path spec for the directory to start crawling at
* onComplete {Function}: the function to call with a complete list of all the files

	crawl: ( directory, onComplete ) ->
		self = this
		fileList = []
		forAll = @scheduler.parallel
		if directory and directory != ""

get the fully qualified path

			directory = path.resolve directory

read directory contents

			fs.readdir directory, ( err, contents ) ->

if we didn't get an error and we have contents

				if not err and contents.length > 0
					qualified = []

resolve and push qualified paths into the array

					for item in contents
						qualified.push path.resolve directory, item
					

find out if we have a directory or a file handle for
all the results from fs.readdir

					self.classifyHandles qualified, ( files, directories ) ->
						fileList = fileList.concat files

if we found any directories, continue crawling those

						if directories.length > 0
							forAll directories, self.crawl, ( files ) ->
								fileList = fileList.concat _.flatten files
								onComplete fileList

no more directories at this level, return the file list

						else
							onComplete fileList

there was a problem or no files, return the list, we're done here

				else
					onComplete fileList

no more to do, return the list

		else
			onComplete fileList

classifyHandles

Provides a fork/join wrapper around getting the fs stat objects for the list
of paths.
* list {Array}: the list of paths to check
* onComplete {Function}: the function to call with the lists of files and directories

	# Stats every path in parallel, then splits the results into file and
	# directory lists for the crawler.
	classifyHandles: ( list, onComplete ) ->
		if list and list.length > 0
			@scheduler.parallel list, @classifyHandle, ( classified ) ->
				files = []
				directories = []
				for item in classified
					if item.isDirectory 
						directories.push item.file 
					# classifyHandle reports failures under "err" - this previously
					# tested "error", which let unreadable paths into the file list
					else if not item.err
						files.push item.file
				onComplete files, directories
		else
			onComplete [], []

classifyHandle

Get the fs stat and determine if the path is to a file or a directory
* file {String}: the path to check
* onComplete {Function}: the function to call with the result of the check

	# Stats a single path and reports whether it is a directory; stat failures
	# are passed along in the result's "err" field rather than thrown.
	classifyHandle: ( file, onComplete ) ->
		fs.stat file, ( err, stat ) ->
			return onComplete { file: file, err: err } if err
			onComplete { file: file, isDirectory: stat.isDirectory() }
		

exports.crawler = FSCrawler
fs = require "fs"
_ = require "underscore"

FSProvider

An abstraction around file interaction.
This is necessary to test any of Anvil's file level
interactions.

class FSProvider
	
	constructor: ( @crawler, @log ) ->
		_.bindAll this

buildPath

Given an array or string pathspec, return a string pathspec

Args:

  • pathSpec {Array, String}: pathspec of either an array of strings or a single string
	# Normalizes a path spec: an array of segments is joined into a single
	# path string, a plain string passes through, a falsy spec yields "".
	buildPath: ( pathSpec ) ->
		return "" unless pathSpec
		if _.isArray( pathSpec ) then path.join.apply( {}, pathSpec ) else pathSpec

delete

Deletes the file at the given path spec, if it exists

Args:

  • filePath {String/Array}: file name or path spec array
  • onDeleted {Function}: callback invoked after the delete attempt
	# Deletes filePath (if present) and always invokes the callback.
	# onDeleted receives the unlink error, if any, as its first argument.
	delete: ( filePath, onDeleted ) ->
		filePath = @buildPath filePath
		if @pathExists filePath
			fs.unlink filePath, ( err ) ->
				# surface any unlink failure to the caller (previously swallowed)
				onDeleted err
		else
			# previously the callback was never invoked for a missing file,
			# leaving callers hanging - treat "already gone" as success
			onDeleted()
			

ensurePath

Makes sure pathSpec path exists before calling onComplete by
calling mkdir pathSpec... if pathSpec does not initially exist

Args:

  • pathSpec {String}: path string or array
  • onComplete {Function}: called if path exists or is successfully created
	ensurePath: ( pathSpec, onComplete ) ->
		pathSpec = @buildPath pathSpec
		# NOTE(review): path.exists was deprecated in favor of fs.exists in later
		# Node releases - confirm the Node version this targets
		path.exists pathSpec, ( exists ) ->
			unless exists

No target yet. Let's make it!

				mkdir pathSpec, "0755", ( err ) ->

Couldn't make the path. Report and abort!

					if err
						# NOTE(review): `log` here is not the instance's @log - it
						# resolves to an outer-scope log; also onComplete is never
						# called on failure, so the caller stalls - confirm intent
						log.onError "Could not create #{pathSpec}. #{err}"
					else
						onComplete()
			else
				onComplete()

getFiles

Get all files in a specific path specification
* filePath {String/Array}: a string or array specifying the path to get files for
* onFiles {Function}: the function to call with the list of full file paths

	# Crawls filePath and passes the full recursive file listing to onFiles;
	# a falsy path spec yields an empty list immediately.
	getFiles: ( filePath, onFiles ) ->
		if not filePath 
			onFiles []
		else
			filePath = @buildPath filePath
			# (removed a dead `files = []` local that was never used)
			@crawler.crawl filePath, onFiles

copy ##

Copy a file
* from {String/Array}: the path spec for the file to copy
* to {String/Array}: the path spec for the destination
* onComplete {Function}: the function to call when the copy has completed

	copy: ( from, to, onComplete ) ->
		from = this.buildPath from
		to = this.buildPath to
		readStream = undefined
		writeStream = fs.createWriteStream( to )
		( readStream = fs.createReadStream( from ) ).pipe( writeStream )
		# wait for the read side to drain, flush the write side, then signal done
		readStream.on 'end', () ->
			if writeStream
				writeStream.destroySoon()
			onComplete()
		# NOTE(review): stream 'error' events are unhandled - a failed copy will
		# crash the process; confirm whether that is acceptable here

pathExists

Synchronously (GASP) check for the existence of a file or directory
* pathSpec {String/Array}: the string or path spec of the file or directory to check for

	# Synchronous existence check for a file or directory.
	# NOTE(review): path.existsSync moved to fs.existsSync in later Node
	# versions - confirm the targeted runtime before upgrading
	pathExists: ( pathSpec ) ->
		pathSpec = this.buildPath pathSpec
		path.existsSync pathSpec

read

Reads a file from filePath and calls onFile callback with contents (Asynchronously)

Args:

  • filePath {String}: pathspec of file to read and pass contents from
  • onContent {Function}: callback to pass file's contents to
	# Asynchronously reads filePath as UTF-8 and passes the contents to
	# onContent; on failure it logs and invokes onContent with ("", err).
	read: ( filePath, onContent ) ->
		filePath = @buildPath filePath
		fs.readFile filePath, "utf8", ( err, content ) ->
			if err
				# NOTE(review): `log` resolves to an outer-scope log, not @log
				log.onError "Could not read #{ filePath } : #{ err }"
				onContent "", err
			else
				onContent content

readSync

Reads a file from filePath ... synchronously ... SHAME! SHAAAAAAME! (ok, not really)
This function only exists for a specific use case in config, where there's literally
no advantage to reading files asynchronously but writing the code that way would
be a huge pain. Rationalization FTW

Args:

  • filePath {String}: pathspec of file to read and pass contents from
	readSync: ( filePath ) ->
		filePath = @buildPath filePath
		try
			fs.readFileSync filePath, "utf8"
		catch err
			log.onError "Could not read #{ filePath } : #{ err }"
			# NOTE(review): returns the error object itself on failure - callers
			# must distinguish it from real file content
			err

transform

Given input file filePath, perform transform upon it then write the transformed content
to outputPath and call onComplete. (All operations performed asynchronously.)

Args:

  • filePath {String}: pathspec of file to transform
  • transform {Function}: transform to perform on the file
  • outputPath {String}: pathspec of output file
  • onComplete {Function}: called when all operations are complete
	# Reads filePath, runs `transform content, callback` on its contents, and
	# writes the result to outputPath; a transform error is passed to onComplete
	# instead of being written out.
	transform: ( filePath, transform, outputPath, onComplete ) ->
		self = this
		filePath = @buildPath filePath
		outputPath = @buildPath outputPath
		this.read(
			filePath,
			( content ) ->
				transform content, ( newContent, error ) ->
					if not error
						self.write outputPath, newContent, onComplete
					else
						onComplete error
		)

write

Writes content to file at filePath calling done after writing is complete (Asynchronously)

Args:

  • filePath {String}: pathspec of file to write
  • content {String}: content to write to the file
  • onComplete {Function}: called when all operations are complete
	# Asynchronously writes content to filePath as UTF-8; on failure it logs
	# and passes the error to onComplete, otherwise calls onComplete with no args.
	write: ( filePath, content, onComplete ) ->
		filePath = @buildPath filePath
		fs.writeFile filePath, content, "utf8", ( err ) ->
			if err
				# NOTE(review): `log` resolves to an outer-scope log, not @log
				log.onError "Could not write #{ filePath } : #{ err }"
				onComplete err
			else
				onComplete()

exports.fsProvider = FSProvider

Unfancy JavaScript --
See http://coffeescript.org/ for more info

coffeeScript = require "coffee-script"

LESS Compiler --
See http://lesscss.org

less = require( "less" )

STYLUS Compiler --
See http://learnboost.github.com/stylus/

stylus = require( "stylus" )

HAML Compiler --
See http://haml-lang.com/

haml = require( "haml" )

Markdown Compiler --
See http://github.com/chjj/marked

marked = require( "marked" )
marked.setOptions { sanitize: false }

CoffeeKup Compiler --
See https://github.com/mauricemach/coffeekup

coffeeKup = require( "coffeekup" )

underscore --
The most essential JS lib that ever was
See http://underscorejs.org/

_ = require "underscore"

Compiler

'Compiles' files based on the extension to produce
browser friendly resources: JS, CSS, HTML

class Compiler

	constructor: (@fp, @log) ->
		_.bindAll( this )

compile

Compiles a file with the correct compiler

Args:

  • file {Object}: file metadata for the file to compile
  • onComplete {Function}: function to invoke when done
	# Looks up the compiler for the file's extension and transforms the working
	# copy in place, renaming it to the mapped output extension. Files with no
	# registered compiler pass through untouched.
	compile: ( file, onComplete ) ->
		self = this
		ext = file.ext()
		newExt = @extensionMap[ ext ]
		newFile = file.name.replace ext, newExt
		log = @log
		log.onEvent "Compiling #{ file.name } to #{ newFile }"
		compiler = @compilers[ ext ]
		if compiler
			@fp.transform( 
				[ file.workingPath, file.name ],
				compiler,
				[ file.workingPath, newFile ],
				( err ) ->
					unless err
						file.name = newFile
						onComplete file
					else
						# on failure the error (not the file) is handed back
						log.onError "Error compiling #{ file.name }: \r\n #{ err }"
						onComplete err
			)
		else
			# no compiler registered for this extension - pass the file through
			onComplete file

extensionMap

Provides a map of original to resulting extension

	# Maps a source extension to the extension its compiled output will use.
	extensionMap:
		".js": ".js"
		".css": ".css"
		".html": ".html"
		".coffee" : ".js"
		".kup": ".html"
		".less": ".css"
		".styl": ".css"
		".sass": ".css"
		".scss": ".css"
		".haml": ".html"
		".md": ".html"
		".markdown": ".html"

compilers

A simple hash map of file extension to a function that
invokes the corresponding compiler

	# Each entry compiles one source dialect. All handlers follow the callback
	# contract onContent( content, error ), where error is set only on failure.
	compilers:
		".coffee" : ( content, onContent ) ->
			try
				js = coffeeScript.compile content, { bare: true }
				onContent js
			catch error
				onContent "", error
		".less" : ( content, onContent ) ->
			try
				# pass the render error through (it was previously dropped, so a
				# failed LESS build produced undefined content with no error signal)
				less.render( content, {}, (e, css) -> onContent( css, e ) )
			catch error
				onContent "", error
		".sass" : ( content, onContent ) ->
			try
				# pass-through: no SASS compiler is wired up
				onContent content
			catch error
				onContent "", error
		".scss" : ( content, onContent ) ->
			try
				# pass-through: no SCSS compiler is wired up
				onContent content
			catch error
				onContent "", error
		".styl" : ( content, onContent ) ->
			try
				stylus.render( content, {}, (e, css) -> onContent( css, e ) )
			catch error
				onContent "", error
		".haml" : ( content, onContent ) ->
			try
				html = haml.render content
				onContent html
			catch error
				onContent "", error
		".md" : ( content, onContent ) ->
			try
				onContent( marked.parse( content ) )
			catch error
				onContent "", error
		".markdown" : ( content, onContent ) ->
			try
				onContent( marked.parse( content ) )
			catch error
				onContent "", error
		".kup" : ( content, onContent ) ->
			try
				html =( coffeeKup.compile content, {} )()
				onContent html
			catch error
				onContent "", error

exports.compiler = Compiler

_ = require "underscore"
path = require "path"

Combiner

Combines imports with the files importing them

class Combiner

	constructor: ( @fp, @scheduler, @findPatterns, @replacePatterns ) ->

combineList

combine all the files in the list and call onComplete when finished

Args:

  • list {Array}: collection of file metadata
  • onComplete {Function}: callback to invoke on completion
	combineList: ( list, onComplete ) ->
		self = this
		forAll = @scheduler.parallel

for all files in the list
find all the imports for every file
then find all the files that depend on each file
then combine all the files in the list

		findImports = _.bind( ( file, done ) ->
				self.findImports file, list, done
			, this )

once the imports are known, we can determine how many
files import (or depend) a given file

		findDependents = _.bind( ( file, done ) ->
				self.findDependents file, list, done
			, this )

replace all of file's import statements with
the imported files' contents

		combineFile = _.bind( ( file, done ) ->
			self.combineFile file, done
			, this )

combine all the files

		forAll list, findImports, () ->
			for f1 in list
				findDependents f1, list
			forAll list, combineFile, onComplete

combineFile

combine a specific file after ensuring its dependencies have been combined

Args:

  • file {Object}: the file metadata describing the file to combine
  • onComplete {Function}: callback to invoke on completion
	# Recursively combines a file: its imports are combined first (in parallel),
	# then the file itself; file.combined memoizes completion so shared
	# dependencies are only processed once.
	# NOTE(review): circular imports would recurse indefinitely - confirm
	# whether anything upstream guards against cycles
	combineFile: ( file, onComplete ) ->
		self = this
		forAll = @scheduler.parallel

if we've already combined this file, just call complete

		if file.combined
			onComplete()

otherwise, combine all the file's dependencies first, then combine the file

		else
			combineFile = ( file, done ) ->
				self.combineFile file, done

			dependencies = file.imports
			if dependencies and dependencies.length > 0
				forAll dependencies, combineFile, () ->
					self.combine file, () ->
						file.combined = true
						onComplete()
			else
				self.combine file, () ->
					file.combined = true
					onComplete()

findImports

search the file using regex patterns and store all referenced files

Args:

  • file {Object}: the file metadata describing the file to combine
  • list {Array}: collection of file metadata
  • onComplete {Function}: callback to invoke on completion
	# Scans the file's working copy for import statements and records the
	# matching file metadata from `list` on file.imports.
	findImports: ( file, list, onComplete ) ->
		self = this
		imports = []
		@fp.read [ file.workingPath, file.name ], ( content ) ->

find the import statements in the file contents using @findPatterns

			for pattern in self.findPatterns
				imports = imports.concat content.match pattern
			imports = _.filter imports, ( x ) -> x

strip out all the raw file names from the import statements
find the matching file metadata for the import

			for imported in imports
				importName = ( imported.match ///['\"].*['\"]/// )[ 0 ].replace(///['\"]///g, "" )
				importedFile = _.find( list, ( i ) -> 
					relativeImportPath = path.relative( path.dirname( file.fullPath ), path.dirname( i.fullPath ) )
					relativeImport = self.fp.buildPath( [ relativeImportPath, i.name ] )
					relativeImport == importName )
				# only record imports that resolve to a known file - pushing the
				# undefined result of a failed lookup crashed combineFile later
				file.imports.push importedFile if importedFile
			onComplete()

findDependents

search the list to see if any files import file

Args:

  • file {Object}: the file metadata describing the file to combine
  • list {Array}: collection of file metadata
  • onComplete {Function}: callback to invoke on completion
	# Counts how many files in `list` import this file, bumping file.dependents
	# once per importing file.
	findDependents: ( file, list ) ->
		isThisFile = ( candidate ) -> candidate.fullPath == file.fullPath
		for entry in list
			file.dependents++ if _.any entry.imports, isThisFile

combine

combine all the file's imports into its contents

Args:

  • file {Object}: the file metadata describing the file to combine
  • onComplete {Function}: callback to invoke on completion
	# Splices each import's contents into this file's working copy via the
	# scheduler pipeline, then rewrites the working copy in place. Already
	# combined files (or files with no imports) complete immediately.
	combine: ( file, onComplete ) ->
		self = this
		unless file.combined
			pipe = @scheduler.pipeline
			fp = @fp
			if file.imports.length > 0

creates a closure around a specific import to prevent
access to a changing variable

				steps = for imported in file.imports
						self.getStep file, imported
				fp.read [ file.workingPath, file.name ], ( main ) ->
					pipe main, steps, ( result ) ->
						fp.write [ file.workingPath, file.name ], result, () -> onComplete()
			else
				onComplete()
		else
			onComplete()

getStep

This is insane but it works - creating a closure around
a specific import to prevent accessing a changing variable.
* file {Object} : the file we're importing into
* import {Object}: the imported file to create the closure around

	# Builds a pipeline step bound to one specific import, so the loop variable
	# is captured by value rather than by reference.
	getStep: ( file, imported ) ->
		replaceStep = ( text, onDone ) =>
			@replace text, file, imported, onDone
		replaceStep

replace

create a replacement regex that will take the imported content and replace the
matched patterns within the main file's content

Args:

  • content {Object}: the content of the main file
  • file {Object} : the file we're importing into
  • imported {Object}: file metadata for the imported
  • onComplete {Function}: callback to invoke on completion
	replace: ( content, file, imported, onComplete ) ->
		patterns = @replacePatterns
		pipe = @scheduler.pipeline
		source = imported.name
		working = imported.workingPath
		relativeImportPath = path.relative( path.dirname( file.fullPath ), path.dirname( imported.fullPath ) )
		relativeImport = @fp.buildPath( [ relativeImportPath, imported.name ] )
		@fp.read [ working, source ], ( newContent ) ->
			steps = for pattern in patterns

creates a function that will replace the import statement
with a specific file's contents

				( current, done ) ->
					stringified = pattern.toString().replace ///replace///, relativeImport
					stringified = stringified.substring( 1, stringified.length - 2 )
					fullPattern = new RegExp stringified, "g"					
					capture = fullPattern.exec( content )
					if capture and capture.length > 1

capture the indentation of the import

						whiteSpace = capture[1]

apply indentation to all lines of the new content

						newContent = "#{ whiteSpace }" + newContent.replace ///\n///g, "\n#{ whiteSpace }"
					sanitized = current.replace( fullPattern, newContent.replace( "\$", "$" ) ).replace( "$", "$" )
					done sanitized
			pipe content, steps, ( result ) ->
				onComplete result

exports.combiner = Combiner

Uglify: JavaScript parser and compressor/beautifier toolkit --
See https://github.com/mishoo/UglifyJS for more info

jsp = require( "uglify-js" ).parser
pro = require( "uglify-js" ).uglify

A Node-compatible port of Douglas Crockford's JSLint --

jslint = require( "readyjslint" ).JSLINT

CSS Minifier --
See https://github.com/jbleuzen/node-cssmin

cssminifier = require "cssmin"

StylePipeline

The set of post-processes that happen to completed style outputs.
These include minification, wrapping and
finalization depending on the build configuration.

class StylePipeline

	constructor: ( @config, @fp, @minifier, @scheduler, @log ) ->
		_.bindAll( this )

process

Take the list of files and minify, wrap and finalize them
according to configuration. In the event that files are minified,
this function will create a separate set of files to separate
processing between developer friendly and deployment friendly files.
* files {Array}: the list of files to process
* onComplete {Function}: the function to call with the list of files

	process: ( files, onComplete ) ->
		self = this
		forAll = @scheduler.parallel
		# wrap first so prefixes/suffixes get minified along with the content
		forAll files, @wrap, () ->
			minified = []
			if self.config.cssmin
				# clone the file metadata so minified copies are produced
				# alongside the unminified originals
				minified = _.map( files, ( x ) -> _.clone x )
			forAll files, self.finalize, () -> 
				self.log.onStep "Finalizing CSS"
				forAll minified, self.minify, () -> 
					if minified.length > 0
						self.log.onStep "Minifying CSS"
					# finalize the minified copies separately, AFTER minification,
					# so header/footer content doesn't get mangled
					forAll minified, self.finalize, () -> 
						onComplete( files.concat minified )

minify

Uses the cssmin lib to minify the output styles
* file {Object}: the file metadata of the file to minify
* onComplete {Function}: the function to call after minification has completed

	minify: ( file, onComplete ) ->
		if @config.cssmin
			@log.onEvent "Minifying #{ file.name }"
			self = this
			ext = file.ext()
			# minified copy gets a .min.css name, e.g. site.css -> site.min.css
			newFile = file.name.replace ext, ".min.css"
			self.fp.transform( 
				[ file.workingPath, file.name ],
				( content, onTransform ) ->
					onTransform( self.minifier.cssmin content )
				, [ file.workingPath, newFile ],
				( ) ->
					file.name = newFile
					onComplete()
			)
		else
			onComplete()

finalize

Finalize, for lack of a better term, puts header and footer content around the file's contents.
This step is different than wrapping because it happens AFTER minification and won't get
mangled as a result.
* file {Object}: the file metadata of the file to finalize
* onComplete {Function}: the function to call after finalization has completed

	finalize: ( file, onComplete ) ->
		self = this
		if @config.finalize and @config.finalize.style
			@log.onEvent "Finalizing #{ file.name }"
			header = @config.finalize.style.header
			footer = @config.finalize.style.footer
			@fp.transform( 
				[ file.workingPath, file.name ], 
				( content, onTransform ) ->
					if header
						content = header + content
					if footer
						content = content + footer
					onTransform content
				, [ file.workingPath, file.name ],
				onComplete
			)
		else
			onComplete()

wrap

Wraps the contents of the file with a prefix and suffix before minification occurs.
* file {Object}: the file metadata of the file to wrap
* onComplete {Function}: the function to call after wrapping has completed

	wrap: ( file, onComplete ) ->
		self = this
		if @config.wrap and @config.wrap.style
			@log.onEvent "Wrapping #{ file.name }"
			prefix = @config.wrap.style.prefix
			suffix = @config.wrap.style.suffix
			@fp.transform( 
				[ file.workingPath, file.name ], 
				( content, onTransform ) ->
					if prefix
						content = prefix + content
					if suffix
						content = content + suffix
					onTransform content
				, [ file.workingPath, file.name ],
				onComplete
			)
		else
			onComplete()

SourcePipeline

The set of post-processes that happen to completed source outputs.
These include minification, wrapping and
finalization depending on the build configuration.

class SourcePipeline

	constructor: ( @config, @fp, @minifier, @scheduler, @log ) ->
		_.bindAll( this )

process

Take the list of files and minify, wrap and finalize them
according to configuration. In the event that files are minified,
this function will create a separate set of files to separate
processing between developer friendly and deployment friendly files.
* files {Array}: the list of files to process
* onComplete {Function}: the function to call with the list of files

	process: ( files, onComplete ) ->
		self = this
		forAll = @scheduler.parallel
		# wrap first so prefixes/suffixes get minified along with the content
		forAll files, @wrap, () ->
			minify = []
			if self.config.uglify
				# clone the file metadata so minified copies are produced
				# alongside the unminified originals
				minify = _.map( files, ( x ) -> _.clone x )
			forAll files, self.finalize, () -> 
				self.log.onStep "Finalizing source files"
				forAll minify, self.minify, () -> 
					if minify.length > 0
						self.log.onStep "Minifying source files"
					# finalize the minified copies separately, AFTER minification,
					# so header/footer content doesn't get mangled
					forAll minify, self.finalize, () -> 
						onComplete( files.concat minify )

minify

Uses the uglify lib to minify the output source
* file {Object}: the file metadata of the file to minify
* onComplete {Function}: the function to call after minification has completed

	minify: ( file, onComplete ) ->
		# honor per-file exclusions listed in config.uglify.exclude
		exclusions = @config.uglify?.exclude || []
		isExcluded = _.any exclusions, ( x ) -> x == file.name
		if @config.uglify and not isExcluded
			self = this
			ext = file.ext()
			newFile = file.name.replace ext, ".min.js"
			@log.onEvent "Minifying #{ newFile }"
			@fp.transform( 
				[ file.workingPath, file.name ],
				( content, onTransform ) ->
					self.minifier content, ( err, result ) ->
						if err
							# on a minification error, log it and fall back to the
							# unminified content rather than failing the build
							self.log.onError "Error minifying #{ file.name } : \r\n\t #{ err }"
							result = content
						onTransform( result )
				, [ file.workingPath, newFile ],
				() ->
					file.name = newFile
					onComplete()
			)
		else
			onComplete()

finalize

Finalize, for lack of a better term, puts header and footer content around the file's contents.
This step is different than wrapping because it happens AFTER minification and won't get
mangled as a result.
* file {Object}: the file metadata of the file to finalize
* onComplete {Function}: the function to call after finalization has completed

	finalize: ( file, onComplete ) ->
		self = this
		if @config.finalize and @config.finalize.source
			@log.onEvent "Finalizing #{ file.name }"
			header = @config.finalize.source.header
			footer = @config.finalize.source.footer
			@fp.transform( 
				[ file.workingPath, file.name ], 
				( content, onTransform ) ->
					if header
						content = header + content
					if footer
						content = content + footer
					onTransform content
				, [ file.workingPath, file.name ],
				() ->
					onComplete()
			)
		else
			onComplete()

wrap

Wraps the contents of the file with a prefix and suffix before minification occurs.
* file {Object}: the file metadata of the file to wrap
* onComplete {Function}: the function to call after wrapping has completed

	wrap: ( file, onComplete ) ->
		self = this
		if @config.wrap and @config.wrap.source
			@log.onEvent "Wrapping #{ file.name }"
			prefix = @config.wrap.source.prefix
			suffix = @config.wrap.source.suffix  
			@fp.transform( 
				[ file.workingPath, file.name ], 
				( content, onTransform ) ->
					if prefix
						content = prefix + content
					if suffix
						content = content + suffix
					onTransform content
				, [ file.workingPath, file.name ],
				() ->
					onComplete()
			)
		else
			onComplete()

MarkupPipeline

This class is a placeholder, as there are currently
no post-process steps for markup.

class MarkupPipeline

	# intentionally empty — markup output currently requires no post-processing
	constructor: () ->

PostProcessor

A provider abstraction around post-process steps for each resource
type that allows Anvil to have a 'branchless' pipeline for all
resource types

class PostProcessor

	# Build one post-process pipeline per resource type so callers can
	# dispatch on the type name without branching.
	constructor: ( @config, @fp, @scheduler, @log ) ->

		# adapt uglify's parse/mangle/squeeze steps to a node-style
		# ( err, result ) callback; on failure the error is passed along
		# with an empty result
		minifySource = ( code, done ) ->
			try
				tree = jsp.parse code
				tree = pro.ast_mangle tree
				tree = pro.ast_squeeze tree
				done undefined, pro.gen_code tree
			catch error
				done error, ""

		@style = new StylePipeline @config, @fp, cssminifier, @scheduler, @log
		@source = new SourcePipeline @config, @fp, minifySource, @scheduler, @log
		# markup has no post-processing; pass the files through untouched
		@markup =
			process: ( files, onComplete ) -> onComplete files


exports.postProcessor = PostProcessor

docco --
See http://jashkenas.github.com/docco/
docco = require "docco"

ape -- a multi-language annotated-source generator contributed by @aaronmccall --
See the ape package on npm for more info

ape = require "ape"

Documents

A minor adaptation of @aaronmccall's docco and ape support
that he contributed to the prior version of Anvil.

class Documenter
	
	constructor: ( @config, @fp, @scheduler, @log ) ->
		self = this
		_.bindAll( this )
		if @config.docs

if @config.docs.generator == "docco"
@generator = @runDocco
else

			@generator = @runApe
		else
			# no docs configured: a no-op generator that still honors the trailing
			# callback (the 5th argument of the generator call signature).
			# Previously this sliced `arguments` into an array and invoked the
			# array itself, which is a TypeError — grab the callback directly.
			@generator = () -> 
				callback = arguments[ 4 ]
				if callback
					callback()

generate

Generate documents for the list of files
* files {Array}: the array of file objects to create documents for

	generate: ( files ) ->
		self = this
		if files && files.length > 0
			@log.onEvent "Creating annotated source for: #{ _.pluck( files, 'name' ).toString() }"
			@scheduler.parallel files, @document, () ->
				self.log.onComplete "Code annotation completed"

document

Generate docco/ape annotated source for the combined file
Thanks much to @aaronmccall for contributing this code to Anvil!
* file {Object}: the file object to create the document for
* onComplete {Function}: the function to call once the documentation is done

	document: ( file, onComplete ) ->
		self = this
		language = ape.get_language file.name
		ext = file.ext()
		# the annotated output keeps the file's base name with a .html extension
		newFile = file.name.replace ext, ".html"

		@log.onEvent "Annotation for #{ file.name }"
		@fp.read [ file.workingPath, file.name ], ( content ) ->
			self.generator language, ext, newFile, content, ( doc ) ->
				self.fp.write [ self.config.docs.output, newFile ], doc, onComplete

runDocco

Wraps the document generation function in docco to a standard call format
runDocco: ( language, extension, newFile, code, onComplete ) ->
docco.generate_doc_from_string newFile, code, extension, ( result ) -> onComplete result

runApe

Wraps the document generation function in ape to a standard call format

	runApe: ( language, extension, newFile, code, onComplete ) ->
		ape.generate_doc code, language, 'html', null, ( err, result ) -> onComplete result
		
		

Anvil

This provides the primary logic and flow control for build activities

class Anvil

	constructor: ( @fp, @compiler, @combiner, @documenter, @scheduler, @postProcessor, @log, @callback ) ->
		@buildNumber = 0
		@inProcess = false

	# the complete set of file extensions anvil will process
	extensions: [ ".js", ".coffee", ".html", ".haml", ".markdown", ".md", ".css", ".styl", ".less" ]

build

Kicks off the build for the currently configured Anvil instance

	build: ( config ) ->
		# guard against overlapping builds (e.g. from rapid file-change events)
		if not @inProcess
			@initialize( config )
			@log.onStep "Build #{ @buildNumber } initiated"
			@inProcess = true
			@buildSource()
			@buildStyle()

buildMarkup

Builds all markup sources and provides the regex patterns used to
identify dependencies using regular expressions.

	buildMarkup: () ->
		findPatterns = [ ///[\<][!][-]{2}.?import[(]?.?['\"].*['\"].?[)]?.?[-]{2}[\>]///g ]
		replacePatterns = [ ///([ \t]*)[\<][!][-]{2}.?import[(]?.?['\"]replace['\"].?[)]?.?[-]{2}[\>]///g ]
		@processType( "markup", findPatterns, replacePatterns )

buildSource

Builds all JS and Coffee sources and provides the regex patterns used to
identify dependencies using regular expressions.

	buildSource: () ->
		findPatterns = [ ///([/]{2}|[\#]{3}).?import.?[(]?.?[\"'].*[\"'].?[)]?[;]?.?([\#]{0,3})///g ]
		replacePatterns = [ ///([ \t]*)([/]{2}|[\#]{3}).?import.?[(]?.?[\"']replace[\"'].?[)]?[;]?.?[\#]{0,3}///g ]
		@processType( "source", findPatterns, replacePatterns )

buildStyle

Builds all CSS, LESS and Stylus sources and provides the regex patterns used to
identify dependencies using regular expressions.

	buildStyle: () ->
		findPatterns = [ ///([/]{2}|[/][*]).?import[(]?.?[\"'].*[\"'].?[)]?([*][/])?///g ]
		replacePatterns = [ ///([ \t]*)([/]{2}|[/][*]).?import[(]?.?[\"']replace[\"'].?[)]?([*][/])?///g ]
		@processType( "style", findPatterns, replacePatterns )

initialize

Initializes state for the build

	initialize: ( config ) ->
		@config = config
		@filesBuilt = {}

mini FSM - basically we don't want to start building markup until
everything else is done since markup can import other built resources

		@steps = 
			source: false
			style: false
			markup: false
			hasSource: config.source
			hasStyle: config.style
			hasMarkup: config.markup
			# markup may start once source and style are done (or not configured)
			markupReady: () -> ( this.source or not this.hasSource ) and ( this.style or not this.hasStyle )
			allDone: () -> 
				status = ( this.source or not this.hasSource ) and ( this.style or not this.hasStyle ) and ( this.markup or not this.hasMarkup )
				status

processType

The steps that get followed for each resource type are the same.
This function provides the core behavior of identifying, combining,
compiling and post-processing for all the types.
* type {String}: ('source', 'style', 'markup') the type of resources to process
* findPatterns {Regex}: the list of regular expressions used to identify imports in this resource type
* replacePatterns {Regex}: the list of replacement regular expressions used to replace imports with file contents

	processType: ( type, findPatterns, replacePatterns ) ->
		self = this
		forAll = @scheduler.parallel
		compiler = @compiler
		combiner = new @combiner( @fp, @scheduler, findPatterns, replacePatterns )
		postProcessor = @postProcessor

		@log.onStep "Starting #{ type } pipe-line"
		self.prepFiles type, ( list ) ->
			if list and list.length > 0

				self.copyFiles list, () ->

combines imported files

					self.log.onStep "Combining #{ type } files"
					combiner.combineList list, () ->

filter out all files that were combined into another file

						final = _.filter( list, ( x ) -> x.dependents == 0 )

if documentation should be generated, do that now

						if self.config.docs
							self.documenter.generate final

compiles the combined results

						self.log.onStep "Compiling #{ type } files"
						forAll final, compiler.compile, ( compiled ) ->

kick off post processors for compiled files

							self.log.onStep "Post-process #{ type } files"
							postProcessor[ type ].process compiled, ( list ) ->

copy complete files to the destination folders

								self.log.onStep "Moving #{ type } files to destinations"
								self.finalOutput list, () ->
									self.stepComplete type
			else
				self.stepComplete type

finalOutput

Copies the final list of files to their output folders
* files {Array}: the list of files to copy
* onComplete {Function}: the function to call once all files have been copied

	finalOutput: ( files, onComplete ) ->
		fp = @fp
		names = @config.name
		forAll = @scheduler.parallel
		copy = ( file, done ) ->
			forAll( file.outputPaths, ( destination, moved ) ->
				outputName = file.name
				# config.name may rename outputs: a single string renames all,
				# a map renames per original file name
				if names
					if _.isString names 
						outputName = names
					else 
						custom = names[ file.name ]
						outputName = custom or= outputName
				fp.copy [ file.workingPath, file.name ], [ destination, outputName ], moved
			, done )
		forAll files, copy, onComplete

copyFiles

Copies the source files to the working path before beginning any processing
* files {Array}: the list of files to copy
* onComplete {Function}: the function to call once all files have been copied

	copyFiles: ( files, onComplete ) ->
		fp = @fp
		copy = ( file, done ) -> 
			fp.ensurePath file.workingPath, () -> 
				fp.copy file.fullPath, [ file.workingPath, file.name ], done
		@scheduler.parallel files, copy, onComplete

cleanWorking

Clears all files from the working directory
* onComplete {Function}: the function to call after directory is cleaned

	cleanWorking: ( onComplete ) ->
		fp = @fp
		forAll = @scheduler.parallel
		fp.getFiles @config.working, ( files ) ->
			forAll files, fp.delete, () ->
				onComplete()

prepFiles

Determine the list of files that belong to this particular resource type
and create metadata objects that describe the file and provide necessary
metadata to the rest of the processes.
* type {String}: ('source', 'style', 'markup')
* onComplete {Function}: the function to invoke with a completed list of file metadata

	prepFiles: ( type, onComplete ) ->
		self = this
		workingBase = @config.working
		typePath = @config[ type ]
		output = @config.output[ type ]
		output = if _.isArray( output ) then output else [ output ]
		log = @log
		@fp.getFiles typePath, ( files ) ->
			log.onEvent "Found #{ files.length } #{ type } files ..."
			list = for file in files
						name = path.basename file
						relative = path.dirname( file.replace( typePath, "") )
						working = self.fp.buildPath( workingBase, relative )
						{
							dependents: 0
							ext: () -> path.extname this.name
							fullPath: file
							imports: []
							name: name
							originalName: name
							outputPaths: output
							relativePath: relative
							workingPath: working
						}
			# only keep files whose extension anvil knows how to process
			filtered = _.filter list, ( x ) -> _.any self.extensions, ( y ) -> y == x.ext()
			onComplete filtered

stepComplete

Called at the end of each type's pipe-line in order to control
when markup gets built. Markup must get built last since it can include
built targets from both style and source in it's files.
* step {String}: ('source','style','markup')

	stepComplete: ( step ) ->
		@steps[ step ] = true
		if step != "markup" and @steps.markupReady()
			@buildMarkup()
		if step == "markup" and @steps.allDone()
			@inProcess = false
			@cleanWorking @callback
				
				

Continuous

Provides a way to trigger the build on file change

class Continuous

	constructor: ( @fp, @config, @onChange ) ->
		@style = @normalize @config.style
		@source = @normalize @config.source
		@markup = @normalize @config.markup
		@spec = @normalize @config.spec
		@watchers = []
		@watching = false
		_.bindAll this
		this

normalize

Coerce a value into an array: arrays pass through untouched,
anything else becomes a single-element array.
* x {Object}: anything

	normalize: ( x ) ->
		if _.isArray( x ) then x else [ x ]

setup

Begin watching every configured directory group. Guarded so repeated
calls while already watching do nothing.

	setup: () ->
		return if @watching
		@watching = true
		for group in [ @style, @source, @markup, @spec ]
			continue unless group
			@watchPath dir for dir in group
		return

watchPath

Calls watchFiles for all files found under the path specification
* path {String/Array}: the path specification to watch for changes in

	watchPath: ( path ) ->
		@fp.getFiles path, @watchFiles

watchFiles

Attach a file-system watcher to each file in the list
* files {Array}: the list of files to watch for changes in

	watchFiles: ( files ) ->
		@watchers.push( fs.watch file, @onEvent ) for file in files
		return

onEvent

This handler triggers the build and closes all watchers in the event
of a change. Tearing the watchers down is necessary to prevent event
storms that can trigger during the build process.
* event {Object}: the event that fired on the file system
* file {String}: the file that triggered the change

	onEvent: ( event, file ) ->
		return unless @watching
		@watching = false
		@watchers.pop().close() while @watchers.length > 0
		@onChange()

Mocha = require "mocha"
_ = require "underscore"
reporters = Mocha.reporters
interfaces = Mocha.interfaces
Context = Mocha.Context
Runner = Mocha.Runner
Suite = Mocha.Suite
path = require "path"
This class is an adaptation of the code found in _mocha
from TJ Holowaychuk's Mocha repository:
https://github.com/visionmedia/mocha/blob/master/bin/_mocha
class MochaRunner

	constructor: ( @fp, @scheduler, @config, @onComplete ) ->
		_.bindAll( this )

run

Collect all configured spec files and execute them with Mocha using the
configured (or default) options, then invoke the completion callback.

	run: () ->
		self = this
		if @config.spec
			forAll = @scheduler.parallel

			# fall back to (and store) default mocha options when none configured
			opts = @config.mocha or=
				growl: true
				ignoreLeaks: true
				reporter: "spec"
				ui: "bdd"
				colors: true

			# mocha expects reporter class names capitalized, e.g. "Spec"
			reporterName = opts.reporter.toLowerCase().replace( ///([a-z])///, ( x ) -> x.toUpperCase() )
			uiName = opts.ui.toLowerCase()
			mocha = new Mocha( {
				ui: uiName
				# honor the configurable option instead of hard-coding true
				ignoreLeaks: opts.ignoreLeaks
				colors: opts.colors
				growl: opts.growl
				slow: opts.slow
				timeout: opts.timeout
			} )
			mocha.reporter(reporterName)

			specs = if _.isString @config.spec then [ @config.spec ] else @config.spec

			forAll specs, @fp.getFiles, ( lists ) ->
				files = _.flatten lists
				for file in files
					# evict cached modules so edited specs reload between runs
					delete require.cache[ file ]
					mocha.addFile file

				mocha.run () ->
					self.onComplete()

SocketServer

Class to manage client notifications via socket.io

class SocketServer
	
	constructor: ( app ) ->
		_.bindAll( this )
		@clients = []
		@io = require( "socket.io" ).listen(app)
		@io.set "log level", 1

When a "connection" event occurs, call @addClient

		@io.sockets.on "connection", @addClient

addClient

Adds a new client to be notified upon change to watched files

Args:

  • socket {Object}: Socket object that is generated by a socket.io connection event.
	addClient: ( socket ) ->
		@clients.push socket
		# "end"/"disconnect" handlers receive event arguments (e.g. the
		# disconnect reason), NOT the socket, so close over the socket
		# explicitly instead of passing @removeClient directly
		socket.on "end", => @removeClient socket
		socket.on "disconnect", => @removeClient socket
		log.onEvent "client connected"

removeClient

Removes the socket from the current list of connected sockets
* socket {Object}: the socket that has disconnected

	removeClient: ( socket ) ->
		index = @clients.indexOf socket
		# guard against a missing socket; splice( -1, 1 ) would remove the
		# wrong (last) client
		@clients.splice index, 1 if index > -1
		log.onEvent "client disconnected"

refreshClients

Sends a 'refresh' message to all connected clients

	refreshClients: ->
		log.onEvent "Refreshing hooked clients"
		@notifyClients "refresh"

notifyClients

Send a message to all connected clients
* msg {String}: the message to send to connected clients

	notifyClients: ( msg ) ->
		for client in @clients
			client.emit msg, {}
express = require 'express'

Host

This class provides a simple static HTTP server
that can support all supported files types for Anvil
builds

class Host

	constructor: ( @fp, @scheduler, @compiler, @config ) ->
		self = this
		_.bindAll( this )

		@app = express.createServer()
		app = @app
		app.use express.bodyParser()
		app.use app.router

		hosts = @config.hosts

if the user told us what to do, make no assumptions
only host exactly what they specify

		if hosts
			# each key is a URL prefix, each value a directory to serve statically
			_.each( hosts, ( value, key ) ->
				app.use key, express.static( path.resolve value )
			)

otherwise, let's have some fun...

		else 
			# guess a sensible root from the output configuration: a site serves
			# its markup output, a lib serves its source output
			output = @config.output
			target = ""
			if @config.markup # this is a site
				if _.isString output 
					target = output
				else if _.isArray output
					target = output[ 0 ]
				else
					target = output.markup
			else # this is a lib
				if _.isString output 
					target = output
				else if _.isArray output
					target = output[ 0 ]
				else
					target = output.source
			app.use "/", express.static( path.resolve target )

		# external dependencies and specs get their own routes when configured
		if @config.ext
			app.use "/ext", express.static( path.resolve @config.ext )
		if @config.spec
			app.use "/spec", express.static( path.resolve @config.spec )

host anvil prerequisites for supporting certain browser features out of
the box

		anvilPath = path.resolve( path.dirname( fs.realpathSync( __filename ) ), "../ext" )
		console.log "Hosting anvil prerequisites from #{ anvilPath }"
		app.use "/anvil", express.static( anvilPath )

if a static file type is requested that fits an extension we know how to
compile, use the compiler to translate it on-the-fly

		app.get ///.*[.](coffee|kup|less|styl|md|markdown|haml)///, ( req, res ) ->
			# req.url is absolute ("/x.coffee"); prefix '.' to read it as a
			# path relative to the current working directory
			fileName = ".#{ req.url }"

			ext = path.extname fileName
			mimeType = self.contentTypes[ ext ]
			res.header 'Content-Type', mimeType
			self.fp.read fileName, ( content ) ->
				self.compiler.compilers[ ext ] content, ( compiled ) ->
					res.send compiled

		# default to port 3080 when no port is configured
		port = if @config.port then @config.port else 3080
		app.listen port

	# maps compilable extensions to the content type of their compiled output
	contentTypes:
		".coffee": "application/javascript"
		".less": "text/css"
		".styl": "text/css"
		".md": "text/html"
		".markdown": "text/html"
		".haml": "text/html"
		".kup": "text/html"

Cli

Provides the command line interface for interacting with Anvil and related modules

class Cli

	constructor: () ->
		@anvil = {}
		
		@ci = undefined
		@documenter = undefined
		@mochaRunner = undefined
		@socketServer = {}
		@postProcessor = {}
		@log = log
		@scheduler = new Scheduler()
		@crawler = new FSCrawler @scheduler
		@fp = new FSProvider @crawler, @log
		@configuration = new Configuration @fp, @scheduler, @log
		@compiler = new Compiler @fp, @log

		_.bindAll this

	# create the continuous-integration (file watch) module
	initCI: ( config ) ->
		@ci = new Continuous @fp, config, @onFileChange

	# spin up the static HTTP server and the socket.io notification server
	initHost: ( config ) ->
		@server = new Host @fp, @scheduler, @compiler, config
		@socketServer = new SocketServer @server.app
		# NOTE(review): config.port may be undefined here (Host falls back to
		# 3080 internally), which would log "port undefined" — confirm
		@log.onStep "Static HTTP server listening on port #{ config.port }"

	# create the mocha test runner
	initMocha: ( config ) ->
		@mochaRunner = new MochaRunner @fp, @scheduler, config, @onTestsComplete

	# ask connected browser clients to refresh (no-op unless the socket
	# server has been initialized)
	notifyHttpClients: () ->
		if @socketServer.refreshClients
			@log.onStep "Notifying clients of build completion"
			@socketServer.refreshClients()

	# called when anvil finishes a build: run tests when configured,
	# otherwise restart file watchers and notify clients
	onBuildComplete: () ->
		self = this
		@log.onComplete "Build #{ @anvil.buildNumber++ } completed"
		if self.mochaRunner

wrap the mocha runner invocation in a timeout call
to prevent odd timing issues.

			self.log.onStep "Running specifications with Mocha"
			self.mochaRunner.run()
		else 
			self.startCI()
			self.notifyHttpClients()

	# main wiring: receives the parsed configuration and constructs every
	# collaborator before kicking off the first build
	onConfig: ( config, stop ) ->
		@config = config

if stop comes back, then this is not a build and we're done

		if stop then process.exit 0
		

if the user wants CI, setup the continuous module

		if config.continuous then @initCI config
			

if the user wants mocha to run after the build, setup the mocha runner

		if config.mocha then @initMocha config

if the user wants hosting then, spin up the Static HTTP host and socket server

		if config.host then @initHost config

create the post processor instance

		@postProcessor = new PostProcessor config, @fp, @scheduler, @log
		@documenter = new Documenter config, @fp, @scheduler, @log
		@anvil = new Anvil @fp, @compiler, Combiner, @documenter, @scheduler, @postProcessor, @log, @onBuildComplete

		@anvil.build( config )

if we're using CI, kick it off the first time

		@startCI()

	# triggered by the Continuous watcher whenever a watched file changes
	onFileChange: () ->
		@log.onEvent "File change detected, starting build"
		# NOTE(review): this assigns an empty function and looks like dead
		# code — confirm whether @fileChange is read anywhere
		@fileChange = ->
		@anvil.build( @config )

	# after tests run, resume watching and notify connected clients
	onTestsComplete: () ->
		@log.onComplete "Tests completed"
		@startCI()
		@notifyHttpClients()
	
	# entry point: parse command line arguments and hand off to onConfig
	run: () ->
		@configuration.configure process.argv, @onConfig

	# start file watchers when CI mode is enabled
	startCI: () ->
		if @ci
			@log.onStep "Starting file watchers"
			@ci.setup()
			
			

# module entry point: construct the command line interface and run it
exports.run = ->
	new Cli().run()