##// END OF EJS Templates
merge with stable
Augie Fackler -
r30417:854190be merge default
parent child Browse files
Show More
@@ -1,1314 +1,1326 b''
1 #
1 #
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import cgi
10 import cgi
11 import copy
11 import copy
12 import mimetypes
12 import mimetypes
13 import os
13 import os
14 import re
14 import re
15
15
16 from ..i18n import _
16 from ..i18n import _
17 from ..node import hex, short
17 from ..node import hex, short
18
18
19 from .common import (
19 from .common import (
20 ErrorResponse,
20 ErrorResponse,
21 HTTP_FORBIDDEN,
21 HTTP_FORBIDDEN,
22 HTTP_NOT_FOUND,
22 HTTP_NOT_FOUND,
23 HTTP_OK,
23 HTTP_OK,
24 get_contact,
24 get_contact,
25 paritygen,
25 paritygen,
26 staticfile,
26 staticfile,
27 )
27 )
28
28
29 from .. import (
29 from .. import (
30 archival,
30 archival,
31 encoding,
31 encoding,
32 error,
32 error,
33 graphmod,
33 graphmod,
34 revset,
34 revset,
35 scmutil,
35 scmutil,
36 templatefilters,
36 templatefilters,
37 templater,
37 templater,
38 util,
38 util,
39 )
39 )
40
40
41 from . import (
41 from . import (
42 webutil,
42 webutil,
43 )
43 )
44
44
# Registry of handler names (in registration order) and the name -> handler
# mapping that the hgweb dispatcher consults.
__all__ = []
commands = {}

class webcommand(object):
    """Decorator used to register a web command handler.

    The decorator takes as its positional arguments the name/path the
    command should be accessible under.

    Usage:

        @webcommand('mycommand')
        def mycommand(web, req, tmpl):
            pass
    """

    def __init__(self, name):
        # URL path segment the decorated handler will be reachable under.
        self.name = name

    def __call__(self, func):
        # Record the handler in both registries and hand the function
        # back unchanged so it stays usable as a plain callable.
        __all__.append(self.name)
        commands[self.name] = func
        return func
68
68
@webcommand('log')
def log(web, req, tmpl):
    """
    /log[/{revision}[/{path}]]
    --------------------------

    Show repository or file history.

    For URLs of the form ``/log/{revision}``, a list of changesets starting at
    the specified changeset identifier is shown. If ``{revision}`` is not
    defined, the default is ``tip``. This form is equivalent to the
    ``changelog`` handler.

    For URLs of the form ``/log/{revision}/{file}``, the history for a specific
    file will be shown. This form is equivalent to the ``filelog`` handler.
    """
    # A non-empty 'file' form value selects per-file history; anything
    # else falls through to the repository-wide changelog.
    if 'file' in req.form and req.form['file'][0]:
        return filelog(web, req, tmpl)
    return changelog(web, req, tmpl)
90
90
@webcommand('rawfile')
def rawfile(web, req, tmpl):
    # Serve the raw bytes of a file. When web.guessmime is enabled, a
    # MIME type is guessed from the path / content; otherwise everything
    # is served as 'application/binary'.
    guessmime = web.configbool('web', 'guessmime', False)

    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        # No file requested: render the root manifest instead.
        content = manifest(web, req, tmpl)
        req.respond(HTTP_OK, web.ctype)
        return content

    try:
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError as inst:
        # The path may actually be a directory; try a manifest listing
        # and only re-raise the original lookup error if that fails too.
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()

    mt = 'application/binary'
    if guessmime:
        mt = mimetypes.guess_type(path)[0]
        if mt is None:
            # No extension-based guess: fall back on content sniffing.
            mt = 'application/binary' if util.binary(text) else 'text/plain'
    if mt.startswith('text/'):
        mt += '; charset="%s"' % encoding.encoding

    req.respond(HTTP_OK, mt, path, body=text)
    return []
126
126
def _filerevision(web, req, tmpl, fctx):
    """Render the ``filerevision`` template for the file context *fctx*."""
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)

    if util.binary(text):
        # Binary content is not rendered; show a short placeholder with
        # the guessed MIME type instead.
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def annotated_lines():
        # One dict per line, numbered from 1, with alternating parity
        # for row striping in the template.
        for num, content in enumerate(text.splitlines(True), 1):
            yield {"line": content,
                   "lineid": "l%d" % num,
                   "linenumber": "% 6d" % num,
                   "parity": next(parity)}

    return tmpl("filerevision",
                file=f,
                path=webutil.up(f),
                text=annotated_lines(),
                symrev=webutil.symrevorshortnode(req, fctx),
                rename=webutil.renamelink(fctx),
                permissions=fctx.manifest().flags(f),
                **webutil.commonentry(web.repo, fctx))
151
151
@webcommand('file')
def file(web, req, tmpl):
    """
    /file/{revision}[/{path}]
    -------------------------

    Show information about a directory or file in the repository.

    Info about the ``path`` given as a URL parameter will be rendered.

    If ``path`` is a directory, information about the entries in that
    directory will be rendered. This form is equivalent to the ``manifest``
    handler.

    If ``path`` is a file, information about that file will be shown via
    the ``filerevision`` template.

    If ``path`` is not defined, information about the root directory will
    be rendered.
    """
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        # Empty path: show the root directory listing.
        return manifest(web, req, tmpl)
    try:
        return _filerevision(web, req, tmpl, webutil.filectx(web.repo, req))
    except error.LookupError as inst:
        # Not a file -- maybe it is a directory. Try the manifest view,
        # re-raising the original error if that fails as well.
        try:
            return manifest(web, req, tmpl)
        except ErrorResponse:
            raise inst
182
182
def _search(web, req, tmpl):
    """Render search results for the changelog ``rev`` query.

    The query is classified as an exact revision, a revset expression, or
    plain keywords, and the matching changesets are fed to the ``search``
    template.
    """
    MODE_REVISION = 'rev'
    MODE_KEYWORD = 'keyword'
    MODE_REVSET = 'revset'

    def revsearch(ctx):
        # Exact revision: exactly one matching changeset.
        yield ctx

    def keywordsearch(query):
        lower = encoding.lower
        qw = lower(query).split()

        def revgen():
            # Walk history newest-first, materializing batches of up to
            # 100 revisions so they can be yielded in descending order.
            cl = web.repo.changelog
            for i in xrange(len(web.repo) - 1, 0, -100):
                batch = []
                for j in cl.revs(max(0, i - 99), i):
                    batch.append(web.repo[j])
                batch.reverse()
                for ctx in batch:
                    yield ctx

        for ctx in revgen():
            # Every keyword must appear in the user, description, or
            # file list for the changeset to match.
            miss = 0
            for q in qw:
                if not (q in lower(ctx.user()) or
                        q in lower(ctx.description()) or
                        q in lower(" ".join(ctx.files()))):
                    miss = 1
                    break
            if miss:
                continue

            yield ctx

    def revsetsearch(revs):
        for r in revs:
            yield web.repo[r]

    searchfuncs = {
        MODE_REVISION: (revsearch, 'exact revision search'),
        MODE_KEYWORD: (keywordsearch, 'literal keyword search'),
        MODE_REVSET: (revsetsearch, 'revset expression search'),
    }

    def getsearchmode(query):
        try:
            ctx = web.repo[query]
        except (error.RepoError, error.LookupError):
            # query is not an exact revision pointer, need to
            # decide if it's a revset expression or keywords
            pass
        else:
            return MODE_REVISION, ctx

        revdef = 'reverse(%s)' % query
        try:
            tree = revset.parse(revdef)
        except error.ParseError:
            # can't parse to a revset tree
            return MODE_KEYWORD, query

        if revset.depth(tree) <= 2:
            # no revset syntax used
            return MODE_KEYWORD, query

        if any((token, (value or '')[:3]) == ('string', 're:')
               for token, value, pos in revset.tokenize(revdef)):
            # Regex patterns are treated as keywords, not revsets.
            return MODE_KEYWORD, query

        funcsused = revset.funcsused(tree)
        if not funcsused.issubset(revset.safesymbols):
            # Disallow revset functions that could be expensive/unsafe.
            return MODE_KEYWORD, query

        mfunc = revset.match(web.repo.ui, revdef)
        try:
            revs = mfunc(web.repo)
            return MODE_REVSET, revs
            # ParseError: wrongly placed tokens, wrongs arguments, etc
            # RepoLookupError: no such revision, e.g. in 'revision:'
            # Abort: bookmark/tag not exists
            # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo
        except (error.ParseError, error.RepoLookupError, error.Abort,
                LookupError):
            return MODE_KEYWORD, query

    def changelist(**map):
        # Stream up to revcount search hits to the template.
        count = 0

        for ctx in searchfunc[0](funcarg):
            count += 1
            n = ctx.node()
            showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
            files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)

            yield tmpl('searchentry',
                       parity=next(parity),
                       changelogtag=showtags,
                       files=files,
                       **webutil.commonentry(web.repo, ctx))

            if count >= revcount:
                break

    query = req.form['rev'][0]
    revcount = web.maxchanges
    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    # Session variables for the "less"/"more" pagination links.
    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    lessvars['rev'] = query
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2
    morevars['rev'] = query

    mode, funcarg = getsearchmode(query)

    if 'forcekw' in req.form:
        # The user explicitly asked for a keyword search; remember what
        # the automatic classification would have been for the UI toggle.
        showforcekw = ''
        showunforcekw = searchfuncs[mode][1]
        mode = MODE_KEYWORD
        funcarg = query
    else:
        if mode != MODE_KEYWORD:
            showforcekw = searchfuncs[MODE_KEYWORD][1]
        else:
            showforcekw = ''
        showunforcekw = ''

    searchfunc = searchfuncs[mode]

    tip = web.repo['tip']
    parity = paritygen(web.stripecount)

    return tmpl('search', query=query, node=tip.hex(), symrev='tip',
                entries=changelist, archives=web.archivelist("tip"),
                morevars=morevars, lessvars=lessvars,
                modedesc=searchfunc[1],
                showforcekw=showforcekw, showunforcekw=showunforcekw)
329
329
@webcommand('changelog')
def changelog(web, req, tmpl, shortlog=False):
    """
    /changelog[/{revision}]
    -----------------------

    Show information about multiple changesets.

    If the optional ``revision`` URL argument is absent, information about
    all changesets starting at ``tip`` will be rendered. If the ``revision``
    argument is present, changesets will be shown starting from the specified
    revision.

    If ``revision`` is absent, the ``rev`` query string argument may be
    defined. This will perform a search for changesets.

    The argument for ``rev`` can be a single revision, a revision set,
    or a literal keyword to search for in changeset data (equivalent to
    :hg:`log -k`).

    The ``revcount`` query string argument defines the maximum numbers of
    changesets to render.

    For non-searches, the ``changelog`` template will be rendered.
    """

    query = ''
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    elif 'rev' in req.form:
        # A 'rev' query (without 'node') is a search, handled elsewhere.
        return _search(web, req, tmpl)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'

    def changelist():
        # Yield up to revcount + 1 entries walking backwards from pos;
        # the extra entry lets the caller detect "more results exist".
        revs = []
        if pos != -1:
            revs = web.repo.changelog.revs(pos, 0)
        curcount = 0
        for rev in revs:
            curcount += 1
            if curcount > revcount + 1:
                break

            entry = webutil.changelistentry(web, web.repo[rev], tmpl)
            entry['parity'] = next(parity)
            yield entry

    revcount = web.maxshortchanges if shortlog else web.maxchanges

    if 'revcount' in req.form:
        try:
            revcount = int(req.form.get('revcount', [revcount])[0])
            revcount = max(revcount, 1)
            tmpl.defaults['sessionvars']['revcount'] = revcount
        except ValueError:
            pass

    # Session variables for the "less"/"more" pagination links.
    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = len(web.repo)
    pos = ctx.rev()
    parity = paritygen(web.stripecount)

    changenav = webutil.revnav(web.repo).gen(pos, revcount, count)

    entries = list(changelist())
    latestentry = entries[:1]
    if len(entries) > revcount:
        # The sentinel extra entry becomes the "next page" pointer.
        nextentry = entries[-1:]
        entries = entries[:-1]
    else:
        nextentry = []

    return tmpl('shortlog' if shortlog else 'changelog', changenav=changenav,
                node=ctx.hex(), rev=pos, symrev=symrev, changesets=count,
                entries=entries,
                latestentry=latestentry, nextentry=nextentry,
                archives=web.archivelist("tip"), revcount=revcount,
                morevars=morevars, lessvars=lessvars, query=query)
418
418
@webcommand('shortlog')
def shortlog(web, req, tmpl):
    """
    /shortlog
    ---------

    Show basic information about a set of changesets.

    This accepts the same parameters as the ``changelog`` handler. The only
    difference is the ``shortlog`` template will be rendered instead of the
    ``changelog`` template.
    """
    # Thin wrapper: delegate everything to changelog in shortlog mode.
    return changelog(web, req, tmpl, shortlog=True)
432
432
@webcommand('changeset')
def changeset(web, req, tmpl):
    """
    /changeset[/{revision}]
    -----------------------

    Show information about a single changeset.

    A URL path argument is the changeset identifier to show. See ``hg help
    revisions`` for possible values. If not defined, the ``tip`` changeset
    will be shown.

    The ``changeset`` template is rendered. Contents of the ``changesettag``,
    ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many
    templates related to diffs may all be used to produce the output.
    """
    ctx = webutil.changectx(web.repo, req)
    return tmpl('changeset', **webutil.changesetentry(web, req, tmpl, ctx))

# 'rev' is an alias for 'changeset', registered under its own URL name.
rev = webcommand('rev')(changeset)
454
454
def decodepath(path):
    """Hook for mapping a path in the repository to a path in the
    working copy.

    Extensions (e.g., largefiles) can override this to remap files in
    the virtual file system presented by the manifest command below."""
    # Default implementation: the identity mapping.
    return path
462
462
@webcommand('manifest')
def manifest(web, req, tmpl):
    """
    /manifest[/{revision}[/{path}]]
    -------------------------------

    Show information about a directory.

    If the URL path arguments are omitted, information about the root
    directory for the ``tip`` changeset will be shown.

    Because this handler can only show information for directories, it
    is recommended to use the ``file`` handler instead, as it can handle both
    directories and files.

    The ``manifest`` template will be rendered for this handler.
    """
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
        symrev = webutil.symrevorshortnode(req, ctx)
    else:
        ctx = web.repo['tip']
        symrev = 'tip'
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    mf = ctx.manifest()
    node = ctx.node()

    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    # Normalize to a trailing-slash directory prefix.
    if path and path[-1] != "/":
        path += "/"
    pathlen = len(path)
    abspath = "/" + path

    for full, n in mf.iteritems():
        # the virtual path (working copy path) used for the full
        # (repository) path
        f = decodepath(full)

        if f[:pathlen] != path:
            continue
        remain = f[pathlen:]
        elements = remain.split('/')
        if len(elements) == 1:
            # Direct child file of the requested directory.
            files[remain] = full
        else:
            # Build a nested dict mirroring the subdirectory tree.
            h = dirs # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                if len(h) > 1:
                    break
            h[None] = None # denotes files present

    if mf and not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(**map):
        for basename in sorted(files):
            full = files[basename]

            fctx = ctx.filectx(full)
            yield {"file": full,
                   "parity": next(parity),
                   "basename": basename,
                   "date": fctx.date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full)}

    def dirlist(**map):
        for d in sorted(dirs):
            # Collapse chains of single-entry directories so that
            # "a/b/c" displays as one entry.
            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                k, v = h.items()[0]
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {"parity": next(parity),
                   "path": path,
                   "emptydirs": "/".join(emptydirs),
                   "basename": d}

    return tmpl("manifest",
                symrev=symrev,
                path=abspath,
                up=webutil.up(abspath),
                upparity=next(parity),
                fentries=filelist,
                dentries=dirlist,
                archives=web.archivelist(hex(node)),
                **webutil.commonentry(web.repo, ctx))
561
561
@webcommand('tags')
def tags(web, req, tmpl):
    """
    /tags
    -----

    Show information about tags.

    No arguments are accepted.

    The ``tags`` template is rendered.
    """
    taglist = list(reversed(web.repo.tagslist()))
    parity = paritygen(web.stripecount)

    def entries(notip, latestonly, **map):
        # Optionally drop the synthetic "tip" tag and/or keep only the
        # most recent entry.
        selected = taglist
        if notip:
            selected = [(k, n) for k, n in taglist if k != "tip"]
        if latestonly:
            selected = selected[:1]
        for k, n in selected:
            yield {"parity": next(parity),
                   "tag": k,
                   "date": web.repo[n].date(),
                   "node": hex(n)}

    return tmpl("tags",
                node=hex(web.repo.changelog.tip()),
                entries=lambda **x: entries(False, False, **x),
                entriesnotip=lambda **x: entries(True, False, **x),
                latestentry=lambda **x: entries(True, True, **x))
594
594
595 @webcommand('bookmarks')
595 @webcommand('bookmarks')
596 def bookmarks(web, req, tmpl):
596 def bookmarks(web, req, tmpl):
597 """
597 """
598 /bookmarks
598 /bookmarks
599 ----------
599 ----------
600
600
601 Show information about bookmarks.
601 Show information about bookmarks.
602
602
603 No arguments are accepted.
603 No arguments are accepted.
604
604
605 The ``bookmarks`` template is rendered.
605 The ``bookmarks`` template is rendered.
606 """
606 """
607 i = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
607 i = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
608 sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
608 sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
609 i = sorted(i, key=sortkey, reverse=True)
609 i = sorted(i, key=sortkey, reverse=True)
610 parity = paritygen(web.stripecount)
610 parity = paritygen(web.stripecount)
611
611
612 def entries(latestonly, **map):
612 def entries(latestonly, **map):
613 t = i
613 t = i
614 if latestonly:
614 if latestonly:
615 t = i[:1]
615 t = i[:1]
616 for k, n in t:
616 for k, n in t:
617 yield {"parity": next(parity),
617 yield {"parity": next(parity),
618 "bookmark": k,
618 "bookmark": k,
619 "date": web.repo[n].date(),
619 "date": web.repo[n].date(),
620 "node": hex(n)}
620 "node": hex(n)}
621
621
622 if i:
622 if i:
623 latestrev = i[0][1]
623 latestrev = i[0][1]
624 else:
624 else:
625 latestrev = -1
625 latestrev = -1
626
626
627 return tmpl("bookmarks",
627 return tmpl("bookmarks",
628 node=hex(web.repo.changelog.tip()),
628 node=hex(web.repo.changelog.tip()),
629 lastchange=[{"date": web.repo[latestrev].date()}],
629 lastchange=[{"date": web.repo[latestrev].date()}],
630 entries=lambda **x: entries(latestonly=False, **x),
630 entries=lambda **x: entries(latestonly=False, **x),
631 latestentry=lambda **x: entries(latestonly=True, **x))
631 latestentry=lambda **x: entries(latestonly=True, **x))
632
632
633 @webcommand('branches')
633 @webcommand('branches')
634 def branches(web, req, tmpl):
634 def branches(web, req, tmpl):
635 """
635 """
636 /branches
636 /branches
637 ---------
637 ---------
638
638
639 Show information about branches.
639 Show information about branches.
640
640
641 All known branches are contained in the output, even closed branches.
641 All known branches are contained in the output, even closed branches.
642
642
643 No arguments are accepted.
643 No arguments are accepted.
644
644
645 The ``branches`` template is rendered.
645 The ``branches`` template is rendered.
646 """
646 """
647 entries = webutil.branchentries(web.repo, web.stripecount)
647 entries = webutil.branchentries(web.repo, web.stripecount)
648 latestentry = webutil.branchentries(web.repo, web.stripecount, 1)
648 latestentry = webutil.branchentries(web.repo, web.stripecount, 1)
649 return tmpl('branches', node=hex(web.repo.changelog.tip()),
649 return tmpl('branches', node=hex(web.repo.changelog.tip()),
650 entries=entries, latestentry=latestentry)
650 entries=entries, latestentry=latestentry)
651
651
652 @webcommand('summary')
652 @webcommand('summary')
653 def summary(web, req, tmpl):
653 def summary(web, req, tmpl):
654 """
654 """
655 /summary
655 /summary
656 --------
656 --------
657
657
658 Show a summary of repository state.
658 Show a summary of repository state.
659
659
660 Information about the latest changesets, bookmarks, tags, and branches
660 Information about the latest changesets, bookmarks, tags, and branches
661 is captured by this handler.
661 is captured by this handler.
662
662
663 The ``summary`` template is rendered.
663 The ``summary`` template is rendered.
664 """
664 """
665 i = reversed(web.repo.tagslist())
665 i = reversed(web.repo.tagslist())
666
666
667 def tagentries(**map):
667 def tagentries(**map):
668 parity = paritygen(web.stripecount)
668 parity = paritygen(web.stripecount)
669 count = 0
669 count = 0
670 for k, n in i:
670 for k, n in i:
671 if k == "tip": # skip tip
671 if k == "tip": # skip tip
672 continue
672 continue
673
673
674 count += 1
674 count += 1
675 if count > 10: # limit to 10 tags
675 if count > 10: # limit to 10 tags
676 break
676 break
677
677
678 yield tmpl("tagentry",
678 yield tmpl("tagentry",
679 parity=next(parity),
679 parity=next(parity),
680 tag=k,
680 tag=k,
681 node=hex(n),
681 node=hex(n),
682 date=web.repo[n].date())
682 date=web.repo[n].date())
683
683
684 def bookmarks(**map):
684 def bookmarks(**map):
685 parity = paritygen(web.stripecount)
685 parity = paritygen(web.stripecount)
686 marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
686 marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo]
687 sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
687 sortkey = lambda b: (web.repo[b[1]].rev(), b[0])
688 marks = sorted(marks, key=sortkey, reverse=True)
688 marks = sorted(marks, key=sortkey, reverse=True)
689 for k, n in marks[:10]: # limit to 10 bookmarks
689 for k, n in marks[:10]: # limit to 10 bookmarks
690 yield {'parity': next(parity),
690 yield {'parity': next(parity),
691 'bookmark': k,
691 'bookmark': k,
692 'date': web.repo[n].date(),
692 'date': web.repo[n].date(),
693 'node': hex(n)}
693 'node': hex(n)}
694
694
695 def changelist(**map):
695 def changelist(**map):
696 parity = paritygen(web.stripecount, offset=start - end)
696 parity = paritygen(web.stripecount, offset=start - end)
697 l = [] # build a list in forward order for efficiency
697 l = [] # build a list in forward order for efficiency
698 revs = []
698 revs = []
699 if start < end:
699 if start < end:
700 revs = web.repo.changelog.revs(start, end - 1)
700 revs = web.repo.changelog.revs(start, end - 1)
701 for i in revs:
701 for i in revs:
702 ctx = web.repo[i]
702 ctx = web.repo[i]
703
703
704 l.append(tmpl(
704 l.append(tmpl(
705 'shortlogentry',
705 'shortlogentry',
706 parity=next(parity),
706 parity=next(parity),
707 **webutil.commonentry(web.repo, ctx)))
707 **webutil.commonentry(web.repo, ctx)))
708
708
709 for entry in reversed(l):
709 for entry in reversed(l):
710 yield entry
710 yield entry
711
711
712 tip = web.repo['tip']
712 tip = web.repo['tip']
713 count = len(web.repo)
713 count = len(web.repo)
714 start = max(0, count - web.maxchanges)
714 start = max(0, count - web.maxchanges)
715 end = min(count, start + web.maxchanges)
715 end = min(count, start + web.maxchanges)
716
716
717 return tmpl("summary",
717 return tmpl("summary",
718 desc=web.config("web", "description", "unknown"),
718 desc=web.config("web", "description", "unknown"),
719 owner=get_contact(web.config) or "unknown",
719 owner=get_contact(web.config) or "unknown",
720 lastchange=tip.date(),
720 lastchange=tip.date(),
721 tags=tagentries,
721 tags=tagentries,
722 bookmarks=bookmarks,
722 bookmarks=bookmarks,
723 branches=webutil.branchentries(web.repo, web.stripecount, 10),
723 branches=webutil.branchentries(web.repo, web.stripecount, 10),
724 shortlog=changelist,
724 shortlog=changelist,
725 node=tip.hex(),
725 node=tip.hex(),
726 symrev='tip',
726 symrev='tip',
727 archives=web.archivelist("tip"),
727 archives=web.archivelist("tip"),
728 labels=web.configlist('web', 'labels'))
728 labels=web.configlist('web', 'labels'))
729
729
730 @webcommand('filediff')
730 @webcommand('filediff')
731 def filediff(web, req, tmpl):
731 def filediff(web, req, tmpl):
732 """
732 """
733 /diff/{revision}/{path}
733 /diff/{revision}/{path}
734 -----------------------
734 -----------------------
735
735
736 Show how a file changed in a particular commit.
736 Show how a file changed in a particular commit.
737
737
738 The ``filediff`` template is rendered.
738 The ``filediff`` template is rendered.
739
739
740 This handler is registered under both the ``/diff`` and ``/filediff``
740 This handler is registered under both the ``/diff`` and ``/filediff``
741 paths. ``/diff`` is used in modern code.
741 paths. ``/diff`` is used in modern code.
742 """
742 """
743 fctx, ctx = None, None
743 fctx, ctx = None, None
744 try:
744 try:
745 fctx = webutil.filectx(web.repo, req)
745 fctx = webutil.filectx(web.repo, req)
746 except LookupError:
746 except LookupError:
747 ctx = webutil.changectx(web.repo, req)
747 ctx = webutil.changectx(web.repo, req)
748 path = webutil.cleanpath(web.repo, req.form['file'][0])
748 path = webutil.cleanpath(web.repo, req.form['file'][0])
749 if path not in ctx.files():
749 if path not in ctx.files():
750 raise
750 raise
751
751
752 if fctx is not None:
752 if fctx is not None:
753 path = fctx.path()
753 path = fctx.path()
754 ctx = fctx.changectx()
754 ctx = fctx.changectx()
755
755
756 parity = paritygen(web.stripecount)
756 parity = paritygen(web.stripecount)
757 style = web.config('web', 'style', 'paper')
757 style = web.config('web', 'style', 'paper')
758 if 'style' in req.form:
758 if 'style' in req.form:
759 style = req.form['style'][0]
759 style = req.form['style'][0]
760
760
761 diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style)
761 diffs = webutil.diffs(web.repo, tmpl, ctx, None, [path], parity, style)
762 if fctx is not None:
762 if fctx is not None:
763 rename = webutil.renamelink(fctx)
763 rename = webutil.renamelink(fctx)
764 ctx = fctx
764 ctx = fctx
765 else:
765 else:
766 rename = []
766 rename = []
767 ctx = ctx
767 ctx = ctx
768 return tmpl("filediff",
768 return tmpl("filediff",
769 file=path,
769 file=path,
770 symrev=webutil.symrevorshortnode(req, ctx),
770 symrev=webutil.symrevorshortnode(req, ctx),
771 rename=rename,
771 rename=rename,
772 diff=diffs,
772 diff=diffs,
773 **webutil.commonentry(web.repo, ctx))
773 **webutil.commonentry(web.repo, ctx))
774
774
775 diff = webcommand('diff')(filediff)
775 diff = webcommand('diff')(filediff)
776
776
777 @webcommand('comparison')
777 @webcommand('comparison')
778 def comparison(web, req, tmpl):
778 def comparison(web, req, tmpl):
779 """
779 """
780 /comparison/{revision}/{path}
780 /comparison/{revision}/{path}
781 -----------------------------
781 -----------------------------
782
782
783 Show a comparison between the old and new versions of a file from changes
783 Show a comparison between the old and new versions of a file from changes
784 made on a particular revision.
784 made on a particular revision.
785
785
786 This is similar to the ``diff`` handler. However, this form features
786 This is similar to the ``diff`` handler. However, this form features
787 a split or side-by-side diff rather than a unified diff.
787 a split or side-by-side diff rather than a unified diff.
788
788
789 The ``context`` query string argument can be used to control the lines of
789 The ``context`` query string argument can be used to control the lines of
790 context in the diff.
790 context in the diff.
791
791
792 The ``filecomparison`` template is rendered.
792 The ``filecomparison`` template is rendered.
793 """
793 """
794 ctx = webutil.changectx(web.repo, req)
794 ctx = webutil.changectx(web.repo, req)
795 if 'file' not in req.form:
795 if 'file' not in req.form:
796 raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
796 raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
797 path = webutil.cleanpath(web.repo, req.form['file'][0])
797 path = webutil.cleanpath(web.repo, req.form['file'][0])
798
798
799 parsecontext = lambda v: v == 'full' and -1 or int(v)
799 parsecontext = lambda v: v == 'full' and -1 or int(v)
800 if 'context' in req.form:
800 if 'context' in req.form:
801 context = parsecontext(req.form['context'][0])
801 context = parsecontext(req.form['context'][0])
802 else:
802 else:
803 context = parsecontext(web.config('web', 'comparisoncontext', '5'))
803 context = parsecontext(web.config('web', 'comparisoncontext', '5'))
804
804
805 def filelines(f):
805 def filelines(f):
806 if util.binary(f.data()):
806 if util.binary(f.data()):
807 mt = mimetypes.guess_type(f.path())[0]
807 mt = mimetypes.guess_type(f.path())[0]
808 if not mt:
808 if not mt:
809 mt = 'application/octet-stream'
809 mt = 'application/octet-stream'
810 return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
810 return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
811 return f.data().splitlines()
811 return f.data().splitlines()
812
812
813 fctx = None
813 fctx = None
814 parent = ctx.p1()
814 parent = ctx.p1()
815 leftrev = parent.rev()
815 leftrev = parent.rev()
816 leftnode = parent.node()
816 leftnode = parent.node()
817 rightrev = ctx.rev()
817 rightrev = ctx.rev()
818 rightnode = ctx.node()
818 rightnode = ctx.node()
819 if path in ctx:
819 if path in ctx:
820 fctx = ctx[path]
820 fctx = ctx[path]
821 rightlines = filelines(fctx)
821 rightlines = filelines(fctx)
822 if path not in parent:
822 if path not in parent:
823 leftlines = ()
823 leftlines = ()
824 else:
824 else:
825 pfctx = parent[path]
825 pfctx = parent[path]
826 leftlines = filelines(pfctx)
826 leftlines = filelines(pfctx)
827 else:
827 else:
828 rightlines = ()
828 rightlines = ()
829 pfctx = ctx.parents()[0][path]
829 pfctx = ctx.parents()[0][path]
830 leftlines = filelines(pfctx)
830 leftlines = filelines(pfctx)
831
831
832 comparison = webutil.compare(tmpl, context, leftlines, rightlines)
832 comparison = webutil.compare(tmpl, context, leftlines, rightlines)
833 if fctx is not None:
833 if fctx is not None:
834 rename = webutil.renamelink(fctx)
834 rename = webutil.renamelink(fctx)
835 ctx = fctx
835 ctx = fctx
836 else:
836 else:
837 rename = []
837 rename = []
838 ctx = ctx
838 ctx = ctx
839 return tmpl('filecomparison',
839 return tmpl('filecomparison',
840 file=path,
840 file=path,
841 symrev=webutil.symrevorshortnode(req, ctx),
841 symrev=webutil.symrevorshortnode(req, ctx),
842 rename=rename,
842 rename=rename,
843 leftrev=leftrev,
843 leftrev=leftrev,
844 leftnode=hex(leftnode),
844 leftnode=hex(leftnode),
845 rightrev=rightrev,
845 rightrev=rightrev,
846 rightnode=hex(rightnode),
846 rightnode=hex(rightnode),
847 comparison=comparison,
847 comparison=comparison,
848 **webutil.commonentry(web.repo, ctx))
848 **webutil.commonentry(web.repo, ctx))
849
849
850 @webcommand('annotate')
850 @webcommand('annotate')
851 def annotate(web, req, tmpl):
851 def annotate(web, req, tmpl):
852 """
852 """
853 /annotate/{revision}/{path}
853 /annotate/{revision}/{path}
854 ---------------------------
854 ---------------------------
855
855
856 Show changeset information for each line in a file.
856 Show changeset information for each line in a file.
857
857
858 The ``fileannotate`` template is rendered.
858 The ``fileannotate`` template is rendered.
859 """
859 """
860 fctx = webutil.filectx(web.repo, req)
860 fctx = webutil.filectx(web.repo, req)
861 f = fctx.path()
861 f = fctx.path()
862 parity = paritygen(web.stripecount)
862 parity = paritygen(web.stripecount)
863
863
864 # parents() is called once per line and several lines likely belong to
865 # same revision. So it is worth caching.
866 # TODO there are still redundant operations within basefilectx.parents()
867 # and from the fctx.annotate() call itself that could be cached.
868 parentscache = {}
864 def parents(f):
869 def parents(f):
865 for p in f.parents():
870 rev = f.rev()
866 yield {
871 if rev not in parentscache:
867 "node": p.hex(),
872 parentscache[rev] = []
868 "rev": p.rev(),
873 for p in f.parents():
869 }
874 entry = {
875 'node': p.hex(),
876 'rev': p.rev(),
877 }
878 parentscache[rev].append(entry)
879
880 for p in parentscache[rev]:
881 yield p
870
882
871 def annotate(**map):
883 def annotate(**map):
872 if util.binary(fctx.data()):
884 if util.binary(fctx.data()):
873 mt = (mimetypes.guess_type(fctx.path())[0]
885 mt = (mimetypes.guess_type(fctx.path())[0]
874 or 'application/octet-stream')
886 or 'application/octet-stream')
875 lines = [((fctx.filectx(fctx.filerev()), 1), '(binary:%s)' % mt)]
887 lines = [((fctx.filectx(fctx.filerev()), 1), '(binary:%s)' % mt)]
876 else:
888 else:
877 lines = webutil.annotate(fctx, web.repo.ui)
889 lines = webutil.annotate(fctx, web.repo.ui)
878
890
879 previousrev = None
891 previousrev = None
880 blockparitygen = paritygen(1)
892 blockparitygen = paritygen(1)
881 for lineno, ((f, targetline), l) in enumerate(lines):
893 for lineno, ((f, targetline), l) in enumerate(lines):
882 rev = f.rev()
894 rev = f.rev()
883 if rev != previousrev:
895 if rev != previousrev:
884 blockhead = True
896 blockhead = True
885 blockparity = next(blockparitygen)
897 blockparity = next(blockparitygen)
886 else:
898 else:
887 blockhead = None
899 blockhead = None
888 previousrev = rev
900 previousrev = rev
889 yield {"parity": next(parity),
901 yield {"parity": next(parity),
890 "node": f.hex(),
902 "node": f.hex(),
891 "rev": rev,
903 "rev": rev,
892 "author": f.user(),
904 "author": f.user(),
893 "parents": parents(f),
905 "parents": parents(f),
894 "desc": f.description(),
906 "desc": f.description(),
895 "extra": f.extra(),
907 "extra": f.extra(),
896 "file": f.path(),
908 "file": f.path(),
897 "blockhead": blockhead,
909 "blockhead": blockhead,
898 "blockparity": blockparity,
910 "blockparity": blockparity,
899 "targetline": targetline,
911 "targetline": targetline,
900 "line": l,
912 "line": l,
901 "lineno": lineno + 1,
913 "lineno": lineno + 1,
902 "lineid": "l%d" % (lineno + 1),
914 "lineid": "l%d" % (lineno + 1),
903 "linenumber": "% 6d" % (lineno + 1),
915 "linenumber": "% 6d" % (lineno + 1),
904 "revdate": f.date()}
916 "revdate": f.date()}
905
917
906 return tmpl("fileannotate",
918 return tmpl("fileannotate",
907 file=f,
919 file=f,
908 annotate=annotate,
920 annotate=annotate,
909 path=webutil.up(f),
921 path=webutil.up(f),
910 symrev=webutil.symrevorshortnode(req, fctx),
922 symrev=webutil.symrevorshortnode(req, fctx),
911 rename=webutil.renamelink(fctx),
923 rename=webutil.renamelink(fctx),
912 permissions=fctx.manifest().flags(f),
924 permissions=fctx.manifest().flags(f),
913 **webutil.commonentry(web.repo, fctx))
925 **webutil.commonentry(web.repo, fctx))
914
926
915 @webcommand('filelog')
927 @webcommand('filelog')
916 def filelog(web, req, tmpl):
928 def filelog(web, req, tmpl):
917 """
929 """
918 /filelog/{revision}/{path}
930 /filelog/{revision}/{path}
919 --------------------------
931 --------------------------
920
932
921 Show information about the history of a file in the repository.
933 Show information about the history of a file in the repository.
922
934
923 The ``revcount`` query string argument can be defined to control the
935 The ``revcount`` query string argument can be defined to control the
924 maximum number of entries to show.
936 maximum number of entries to show.
925
937
926 The ``filelog`` template will be rendered.
938 The ``filelog`` template will be rendered.
927 """
939 """
928
940
929 try:
941 try:
930 fctx = webutil.filectx(web.repo, req)
942 fctx = webutil.filectx(web.repo, req)
931 f = fctx.path()
943 f = fctx.path()
932 fl = fctx.filelog()
944 fl = fctx.filelog()
933 except error.LookupError:
945 except error.LookupError:
934 f = webutil.cleanpath(web.repo, req.form['file'][0])
946 f = webutil.cleanpath(web.repo, req.form['file'][0])
935 fl = web.repo.file(f)
947 fl = web.repo.file(f)
936 numrevs = len(fl)
948 numrevs = len(fl)
937 if not numrevs: # file doesn't exist at all
949 if not numrevs: # file doesn't exist at all
938 raise
950 raise
939 rev = webutil.changectx(web.repo, req).rev()
951 rev = webutil.changectx(web.repo, req).rev()
940 first = fl.linkrev(0)
952 first = fl.linkrev(0)
941 if rev < first: # current rev is from before file existed
953 if rev < first: # current rev is from before file existed
942 raise
954 raise
943 frev = numrevs - 1
955 frev = numrevs - 1
944 while fl.linkrev(frev) > rev:
956 while fl.linkrev(frev) > rev:
945 frev -= 1
957 frev -= 1
946 fctx = web.repo.filectx(f, fl.linkrev(frev))
958 fctx = web.repo.filectx(f, fl.linkrev(frev))
947
959
948 revcount = web.maxshortchanges
960 revcount = web.maxshortchanges
949 if 'revcount' in req.form:
961 if 'revcount' in req.form:
950 try:
962 try:
951 revcount = int(req.form.get('revcount', [revcount])[0])
963 revcount = int(req.form.get('revcount', [revcount])[0])
952 revcount = max(revcount, 1)
964 revcount = max(revcount, 1)
953 tmpl.defaults['sessionvars']['revcount'] = revcount
965 tmpl.defaults['sessionvars']['revcount'] = revcount
954 except ValueError:
966 except ValueError:
955 pass
967 pass
956
968
957 lessvars = copy.copy(tmpl.defaults['sessionvars'])
969 lessvars = copy.copy(tmpl.defaults['sessionvars'])
958 lessvars['revcount'] = max(revcount / 2, 1)
970 lessvars['revcount'] = max(revcount / 2, 1)
959 morevars = copy.copy(tmpl.defaults['sessionvars'])
971 morevars = copy.copy(tmpl.defaults['sessionvars'])
960 morevars['revcount'] = revcount * 2
972 morevars['revcount'] = revcount * 2
961
973
962 count = fctx.filerev() + 1
974 count = fctx.filerev() + 1
963 start = max(0, fctx.filerev() - revcount + 1) # first rev on this page
975 start = max(0, fctx.filerev() - revcount + 1) # first rev on this page
964 end = min(count, start + revcount) # last rev on this page
976 end = min(count, start + revcount) # last rev on this page
965 parity = paritygen(web.stripecount, offset=start - end)
977 parity = paritygen(web.stripecount, offset=start - end)
966
978
967 def entries():
979 def entries():
968 l = []
980 l = []
969
981
970 repo = web.repo
982 repo = web.repo
971 revs = fctx.filelog().revs(start, end - 1)
983 revs = fctx.filelog().revs(start, end - 1)
972 for i in revs:
984 for i in revs:
973 iterfctx = fctx.filectx(i)
985 iterfctx = fctx.filectx(i)
974
986
975 l.append(dict(
987 l.append(dict(
976 parity=next(parity),
988 parity=next(parity),
977 filerev=i,
989 filerev=i,
978 file=f,
990 file=f,
979 rename=webutil.renamelink(iterfctx),
991 rename=webutil.renamelink(iterfctx),
980 **webutil.commonentry(repo, iterfctx)))
992 **webutil.commonentry(repo, iterfctx)))
981 for e in reversed(l):
993 for e in reversed(l):
982 yield e
994 yield e
983
995
984 entries = list(entries())
996 entries = list(entries())
985 latestentry = entries[:1]
997 latestentry = entries[:1]
986
998
987 revnav = webutil.filerevnav(web.repo, fctx.path())
999 revnav = webutil.filerevnav(web.repo, fctx.path())
988 nav = revnav.gen(end - 1, revcount, count)
1000 nav = revnav.gen(end - 1, revcount, count)
989 return tmpl("filelog",
1001 return tmpl("filelog",
990 file=f,
1002 file=f,
991 nav=nav,
1003 nav=nav,
992 symrev=webutil.symrevorshortnode(req, fctx),
1004 symrev=webutil.symrevorshortnode(req, fctx),
993 entries=entries,
1005 entries=entries,
994 latestentry=latestentry,
1006 latestentry=latestentry,
995 revcount=revcount,
1007 revcount=revcount,
996 morevars=morevars,
1008 morevars=morevars,
997 lessvars=lessvars,
1009 lessvars=lessvars,
998 **webutil.commonentry(web.repo, fctx))
1010 **webutil.commonentry(web.repo, fctx))
999
1011
1000 @webcommand('archive')
1012 @webcommand('archive')
1001 def archive(web, req, tmpl):
1013 def archive(web, req, tmpl):
1002 """
1014 """
1003 /archive/{revision}.{format}[/{path}]
1015 /archive/{revision}.{format}[/{path}]
1004 -------------------------------------
1016 -------------------------------------
1005
1017
1006 Obtain an archive of repository content.
1018 Obtain an archive of repository content.
1007
1019
1008 The content and type of the archive is defined by a URL path parameter.
1020 The content and type of the archive is defined by a URL path parameter.
1009 ``format`` is the file extension of the archive type to be generated. e.g.
1021 ``format`` is the file extension of the archive type to be generated. e.g.
1010 ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
1022 ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your
1011 server configuration.
1023 server configuration.
1012
1024
1013 The optional ``path`` URL parameter controls content to include in the
1025 The optional ``path`` URL parameter controls content to include in the
1014 archive. If omitted, every file in the specified revision is present in the
1026 archive. If omitted, every file in the specified revision is present in the
1015 archive. If included, only the specified file or contents of the specified
1027 archive. If included, only the specified file or contents of the specified
1016 directory will be included in the archive.
1028 directory will be included in the archive.
1017
1029
1018 No template is used for this handler. Raw, binary content is generated.
1030 No template is used for this handler. Raw, binary content is generated.
1019 """
1031 """
1020
1032
1021 type_ = req.form.get('type', [None])[0]
1033 type_ = req.form.get('type', [None])[0]
1022 allowed = web.configlist("web", "allow_archive")
1034 allowed = web.configlist("web", "allow_archive")
1023 key = req.form['node'][0]
1035 key = req.form['node'][0]
1024
1036
1025 if type_ not in web.archives:
1037 if type_ not in web.archives:
1026 msg = 'Unsupported archive type: %s' % type_
1038 msg = 'Unsupported archive type: %s' % type_
1027 raise ErrorResponse(HTTP_NOT_FOUND, msg)
1039 raise ErrorResponse(HTTP_NOT_FOUND, msg)
1028
1040
1029 if not ((type_ in allowed or
1041 if not ((type_ in allowed or
1030 web.configbool("web", "allow" + type_, False))):
1042 web.configbool("web", "allow" + type_, False))):
1031 msg = 'Archive type not allowed: %s' % type_
1043 msg = 'Archive type not allowed: %s' % type_
1032 raise ErrorResponse(HTTP_FORBIDDEN, msg)
1044 raise ErrorResponse(HTTP_FORBIDDEN, msg)
1033
1045
1034 reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
1046 reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
1035 cnode = web.repo.lookup(key)
1047 cnode = web.repo.lookup(key)
1036 arch_version = key
1048 arch_version = key
1037 if cnode == key or key == 'tip':
1049 if cnode == key or key == 'tip':
1038 arch_version = short(cnode)
1050 arch_version = short(cnode)
1039 name = "%s-%s" % (reponame, arch_version)
1051 name = "%s-%s" % (reponame, arch_version)
1040
1052
1041 ctx = webutil.changectx(web.repo, req)
1053 ctx = webutil.changectx(web.repo, req)
1042 pats = []
1054 pats = []
1043 matchfn = scmutil.match(ctx, [])
1055 matchfn = scmutil.match(ctx, [])
1044 file = req.form.get('file', None)
1056 file = req.form.get('file', None)
1045 if file:
1057 if file:
1046 pats = ['path:' + file[0]]
1058 pats = ['path:' + file[0]]
1047 matchfn = scmutil.match(ctx, pats, default='path')
1059 matchfn = scmutil.match(ctx, pats, default='path')
1048 if pats:
1060 if pats:
1049 files = [f for f in ctx.manifest().keys() if matchfn(f)]
1061 files = [f for f in ctx.manifest().keys() if matchfn(f)]
1050 if not files:
1062 if not files:
1051 raise ErrorResponse(HTTP_NOT_FOUND,
1063 raise ErrorResponse(HTTP_NOT_FOUND,
1052 'file(s) not found: %s' % file[0])
1064 'file(s) not found: %s' % file[0])
1053
1065
1054 mimetype, artype, extension, encoding = web.archivespecs[type_]
1066 mimetype, artype, extension, encoding = web.archivespecs[type_]
1055 headers = [
1067 headers = [
1056 ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
1068 ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
1057 ]
1069 ]
1058 if encoding:
1070 if encoding:
1059 headers.append(('Content-Encoding', encoding))
1071 headers.append(('Content-Encoding', encoding))
1060 req.headers.extend(headers)
1072 req.headers.extend(headers)
1061 req.respond(HTTP_OK, mimetype)
1073 req.respond(HTTP_OK, mimetype)
1062
1074
1063 archival.archive(web.repo, req, cnode, artype, prefix=name,
1075 archival.archive(web.repo, req, cnode, artype, prefix=name,
1064 matchfn=matchfn,
1076 matchfn=matchfn,
1065 subrepos=web.configbool("web", "archivesubrepos"))
1077 subrepos=web.configbool("web", "archivesubrepos"))
1066 return []
1078 return []
1067
1079
1068
1080
1069 @webcommand('static')
1081 @webcommand('static')
1070 def static(web, req, tmpl):
1082 def static(web, req, tmpl):
1071 fname = req.form['file'][0]
1083 fname = req.form['file'][0]
1072 # a repo owner may set web.static in .hg/hgrc to get any file
1084 # a repo owner may set web.static in .hg/hgrc to get any file
1073 # readable by the user running the CGI script
1085 # readable by the user running the CGI script
1074 static = web.config("web", "static", None, untrusted=False)
1086 static = web.config("web", "static", None, untrusted=False)
1075 if not static:
1087 if not static:
1076 tp = web.templatepath or templater.templatepaths()
1088 tp = web.templatepath or templater.templatepaths()
1077 if isinstance(tp, str):
1089 if isinstance(tp, str):
1078 tp = [tp]
1090 tp = [tp]
1079 static = [os.path.join(p, 'static') for p in tp]
1091 static = [os.path.join(p, 'static') for p in tp]
1080 staticfile(static, fname, req)
1092 staticfile(static, fname, req)
1081 return []
1093 return []
1082
1094
1083 @webcommand('graph')
1095 @webcommand('graph')
1084 def graph(web, req, tmpl):
1096 def graph(web, req, tmpl):
1085 """
1097 """
1086 /graph[/{revision}]
1098 /graph[/{revision}]
1087 -------------------
1099 -------------------
1088
1100
1089 Show information about the graphical topology of the repository.
1101 Show information about the graphical topology of the repository.
1090
1102
1091 Information rendered by this handler can be used to create visual
1103 Information rendered by this handler can be used to create visual
1092 representations of repository topology.
1104 representations of repository topology.
1093
1105
1094 The ``revision`` URL parameter controls the starting changeset.
1106 The ``revision`` URL parameter controls the starting changeset.
1095
1107
1096 The ``revcount`` query string argument can define the number of changesets
1108 The ``revcount`` query string argument can define the number of changesets
1097 to show information for.
1109 to show information for.
1098
1110
1099 This handler will render the ``graph`` template.
1111 This handler will render the ``graph`` template.
1100 """
1112 """
1101
1113
1102 if 'node' in req.form:
1114 if 'node' in req.form:
1103 ctx = webutil.changectx(web.repo, req)
1115 ctx = webutil.changectx(web.repo, req)
1104 symrev = webutil.symrevorshortnode(req, ctx)
1116 symrev = webutil.symrevorshortnode(req, ctx)
1105 else:
1117 else:
1106 ctx = web.repo['tip']
1118 ctx = web.repo['tip']
1107 symrev = 'tip'
1119 symrev = 'tip'
1108 rev = ctx.rev()
1120 rev = ctx.rev()
1109
1121
1110 bg_height = 39
1122 bg_height = 39
1111 revcount = web.maxshortchanges
1123 revcount = web.maxshortchanges
1112 if 'revcount' in req.form:
1124 if 'revcount' in req.form:
1113 try:
1125 try:
1114 revcount = int(req.form.get('revcount', [revcount])[0])
1126 revcount = int(req.form.get('revcount', [revcount])[0])
1115 revcount = max(revcount, 1)
1127 revcount = max(revcount, 1)
1116 tmpl.defaults['sessionvars']['revcount'] = revcount
1128 tmpl.defaults['sessionvars']['revcount'] = revcount
1117 except ValueError:
1129 except ValueError:
1118 pass
1130 pass
1119
1131
1120 lessvars = copy.copy(tmpl.defaults['sessionvars'])
1132 lessvars = copy.copy(tmpl.defaults['sessionvars'])
1121 lessvars['revcount'] = max(revcount / 2, 1)
1133 lessvars['revcount'] = max(revcount / 2, 1)
1122 morevars = copy.copy(tmpl.defaults['sessionvars'])
1134 morevars = copy.copy(tmpl.defaults['sessionvars'])
1123 morevars['revcount'] = revcount * 2
1135 morevars['revcount'] = revcount * 2
1124
1136
1125 count = len(web.repo)
1137 count = len(web.repo)
1126 pos = rev
1138 pos = rev
1127
1139
1128 uprev = min(max(0, count - 1), rev + revcount)
1140 uprev = min(max(0, count - 1), rev + revcount)
1129 downrev = max(0, rev - revcount)
1141 downrev = max(0, rev - revcount)
1130 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
1142 changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
1131
1143
1132 tree = []
1144 tree = []
1133 if pos != -1:
1145 if pos != -1:
1134 allrevs = web.repo.changelog.revs(pos, 0)
1146 allrevs = web.repo.changelog.revs(pos, 0)
1135 revs = []
1147 revs = []
1136 for i in allrevs:
1148 for i in allrevs:
1137 revs.append(i)
1149 revs.append(i)
1138 if len(revs) >= revcount:
1150 if len(revs) >= revcount:
1139 break
1151 break
1140
1152
1141 # We have to feed a baseset to dagwalker as it is expecting smartset
1153 # We have to feed a baseset to dagwalker as it is expecting smartset
1142 # object. This does not have a big impact on hgweb performance itself
1154 # object. This does not have a big impact on hgweb performance itself
1143 # since hgweb graphing code is not itself lazy yet.
1155 # since hgweb graphing code is not itself lazy yet.
1144 dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
1156 dag = graphmod.dagwalker(web.repo, revset.baseset(revs))
1145 # As we said one line above... not lazy.
1157 # As we said one line above... not lazy.
1146 tree = list(graphmod.colored(dag, web.repo))
1158 tree = list(graphmod.colored(dag, web.repo))
1147
1159
1148 def getcolumns(tree):
1160 def getcolumns(tree):
1149 cols = 0
1161 cols = 0
1150 for (id, type, ctx, vtx, edges) in tree:
1162 for (id, type, ctx, vtx, edges) in tree:
1151 if type != graphmod.CHANGESET:
1163 if type != graphmod.CHANGESET:
1152 continue
1164 continue
1153 cols = max(cols, max([edge[0] for edge in edges] or [0]),
1165 cols = max(cols, max([edge[0] for edge in edges] or [0]),
1154 max([edge[1] for edge in edges] or [0]))
1166 max([edge[1] for edge in edges] or [0]))
1155 return cols
1167 return cols
1156
1168
1157 def graphdata(usetuples, encodestr):
1169 def graphdata(usetuples, encodestr):
1158 data = []
1170 data = []
1159
1171
1160 row = 0
1172 row = 0
1161 for (id, type, ctx, vtx, edges) in tree:
1173 for (id, type, ctx, vtx, edges) in tree:
1162 if type != graphmod.CHANGESET:
1174 if type != graphmod.CHANGESET:
1163 continue
1175 continue
1164 node = str(ctx)
1176 node = str(ctx)
1165 age = encodestr(templatefilters.age(ctx.date()))
1177 age = encodestr(templatefilters.age(ctx.date()))
1166 desc = templatefilters.firstline(encodestr(ctx.description()))
1178 desc = templatefilters.firstline(encodestr(ctx.description()))
1167 desc = cgi.escape(templatefilters.nonempty(desc))
1179 desc = cgi.escape(templatefilters.nonempty(desc))
1168 user = cgi.escape(templatefilters.person(encodestr(ctx.user())))
1180 user = cgi.escape(templatefilters.person(encodestr(ctx.user())))
1169 branch = cgi.escape(encodestr(ctx.branch()))
1181 branch = cgi.escape(encodestr(ctx.branch()))
1170 try:
1182 try:
1171 branchnode = web.repo.branchtip(branch)
1183 branchnode = web.repo.branchtip(branch)
1172 except error.RepoLookupError:
1184 except error.RepoLookupError:
1173 branchnode = None
1185 branchnode = None
1174 branch = branch, branchnode == ctx.node()
1186 branch = branch, branchnode == ctx.node()
1175
1187
1176 if usetuples:
1188 if usetuples:
1177 data.append((node, vtx, edges, desc, user, age, branch,
1189 data.append((node, vtx, edges, desc, user, age, branch,
1178 [cgi.escape(encodestr(x)) for x in ctx.tags()],
1190 [cgi.escape(encodestr(x)) for x in ctx.tags()],
1179 [cgi.escape(encodestr(x))
1191 [cgi.escape(encodestr(x))
1180 for x in ctx.bookmarks()]))
1192 for x in ctx.bookmarks()]))
1181 else:
1193 else:
1182 edgedata = [{'col': edge[0], 'nextcol': edge[1],
1194 edgedata = [{'col': edge[0], 'nextcol': edge[1],
1183 'color': (edge[2] - 1) % 6 + 1,
1195 'color': (edge[2] - 1) % 6 + 1,
1184 'width': edge[3], 'bcolor': edge[4]}
1196 'width': edge[3], 'bcolor': edge[4]}
1185 for edge in edges]
1197 for edge in edges]
1186
1198
1187 data.append(
1199 data.append(
1188 {'node': node,
1200 {'node': node,
1189 'col': vtx[0],
1201 'col': vtx[0],
1190 'color': (vtx[1] - 1) % 6 + 1,
1202 'color': (vtx[1] - 1) % 6 + 1,
1191 'edges': edgedata,
1203 'edges': edgedata,
1192 'row': row,
1204 'row': row,
1193 'nextrow': row + 1,
1205 'nextrow': row + 1,
1194 'desc': desc,
1206 'desc': desc,
1195 'user': user,
1207 'user': user,
1196 'age': age,
1208 'age': age,
1197 'bookmarks': webutil.nodebookmarksdict(
1209 'bookmarks': webutil.nodebookmarksdict(
1198 web.repo, ctx.node()),
1210 web.repo, ctx.node()),
1199 'branches': webutil.nodebranchdict(web.repo, ctx),
1211 'branches': webutil.nodebranchdict(web.repo, ctx),
1200 'inbranch': webutil.nodeinbranch(web.repo, ctx),
1212 'inbranch': webutil.nodeinbranch(web.repo, ctx),
1201 'tags': webutil.nodetagsdict(web.repo, ctx.node())})
1213 'tags': webutil.nodetagsdict(web.repo, ctx.node())})
1202
1214
1203 row += 1
1215 row += 1
1204
1216
1205 return data
1217 return data
1206
1218
1207 cols = getcolumns(tree)
1219 cols = getcolumns(tree)
1208 rows = len(tree)
1220 rows = len(tree)
1209 canvasheight = (rows + 1) * bg_height - 27
1221 canvasheight = (rows + 1) * bg_height - 27
1210
1222
1211 return tmpl('graph', rev=rev, symrev=symrev, revcount=revcount,
1223 return tmpl('graph', rev=rev, symrev=symrev, revcount=revcount,
1212 uprev=uprev,
1224 uprev=uprev,
1213 lessvars=lessvars, morevars=morevars, downrev=downrev,
1225 lessvars=lessvars, morevars=morevars, downrev=downrev,
1214 cols=cols, rows=rows,
1226 cols=cols, rows=rows,
1215 canvaswidth=(cols + 1) * bg_height,
1227 canvaswidth=(cols + 1) * bg_height,
1216 truecanvasheight=rows * bg_height,
1228 truecanvasheight=rows * bg_height,
1217 canvasheight=canvasheight, bg_height=bg_height,
1229 canvasheight=canvasheight, bg_height=bg_height,
1218 # {jsdata} will be passed to |json, so it must be in utf-8
1230 # {jsdata} will be passed to |json, so it must be in utf-8
1219 jsdata=lambda **x: graphdata(True, encoding.fromlocal),
1231 jsdata=lambda **x: graphdata(True, encoding.fromlocal),
1220 nodes=lambda **x: graphdata(False, str),
1232 nodes=lambda **x: graphdata(False, str),
1221 node=ctx.hex(), changenav=changenav)
1233 node=ctx.hex(), changenav=changenav)
1222
1234
1223 def _getdoc(e):
1235 def _getdoc(e):
1224 doc = e[0].__doc__
1236 doc = e[0].__doc__
1225 if doc:
1237 if doc:
1226 doc = _(doc).partition('\n')[0]
1238 doc = _(doc).partition('\n')[0]
1227 else:
1239 else:
1228 doc = _('(no help text available)')
1240 doc = _('(no help text available)')
1229 return doc
1241 return doc
1230
1242
1231 @webcommand('help')
1243 @webcommand('help')
1232 def help(web, req, tmpl):
1244 def help(web, req, tmpl):
1233 """
1245 """
1234 /help[/{topic}]
1246 /help[/{topic}]
1235 ---------------
1247 ---------------
1236
1248
1237 Render help documentation.
1249 Render help documentation.
1238
1250
1239 This web command is roughly equivalent to :hg:`help`. If a ``topic``
1251 This web command is roughly equivalent to :hg:`help`. If a ``topic``
1240 is defined, that help topic will be rendered. If not, an index of
1252 is defined, that help topic will be rendered. If not, an index of
1241 available help topics will be rendered.
1253 available help topics will be rendered.
1242
1254
1243 The ``help`` template will be rendered when requesting help for a topic.
1255 The ``help`` template will be rendered when requesting help for a topic.
1244 ``helptopics`` will be rendered for the index of help topics.
1256 ``helptopics`` will be rendered for the index of help topics.
1245 """
1257 """
1246 from .. import commands, help as helpmod # avoid cycle
1258 from .. import commands, help as helpmod # avoid cycle
1247
1259
1248 topicname = req.form.get('node', [None])[0]
1260 topicname = req.form.get('node', [None])[0]
1249 if not topicname:
1261 if not topicname:
1250 def topics(**map):
1262 def topics(**map):
1251 for entries, summary, _doc in helpmod.helptable:
1263 for entries, summary, _doc in helpmod.helptable:
1252 yield {'topic': entries[0], 'summary': summary}
1264 yield {'topic': entries[0], 'summary': summary}
1253
1265
1254 early, other = [], []
1266 early, other = [], []
1255 primary = lambda s: s.partition('|')[0]
1267 primary = lambda s: s.partition('|')[0]
1256 for c, e in commands.table.iteritems():
1268 for c, e in commands.table.iteritems():
1257 doc = _getdoc(e)
1269 doc = _getdoc(e)
1258 if 'DEPRECATED' in doc or c.startswith('debug'):
1270 if 'DEPRECATED' in doc or c.startswith('debug'):
1259 continue
1271 continue
1260 cmd = primary(c)
1272 cmd = primary(c)
1261 if cmd.startswith('^'):
1273 if cmd.startswith('^'):
1262 early.append((cmd[1:], doc))
1274 early.append((cmd[1:], doc))
1263 else:
1275 else:
1264 other.append((cmd, doc))
1276 other.append((cmd, doc))
1265
1277
1266 early.sort()
1278 early.sort()
1267 other.sort()
1279 other.sort()
1268
1280
1269 def earlycommands(**map):
1281 def earlycommands(**map):
1270 for c, doc in early:
1282 for c, doc in early:
1271 yield {'topic': c, 'summary': doc}
1283 yield {'topic': c, 'summary': doc}
1272
1284
1273 def othercommands(**map):
1285 def othercommands(**map):
1274 for c, doc in other:
1286 for c, doc in other:
1275 yield {'topic': c, 'summary': doc}
1287 yield {'topic': c, 'summary': doc}
1276
1288
1277 return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
1289 return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
1278 othercommands=othercommands, title='Index')
1290 othercommands=othercommands, title='Index')
1279
1291
1280 # Render an index of sub-topics.
1292 # Render an index of sub-topics.
1281 if topicname in helpmod.subtopics:
1293 if topicname in helpmod.subtopics:
1282 topics = []
1294 topics = []
1283 for entries, summary, _doc in helpmod.subtopics[topicname]:
1295 for entries, summary, _doc in helpmod.subtopics[topicname]:
1284 topics.append({
1296 topics.append({
1285 'topic': '%s.%s' % (topicname, entries[0]),
1297 'topic': '%s.%s' % (topicname, entries[0]),
1286 'basename': entries[0],
1298 'basename': entries[0],
1287 'summary': summary,
1299 'summary': summary,
1288 })
1300 })
1289
1301
1290 return tmpl('helptopics', topics=topics, title=topicname,
1302 return tmpl('helptopics', topics=topics, title=topicname,
1291 subindex=True)
1303 subindex=True)
1292
1304
1293 u = webutil.wsgiui()
1305 u = webutil.wsgiui()
1294 u.verbose = True
1306 u.verbose = True
1295
1307
1296 # Render a page from a sub-topic.
1308 # Render a page from a sub-topic.
1297 if '.' in topicname:
1309 if '.' in topicname:
1298 # TODO implement support for rendering sections, like
1310 # TODO implement support for rendering sections, like
1299 # `hg help` works.
1311 # `hg help` works.
1300 topic, subtopic = topicname.split('.', 1)
1312 topic, subtopic = topicname.split('.', 1)
1301 if topic not in helpmod.subtopics:
1313 if topic not in helpmod.subtopics:
1302 raise ErrorResponse(HTTP_NOT_FOUND)
1314 raise ErrorResponse(HTTP_NOT_FOUND)
1303 else:
1315 else:
1304 topic = topicname
1316 topic = topicname
1305 subtopic = None
1317 subtopic = None
1306
1318
1307 try:
1319 try:
1308 doc = helpmod.help_(u, topic, subtopic=subtopic)
1320 doc = helpmod.help_(u, topic, subtopic=subtopic)
1309 except error.UnknownCommand:
1321 except error.UnknownCommand:
1310 raise ErrorResponse(HTTP_NOT_FOUND)
1322 raise ErrorResponse(HTTP_NOT_FOUND)
1311 return tmpl('help', topic=topicname, doc=doc)
1323 return tmpl('help', topic=topicname, doc=doc)
1312
1324
1313 # tell hggettext to extract docstrings from these functions:
1325 # tell hggettext to extract docstrings from these functions:
1314 i18nfunctions = commands.values()
1326 i18nfunctions = commands.values()
@@ -1,1472 +1,1470 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import glob
12 import glob
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import re
15 import re
16 import shutil
16 import shutil
17 import stat
17 import stat
18 import tempfile
18 import tempfile
19 import threading
19 import threading
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import wdirrev
22 from .node import wdirrev
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 osutil,
27 osutil,
28 pathutil,
28 pathutil,
29 phases,
29 phases,
30 pycompat,
30 pycompat,
31 revset,
31 revset,
32 similar,
32 similar,
33 util,
33 util,
34 )
34 )
35
35
36 if os.name == 'nt':
36 if os.name == 'nt':
37 from . import scmwindows as scmplatform
37 from . import scmwindows as scmplatform
38 else:
38 else:
39 from . import scmposix as scmplatform
39 from . import scmposix as scmplatform
40
40
41 systemrcpath = scmplatform.systemrcpath
41 systemrcpath = scmplatform.systemrcpath
42 userrcpath = scmplatform.userrcpath
42 userrcpath = scmplatform.userrcpath
43 termsize = scmplatform.termsize
43 termsize = scmplatform.termsize
44
44
45 class status(tuple):
45 class status(tuple):
46 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
46 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
47 and 'ignored' properties are only relevant to the working copy.
47 and 'ignored' properties are only relevant to the working copy.
48 '''
48 '''
49
49
50 __slots__ = ()
50 __slots__ = ()
51
51
52 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
52 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
53 clean):
53 clean):
54 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
54 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
55 ignored, clean))
55 ignored, clean))
56
56
57 @property
57 @property
58 def modified(self):
58 def modified(self):
59 '''files that have been modified'''
59 '''files that have been modified'''
60 return self[0]
60 return self[0]
61
61
62 @property
62 @property
63 def added(self):
63 def added(self):
64 '''files that have been added'''
64 '''files that have been added'''
65 return self[1]
65 return self[1]
66
66
67 @property
67 @property
68 def removed(self):
68 def removed(self):
69 '''files that have been removed'''
69 '''files that have been removed'''
70 return self[2]
70 return self[2]
71
71
72 @property
72 @property
73 def deleted(self):
73 def deleted(self):
74 '''files that are in the dirstate, but have been deleted from the
74 '''files that are in the dirstate, but have been deleted from the
75 working copy (aka "missing")
75 working copy (aka "missing")
76 '''
76 '''
77 return self[3]
77 return self[3]
78
78
79 @property
79 @property
80 def unknown(self):
80 def unknown(self):
81 '''files not in the dirstate that are not ignored'''
81 '''files not in the dirstate that are not ignored'''
82 return self[4]
82 return self[4]
83
83
84 @property
84 @property
85 def ignored(self):
85 def ignored(self):
86 '''files not in the dirstate that are ignored (by _dirignore())'''
86 '''files not in the dirstate that are ignored (by _dirignore())'''
87 return self[5]
87 return self[5]
88
88
89 @property
89 @property
90 def clean(self):
90 def clean(self):
91 '''files that have not been modified'''
91 '''files that have not been modified'''
92 return self[6]
92 return self[6]
93
93
94 def __repr__(self, *args, **kwargs):
94 def __repr__(self, *args, **kwargs):
95 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
95 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
96 'unknown=%r, ignored=%r, clean=%r>') % self)
96 'unknown=%r, ignored=%r, clean=%r>') % self)
97
97
98 def itersubrepos(ctx1, ctx2):
98 def itersubrepos(ctx1, ctx2):
99 """find subrepos in ctx1 or ctx2"""
99 """find subrepos in ctx1 or ctx2"""
100 # Create a (subpath, ctx) mapping where we prefer subpaths from
100 # Create a (subpath, ctx) mapping where we prefer subpaths from
101 # ctx1. The subpaths from ctx2 are important when the .hgsub file
101 # ctx1. The subpaths from ctx2 are important when the .hgsub file
102 # has been modified (in ctx2) but not yet committed (in ctx1).
102 # has been modified (in ctx2) but not yet committed (in ctx1).
103 subpaths = dict.fromkeys(ctx2.substate, ctx2)
103 subpaths = dict.fromkeys(ctx2.substate, ctx2)
104 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
104 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
105
105
106 missing = set()
106 missing = set()
107
107
108 for subpath in ctx2.substate:
108 for subpath in ctx2.substate:
109 if subpath not in ctx1.substate:
109 if subpath not in ctx1.substate:
110 del subpaths[subpath]
110 del subpaths[subpath]
111 missing.add(subpath)
111 missing.add(subpath)
112
112
113 for subpath, ctx in sorted(subpaths.iteritems()):
113 for subpath, ctx in sorted(subpaths.iteritems()):
114 yield subpath, ctx.sub(subpath)
114 yield subpath, ctx.sub(subpath)
115
115
116 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
116 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
117 # status and diff will have an accurate result when it does
117 # status and diff will have an accurate result when it does
118 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
118 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
119 # against itself.
119 # against itself.
120 for subpath in missing:
120 for subpath in missing:
121 yield subpath, ctx2.nullsub(subpath, ctx1)
121 yield subpath, ctx2.nullsub(subpath, ctx1)
122
122
123 def nochangesfound(ui, repo, excluded=None):
123 def nochangesfound(ui, repo, excluded=None):
124 '''Report no changes for push/pull, excluded is None or a list of
124 '''Report no changes for push/pull, excluded is None or a list of
125 nodes excluded from the push/pull.
125 nodes excluded from the push/pull.
126 '''
126 '''
127 secretlist = []
127 secretlist = []
128 if excluded:
128 if excluded:
129 for n in excluded:
129 for n in excluded:
130 if n not in repo:
130 if n not in repo:
131 # discovery should not have included the filtered revision,
131 # discovery should not have included the filtered revision,
132 # we have to explicitly exclude it until discovery is cleanup.
132 # we have to explicitly exclude it until discovery is cleanup.
133 continue
133 continue
134 ctx = repo[n]
134 ctx = repo[n]
135 if ctx.phase() >= phases.secret and not ctx.extinct():
135 if ctx.phase() >= phases.secret and not ctx.extinct():
136 secretlist.append(n)
136 secretlist.append(n)
137
137
138 if secretlist:
138 if secretlist:
139 ui.status(_("no changes found (ignored %d secret changesets)\n")
139 ui.status(_("no changes found (ignored %d secret changesets)\n")
140 % len(secretlist))
140 % len(secretlist))
141 else:
141 else:
142 ui.status(_("no changes found\n"))
142 ui.status(_("no changes found\n"))
143
143
144 def checknewlabel(repo, lbl, kind):
144 def checknewlabel(repo, lbl, kind):
145 # Do not use the "kind" parameter in ui output.
145 # Do not use the "kind" parameter in ui output.
146 # It makes strings difficult to translate.
146 # It makes strings difficult to translate.
147 if lbl in ['tip', '.', 'null']:
147 if lbl in ['tip', '.', 'null']:
148 raise error.Abort(_("the name '%s' is reserved") % lbl)
148 raise error.Abort(_("the name '%s' is reserved") % lbl)
149 for c in (':', '\0', '\n', '\r'):
149 for c in (':', '\0', '\n', '\r'):
150 if c in lbl:
150 if c in lbl:
151 raise error.Abort(_("%r cannot be used in a name") % c)
151 raise error.Abort(_("%r cannot be used in a name") % c)
152 try:
152 try:
153 int(lbl)
153 int(lbl)
154 raise error.Abort(_("cannot use an integer as a name"))
154 raise error.Abort(_("cannot use an integer as a name"))
155 except ValueError:
155 except ValueError:
156 pass
156 pass
157
157
158 def checkfilename(f):
158 def checkfilename(f):
159 '''Check that the filename f is an acceptable filename for a tracked file'''
159 '''Check that the filename f is an acceptable filename for a tracked file'''
160 if '\r' in f or '\n' in f:
160 if '\r' in f or '\n' in f:
161 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
161 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
162
162
163 def checkportable(ui, f):
163 def checkportable(ui, f):
164 '''Check if filename f is portable and warn or abort depending on config'''
164 '''Check if filename f is portable and warn or abort depending on config'''
165 checkfilename(f)
165 checkfilename(f)
166 abort, warn = checkportabilityalert(ui)
166 abort, warn = checkportabilityalert(ui)
167 if abort or warn:
167 if abort or warn:
168 msg = util.checkwinfilename(f)
168 msg = util.checkwinfilename(f)
169 if msg:
169 if msg:
170 msg = "%s: %r" % (msg, f)
170 msg = "%s: %r" % (msg, f)
171 if abort:
171 if abort:
172 raise error.Abort(msg)
172 raise error.Abort(msg)
173 ui.warn(_("warning: %s\n") % msg)
173 ui.warn(_("warning: %s\n") % msg)
174
174
175 def checkportabilityalert(ui):
175 def checkportabilityalert(ui):
176 '''check if the user's config requests nothing, a warning, or abort for
176 '''check if the user's config requests nothing, a warning, or abort for
177 non-portable filenames'''
177 non-portable filenames'''
178 val = ui.config('ui', 'portablefilenames', 'warn')
178 val = ui.config('ui', 'portablefilenames', 'warn')
179 lval = val.lower()
179 lval = val.lower()
180 bval = util.parsebool(val)
180 bval = util.parsebool(val)
181 abort = os.name == 'nt' or lval == 'abort'
181 abort = os.name == 'nt' or lval == 'abort'
182 warn = bval or lval == 'warn'
182 warn = bval or lval == 'warn'
183 if bval is None and not (warn or abort or lval == 'ignore'):
183 if bval is None and not (warn or abort or lval == 'ignore'):
184 raise error.ConfigError(
184 raise error.ConfigError(
185 _("ui.portablefilenames value is invalid ('%s')") % val)
185 _("ui.portablefilenames value is invalid ('%s')") % val)
186 return abort, warn
186 return abort, warn
187
187
188 class casecollisionauditor(object):
188 class casecollisionauditor(object):
189 def __init__(self, ui, abort, dirstate):
189 def __init__(self, ui, abort, dirstate):
190 self._ui = ui
190 self._ui = ui
191 self._abort = abort
191 self._abort = abort
192 allfiles = '\0'.join(dirstate._map)
192 allfiles = '\0'.join(dirstate._map)
193 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
193 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
194 self._dirstate = dirstate
194 self._dirstate = dirstate
195 # The purpose of _newfiles is so that we don't complain about
195 # The purpose of _newfiles is so that we don't complain about
196 # case collisions if someone were to call this object with the
196 # case collisions if someone were to call this object with the
197 # same filename twice.
197 # same filename twice.
198 self._newfiles = set()
198 self._newfiles = set()
199
199
200 def __call__(self, f):
200 def __call__(self, f):
201 if f in self._newfiles:
201 if f in self._newfiles:
202 return
202 return
203 fl = encoding.lower(f)
203 fl = encoding.lower(f)
204 if fl in self._loweredfiles and f not in self._dirstate:
204 if fl in self._loweredfiles and f not in self._dirstate:
205 msg = _('possible case-folding collision for %s') % f
205 msg = _('possible case-folding collision for %s') % f
206 if self._abort:
206 if self._abort:
207 raise error.Abort(msg)
207 raise error.Abort(msg)
208 self._ui.warn(_("warning: %s\n") % msg)
208 self._ui.warn(_("warning: %s\n") % msg)
209 self._loweredfiles.add(fl)
209 self._loweredfiles.add(fl)
210 self._newfiles.add(f)
210 self._newfiles.add(f)
211
211
212 def filteredhash(repo, maxrev):
212 def filteredhash(repo, maxrev):
213 """build hash of filtered revisions in the current repoview.
213 """build hash of filtered revisions in the current repoview.
214
214
215 Multiple caches perform up-to-date validation by checking that the
215 Multiple caches perform up-to-date validation by checking that the
216 tiprev and tipnode stored in the cache file match the current repository.
216 tiprev and tipnode stored in the cache file match the current repository.
217 However, this is not sufficient for validating repoviews because the set
217 However, this is not sufficient for validating repoviews because the set
218 of revisions in the view may change without the repository tiprev and
218 of revisions in the view may change without the repository tiprev and
219 tipnode changing.
219 tipnode changing.
220
220
221 This function hashes all the revs filtered from the view and returns
221 This function hashes all the revs filtered from the view and returns
222 that SHA-1 digest.
222 that SHA-1 digest.
223 """
223 """
224 cl = repo.changelog
224 cl = repo.changelog
225 if not cl.filteredrevs:
225 if not cl.filteredrevs:
226 return None
226 return None
227 key = None
227 key = None
228 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
228 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
229 if revs:
229 if revs:
230 s = hashlib.sha1()
230 s = hashlib.sha1()
231 for rev in revs:
231 for rev in revs:
232 s.update('%s;' % rev)
232 s.update('%s;' % rev)
233 key = s.digest()
233 key = s.digest()
234 return key
234 return key
235
235
236 class abstractvfs(object):
236 class abstractvfs(object):
237 """Abstract base class; cannot be instantiated"""
237 """Abstract base class; cannot be instantiated"""
238
238
239 def __init__(self, *args, **kwargs):
239 def __init__(self, *args, **kwargs):
240 '''Prevent instantiation; don't call this from subclasses.'''
240 '''Prevent instantiation; don't call this from subclasses.'''
241 raise NotImplementedError('attempted instantiating ' + str(type(self)))
241 raise NotImplementedError('attempted instantiating ' + str(type(self)))
242
242
243 def tryread(self, path):
243 def tryread(self, path):
244 '''gracefully return an empty string for missing files'''
244 '''gracefully return an empty string for missing files'''
245 try:
245 try:
246 return self.read(path)
246 return self.read(path)
247 except IOError as inst:
247 except IOError as inst:
248 if inst.errno != errno.ENOENT:
248 if inst.errno != errno.ENOENT:
249 raise
249 raise
250 return ""
250 return ""
251
251
252 def tryreadlines(self, path, mode='rb'):
252 def tryreadlines(self, path, mode='rb'):
253 '''gracefully return an empty array for missing files'''
253 '''gracefully return an empty array for missing files'''
254 try:
254 try:
255 return self.readlines(path, mode=mode)
255 return self.readlines(path, mode=mode)
256 except IOError as inst:
256 except IOError as inst:
257 if inst.errno != errno.ENOENT:
257 if inst.errno != errno.ENOENT:
258 raise
258 raise
259 return []
259 return []
260
260
261 @util.propertycache
261 @util.propertycache
262 def open(self):
262 def open(self):
263 '''Open ``path`` file, which is relative to vfs root.
263 '''Open ``path`` file, which is relative to vfs root.
264
264
265 Newly created directories are marked as "not to be indexed by
265 Newly created directories are marked as "not to be indexed by
266 the content indexing service", if ``notindexed`` is specified
266 the content indexing service", if ``notindexed`` is specified
267 for "write" mode access.
267 for "write" mode access.
268 '''
268 '''
269 return self.__call__
269 return self.__call__
270
270
271 def read(self, path):
271 def read(self, path):
272 with self(path, 'rb') as fp:
272 with self(path, 'rb') as fp:
273 return fp.read()
273 return fp.read()
274
274
275 def readlines(self, path, mode='rb'):
275 def readlines(self, path, mode='rb'):
276 with self(path, mode=mode) as fp:
276 with self(path, mode=mode) as fp:
277 return fp.readlines()
277 return fp.readlines()
278
278
279 def write(self, path, data, backgroundclose=False):
279 def write(self, path, data, backgroundclose=False):
280 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
280 with self(path, 'wb', backgroundclose=backgroundclose) as fp:
281 return fp.write(data)
281 return fp.write(data)
282
282
283 def writelines(self, path, data, mode='wb', notindexed=False):
283 def writelines(self, path, data, mode='wb', notindexed=False):
284 with self(path, mode=mode, notindexed=notindexed) as fp:
284 with self(path, mode=mode, notindexed=notindexed) as fp:
285 return fp.writelines(data)
285 return fp.writelines(data)
286
286
287 def append(self, path, data):
287 def append(self, path, data):
288 with self(path, 'ab') as fp:
288 with self(path, 'ab') as fp:
289 return fp.write(data)
289 return fp.write(data)
290
290
291 def basename(self, path):
291 def basename(self, path):
292 """return base element of a path (as os.path.basename would do)
292 """return base element of a path (as os.path.basename would do)
293
293
294 This exists to allow handling of strange encoding if needed."""
294 This exists to allow handling of strange encoding if needed."""
295 return os.path.basename(path)
295 return os.path.basename(path)
296
296
297 def chmod(self, path, mode):
297 def chmod(self, path, mode):
298 return os.chmod(self.join(path), mode)
298 return os.chmod(self.join(path), mode)
299
299
300 def dirname(self, path):
300 def dirname(self, path):
301 """return dirname element of a path (as os.path.dirname would do)
301 """return dirname element of a path (as os.path.dirname would do)
302
302
303 This exists to allow handling of strange encoding if needed."""
303 This exists to allow handling of strange encoding if needed."""
304 return os.path.dirname(path)
304 return os.path.dirname(path)
305
305
306 def exists(self, path=None):
306 def exists(self, path=None):
307 return os.path.exists(self.join(path))
307 return os.path.exists(self.join(path))
308
308
309 def fstat(self, fp):
309 def fstat(self, fp):
310 return util.fstat(fp)
310 return util.fstat(fp)
311
311
312 def isdir(self, path=None):
312 def isdir(self, path=None):
313 return os.path.isdir(self.join(path))
313 return os.path.isdir(self.join(path))
314
314
315 def isfile(self, path=None):
315 def isfile(self, path=None):
316 return os.path.isfile(self.join(path))
316 return os.path.isfile(self.join(path))
317
317
318 def islink(self, path=None):
318 def islink(self, path=None):
319 return os.path.islink(self.join(path))
319 return os.path.islink(self.join(path))
320
320
321 def isfileorlink(self, path=None):
321 def isfileorlink(self, path=None):
322 '''return whether path is a regular file or a symlink
322 '''return whether path is a regular file or a symlink
323
323
324 Unlike isfile, this doesn't follow symlinks.'''
324 Unlike isfile, this doesn't follow symlinks.'''
325 try:
325 try:
326 st = self.lstat(path)
326 st = self.lstat(path)
327 except OSError:
327 except OSError:
328 return False
328 return False
329 mode = st.st_mode
329 mode = st.st_mode
330 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
330 return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
331
331
332 def reljoin(self, *paths):
332 def reljoin(self, *paths):
333 """join various elements of a path together (as os.path.join would do)
333 """join various elements of a path together (as os.path.join would do)
334
334
335 The vfs base is not injected so that path stay relative. This exists
335 The vfs base is not injected so that path stay relative. This exists
336 to allow handling of strange encoding if needed."""
336 to allow handling of strange encoding if needed."""
337 return os.path.join(*paths)
337 return os.path.join(*paths)
338
338
339 def split(self, path):
339 def split(self, path):
340 """split top-most element of a path (as os.path.split would do)
340 """split top-most element of a path (as os.path.split would do)
341
341
342 This exists to allow handling of strange encoding if needed."""
342 This exists to allow handling of strange encoding if needed."""
343 return os.path.split(path)
343 return os.path.split(path)
344
344
345 def lexists(self, path=None):
345 def lexists(self, path=None):
346 return os.path.lexists(self.join(path))
346 return os.path.lexists(self.join(path))
347
347
348 def lstat(self, path=None):
348 def lstat(self, path=None):
349 return os.lstat(self.join(path))
349 return os.lstat(self.join(path))
350
350
351 def listdir(self, path=None):
351 def listdir(self, path=None):
352 return os.listdir(self.join(path))
352 return os.listdir(self.join(path))
353
353
354 def makedir(self, path=None, notindexed=True):
354 def makedir(self, path=None, notindexed=True):
355 return util.makedir(self.join(path), notindexed)
355 return util.makedir(self.join(path), notindexed)
356
356
357 def makedirs(self, path=None, mode=None):
357 def makedirs(self, path=None, mode=None):
358 return util.makedirs(self.join(path), mode)
358 return util.makedirs(self.join(path), mode)
359
359
360 def makelock(self, info, path):
360 def makelock(self, info, path):
361 return util.makelock(info, self.join(path))
361 return util.makelock(info, self.join(path))
362
362
363 def mkdir(self, path=None):
363 def mkdir(self, path=None):
364 return os.mkdir(self.join(path))
364 return os.mkdir(self.join(path))
365
365
366 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
366 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
367 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
367 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
368 dir=self.join(dir), text=text)
368 dir=self.join(dir), text=text)
369 dname, fname = util.split(name)
369 dname, fname = util.split(name)
370 if dir:
370 if dir:
371 return fd, os.path.join(dir, fname)
371 return fd, os.path.join(dir, fname)
372 else:
372 else:
373 return fd, fname
373 return fd, fname
374
374
375 def readdir(self, path=None, stat=None, skip=None):
375 def readdir(self, path=None, stat=None, skip=None):
376 return osutil.listdir(self.join(path), stat, skip)
376 return osutil.listdir(self.join(path), stat, skip)
377
377
378 def readlock(self, path):
378 def readlock(self, path):
379 return util.readlock(self.join(path))
379 return util.readlock(self.join(path))
380
380
381 def rename(self, src, dst, checkambig=False):
381 def rename(self, src, dst, checkambig=False):
382 """Rename from src to dst
382 """Rename from src to dst
383
383
384 checkambig argument is used with util.filestat, and is useful
384 checkambig argument is used with util.filestat, and is useful
385 only if destination file is guarded by any lock
385 only if destination file is guarded by any lock
386 (e.g. repo.lock or repo.wlock).
386 (e.g. repo.lock or repo.wlock).
387 """
387 """
388 dstpath = self.join(dst)
388 dstpath = self.join(dst)
389 oldstat = checkambig and util.filestat(dstpath)
389 oldstat = checkambig and util.filestat(dstpath)
390 if oldstat and oldstat.stat:
390 if oldstat and oldstat.stat:
391 ret = util.rename(self.join(src), dstpath)
391 ret = util.rename(self.join(src), dstpath)
392 newstat = util.filestat(dstpath)
392 newstat = util.filestat(dstpath)
393 if newstat.isambig(oldstat):
393 if newstat.isambig(oldstat):
394 # stat of renamed file is ambiguous to original one
394 # stat of renamed file is ambiguous to original one
395 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
395 newstat.avoidambig(dstpath, oldstat)
396 os.utime(dstpath, (advanced, advanced))
397 return ret
396 return ret
398 return util.rename(self.join(src), dstpath)
397 return util.rename(self.join(src), dstpath)
399
398
400 def readlink(self, path):
399 def readlink(self, path):
401 return os.readlink(self.join(path))
400 return os.readlink(self.join(path))
402
401
403 def removedirs(self, path=None):
402 def removedirs(self, path=None):
404 """Remove a leaf directory and all empty intermediate ones
403 """Remove a leaf directory and all empty intermediate ones
405 """
404 """
406 return util.removedirs(self.join(path))
405 return util.removedirs(self.join(path))
407
406
408 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
407 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
409 """Remove a directory tree recursively
408 """Remove a directory tree recursively
410
409
411 If ``forcibly``, this tries to remove READ-ONLY files, too.
410 If ``forcibly``, this tries to remove READ-ONLY files, too.
412 """
411 """
413 if forcibly:
412 if forcibly:
414 def onerror(function, path, excinfo):
413 def onerror(function, path, excinfo):
415 if function is not os.remove:
414 if function is not os.remove:
416 raise
415 raise
417 # read-only files cannot be unlinked under Windows
416 # read-only files cannot be unlinked under Windows
418 s = os.stat(path)
417 s = os.stat(path)
419 if (s.st_mode & stat.S_IWRITE) != 0:
418 if (s.st_mode & stat.S_IWRITE) != 0:
420 raise
419 raise
421 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
420 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
422 os.remove(path)
421 os.remove(path)
423 else:
422 else:
424 onerror = None
423 onerror = None
425 return shutil.rmtree(self.join(path),
424 return shutil.rmtree(self.join(path),
426 ignore_errors=ignore_errors, onerror=onerror)
425 ignore_errors=ignore_errors, onerror=onerror)
427
426
428 def setflags(self, path, l, x):
427 def setflags(self, path, l, x):
429 return util.setflags(self.join(path), l, x)
428 return util.setflags(self.join(path), l, x)
430
429
431 def stat(self, path=None):
430 def stat(self, path=None):
432 return os.stat(self.join(path))
431 return os.stat(self.join(path))
433
432
434 def unlink(self, path=None):
433 def unlink(self, path=None):
435 return util.unlink(self.join(path))
434 return util.unlink(self.join(path))
436
435
437 def unlinkpath(self, path=None, ignoremissing=False):
436 def unlinkpath(self, path=None, ignoremissing=False):
438 return util.unlinkpath(self.join(path), ignoremissing)
437 return util.unlinkpath(self.join(path), ignoremissing)
439
438
440 def utime(self, path=None, t=None):
439 def utime(self, path=None, t=None):
441 return os.utime(self.join(path), t)
440 return os.utime(self.join(path), t)
442
441
443 def walk(self, path=None, onerror=None):
442 def walk(self, path=None, onerror=None):
444 """Yield (dirpath, dirs, files) tuple for each directories under path
443 """Yield (dirpath, dirs, files) tuple for each directories under path
445
444
446 ``dirpath`` is relative one from the root of this vfs. This
445 ``dirpath`` is relative one from the root of this vfs. This
447 uses ``os.sep`` as path separator, even you specify POSIX
446 uses ``os.sep`` as path separator, even you specify POSIX
448 style ``path``.
447 style ``path``.
449
448
450 "The root of this vfs" is represented as empty ``dirpath``.
449 "The root of this vfs" is represented as empty ``dirpath``.
451 """
450 """
452 root = os.path.normpath(self.join(None))
451 root = os.path.normpath(self.join(None))
453 # when dirpath == root, dirpath[prefixlen:] becomes empty
452 # when dirpath == root, dirpath[prefixlen:] becomes empty
454 # because len(dirpath) < prefixlen.
453 # because len(dirpath) < prefixlen.
455 prefixlen = len(pathutil.normasprefix(root))
454 prefixlen = len(pathutil.normasprefix(root))
456 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
455 for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
457 yield (dirpath[prefixlen:], dirs, files)
456 yield (dirpath[prefixlen:], dirs, files)
458
457
459 @contextlib.contextmanager
458 @contextlib.contextmanager
460 def backgroundclosing(self, ui, expectedcount=-1):
459 def backgroundclosing(self, ui, expectedcount=-1):
461 """Allow files to be closed asynchronously.
460 """Allow files to be closed asynchronously.
462
461
463 When this context manager is active, ``backgroundclose`` can be passed
462 When this context manager is active, ``backgroundclose`` can be passed
464 to ``__call__``/``open`` to result in the file possibly being closed
463 to ``__call__``/``open`` to result in the file possibly being closed
465 asynchronously, on a background thread.
464 asynchronously, on a background thread.
466 """
465 """
467 # This is an arbitrary restriction and could be changed if we ever
466 # This is an arbitrary restriction and could be changed if we ever
468 # have a use case.
467 # have a use case.
469 vfs = getattr(self, 'vfs', self)
468 vfs = getattr(self, 'vfs', self)
470 if getattr(vfs, '_backgroundfilecloser', None):
469 if getattr(vfs, '_backgroundfilecloser', None):
471 raise error.Abort(
470 raise error.Abort(
472 _('can only have 1 active background file closer'))
471 _('can only have 1 active background file closer'))
473
472
474 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
473 with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
475 try:
474 try:
476 vfs._backgroundfilecloser = bfc
475 vfs._backgroundfilecloser = bfc
477 yield bfc
476 yield bfc
478 finally:
477 finally:
479 vfs._backgroundfilecloser = None
478 vfs._backgroundfilecloser = None
480
479
481 class vfs(abstractvfs):
480 class vfs(abstractvfs):
482 '''Operate files relative to a base directory
481 '''Operate files relative to a base directory
483
482
484 This class is used to hide the details of COW semantics and
483 This class is used to hide the details of COW semantics and
485 remote file access from higher level code.
484 remote file access from higher level code.
486 '''
485 '''
487 def __init__(self, base, audit=True, expandpath=False, realpath=False):
486 def __init__(self, base, audit=True, expandpath=False, realpath=False):
488 if expandpath:
487 if expandpath:
489 base = util.expandpath(base)
488 base = util.expandpath(base)
490 if realpath:
489 if realpath:
491 base = os.path.realpath(base)
490 base = os.path.realpath(base)
492 self.base = base
491 self.base = base
493 self.mustaudit = audit
492 self.mustaudit = audit
494 self.createmode = None
493 self.createmode = None
495 self._trustnlink = None
494 self._trustnlink = None
496
495
497 @property
496 @property
498 def mustaudit(self):
497 def mustaudit(self):
499 return self._audit
498 return self._audit
500
499
501 @mustaudit.setter
500 @mustaudit.setter
502 def mustaudit(self, onoff):
501 def mustaudit(self, onoff):
503 self._audit = onoff
502 self._audit = onoff
504 if onoff:
503 if onoff:
505 self.audit = pathutil.pathauditor(self.base)
504 self.audit = pathutil.pathauditor(self.base)
506 else:
505 else:
507 self.audit = util.always
506 self.audit = util.always
508
507
509 @util.propertycache
508 @util.propertycache
510 def _cansymlink(self):
509 def _cansymlink(self):
511 return util.checklink(self.base)
510 return util.checklink(self.base)
512
511
513 @util.propertycache
512 @util.propertycache
514 def _chmod(self):
513 def _chmod(self):
515 return util.checkexec(self.base)
514 return util.checkexec(self.base)
516
515
517 def _fixfilemode(self, name):
516 def _fixfilemode(self, name):
518 if self.createmode is None or not self._chmod:
517 if self.createmode is None or not self._chmod:
519 return
518 return
520 os.chmod(name, self.createmode & 0o666)
519 os.chmod(name, self.createmode & 0o666)
521
520
522 def __call__(self, path, mode="r", text=False, atomictemp=False,
521 def __call__(self, path, mode="r", text=False, atomictemp=False,
523 notindexed=False, backgroundclose=False, checkambig=False):
522 notindexed=False, backgroundclose=False, checkambig=False):
524 '''Open ``path`` file, which is relative to vfs root.
523 '''Open ``path`` file, which is relative to vfs root.
525
524
526 Newly created directories are marked as "not to be indexed by
525 Newly created directories are marked as "not to be indexed by
527 the content indexing service", if ``notindexed`` is specified
526 the content indexing service", if ``notindexed`` is specified
528 for "write" mode access.
527 for "write" mode access.
529
528
530 If ``backgroundclose`` is passed, the file may be closed asynchronously.
529 If ``backgroundclose`` is passed, the file may be closed asynchronously.
531 It can only be used if the ``self.backgroundclosing()`` context manager
530 It can only be used if the ``self.backgroundclosing()`` context manager
532 is active. This should only be specified if the following criteria hold:
531 is active. This should only be specified if the following criteria hold:
533
532
534 1. There is a potential for writing thousands of files. Unless you
533 1. There is a potential for writing thousands of files. Unless you
535 are writing thousands of files, the performance benefits of
534 are writing thousands of files, the performance benefits of
536 asynchronously closing files is not realized.
535 asynchronously closing files is not realized.
537 2. Files are opened exactly once for the ``backgroundclosing``
536 2. Files are opened exactly once for the ``backgroundclosing``
538 active duration and are therefore free of race conditions between
537 active duration and are therefore free of race conditions between
539 closing a file on a background thread and reopening it. (If the
538 closing a file on a background thread and reopening it. (If the
540 file were opened multiple times, there could be unflushed data
539 file were opened multiple times, there could be unflushed data
541 because the original file handle hasn't been flushed/closed yet.)
540 because the original file handle hasn't been flushed/closed yet.)
542
541
543 ``checkambig`` argument is passed to atomictemplfile (valid
542 ``checkambig`` argument is passed to atomictemplfile (valid
544 only for writing), and is useful only if target file is
543 only for writing), and is useful only if target file is
545 guarded by any lock (e.g. repo.lock or repo.wlock).
544 guarded by any lock (e.g. repo.lock or repo.wlock).
546 '''
545 '''
547 if self._audit:
546 if self._audit:
548 r = util.checkosfilename(path)
547 r = util.checkosfilename(path)
549 if r:
548 if r:
550 raise error.Abort("%s: %r" % (r, path))
549 raise error.Abort("%s: %r" % (r, path))
551 self.audit(path)
550 self.audit(path)
552 f = self.join(path)
551 f = self.join(path)
553
552
554 if not text and "b" not in mode:
553 if not text and "b" not in mode:
555 mode += "b" # for that other OS
554 mode += "b" # for that other OS
556
555
557 nlink = -1
556 nlink = -1
558 if mode not in ('r', 'rb'):
557 if mode not in ('r', 'rb'):
559 dirname, basename = util.split(f)
558 dirname, basename = util.split(f)
560 # If basename is empty, then the path is malformed because it points
559 # If basename is empty, then the path is malformed because it points
561 # to a directory. Let the posixfile() call below raise IOError.
560 # to a directory. Let the posixfile() call below raise IOError.
562 if basename:
561 if basename:
563 if atomictemp:
562 if atomictemp:
564 util.makedirs(dirname, self.createmode, notindexed)
563 util.makedirs(dirname, self.createmode, notindexed)
565 return util.atomictempfile(f, mode, self.createmode,
564 return util.atomictempfile(f, mode, self.createmode,
566 checkambig=checkambig)
565 checkambig=checkambig)
567 try:
566 try:
568 if 'w' in mode:
567 if 'w' in mode:
569 util.unlink(f)
568 util.unlink(f)
570 nlink = 0
569 nlink = 0
571 else:
570 else:
572 # nlinks() may behave differently for files on Windows
571 # nlinks() may behave differently for files on Windows
573 # shares if the file is open.
572 # shares if the file is open.
574 with util.posixfile(f):
573 with util.posixfile(f):
575 nlink = util.nlinks(f)
574 nlink = util.nlinks(f)
576 if nlink < 1:
575 if nlink < 1:
577 nlink = 2 # force mktempcopy (issue1922)
576 nlink = 2 # force mktempcopy (issue1922)
578 except (OSError, IOError) as e:
577 except (OSError, IOError) as e:
579 if e.errno != errno.ENOENT:
578 if e.errno != errno.ENOENT:
580 raise
579 raise
581 nlink = 0
580 nlink = 0
582 util.makedirs(dirname, self.createmode, notindexed)
581 util.makedirs(dirname, self.createmode, notindexed)
583 if nlink > 0:
582 if nlink > 0:
584 if self._trustnlink is None:
583 if self._trustnlink is None:
585 self._trustnlink = nlink > 1 or util.checknlink(f)
584 self._trustnlink = nlink > 1 or util.checknlink(f)
586 if nlink > 1 or not self._trustnlink:
585 if nlink > 1 or not self._trustnlink:
587 util.rename(util.mktempcopy(f), f)
586 util.rename(util.mktempcopy(f), f)
588 fp = util.posixfile(f, mode)
587 fp = util.posixfile(f, mode)
589 if nlink == 0:
588 if nlink == 0:
590 self._fixfilemode(f)
589 self._fixfilemode(f)
591
590
592 if checkambig:
591 if checkambig:
593 if mode in ('r', 'rb'):
592 if mode in ('r', 'rb'):
594 raise error.Abort(_('implementation error: mode %s is not'
593 raise error.Abort(_('implementation error: mode %s is not'
595 ' valid for checkambig=True') % mode)
594 ' valid for checkambig=True') % mode)
596 fp = checkambigatclosing(fp)
595 fp = checkambigatclosing(fp)
597
596
598 if backgroundclose:
597 if backgroundclose:
599 if not self._backgroundfilecloser:
598 if not self._backgroundfilecloser:
600 raise error.Abort(_('backgroundclose can only be used when a '
599 raise error.Abort(_('backgroundclose can only be used when a '
601 'backgroundclosing context manager is active')
600 'backgroundclosing context manager is active')
602 )
601 )
603
602
604 fp = delayclosedfile(fp, self._backgroundfilecloser)
603 fp = delayclosedfile(fp, self._backgroundfilecloser)
605
604
606 return fp
605 return fp
607
606
608 def symlink(self, src, dst):
607 def symlink(self, src, dst):
609 self.audit(dst)
608 self.audit(dst)
610 linkname = self.join(dst)
609 linkname = self.join(dst)
611 try:
610 try:
612 os.unlink(linkname)
611 os.unlink(linkname)
613 except OSError:
612 except OSError:
614 pass
613 pass
615
614
616 util.makedirs(os.path.dirname(linkname), self.createmode)
615 util.makedirs(os.path.dirname(linkname), self.createmode)
617
616
618 if self._cansymlink:
617 if self._cansymlink:
619 try:
618 try:
620 os.symlink(src, linkname)
619 os.symlink(src, linkname)
621 except OSError as err:
620 except OSError as err:
622 raise OSError(err.errno, _('could not symlink to %r: %s') %
621 raise OSError(err.errno, _('could not symlink to %r: %s') %
623 (src, err.strerror), linkname)
622 (src, err.strerror), linkname)
624 else:
623 else:
625 self.write(dst, src)
624 self.write(dst, src)
626
625
627 def join(self, path, *insidef):
626 def join(self, path, *insidef):
628 if path:
627 if path:
629 return os.path.join(self.base, path, *insidef)
628 return os.path.join(self.base, path, *insidef)
630 else:
629 else:
631 return self.base
630 return self.base
632
631
633 opener = vfs
632 opener = vfs
634
633
635 class auditvfs(object):
634 class auditvfs(object):
636 def __init__(self, vfs):
635 def __init__(self, vfs):
637 self.vfs = vfs
636 self.vfs = vfs
638
637
639 @property
638 @property
640 def mustaudit(self):
639 def mustaudit(self):
641 return self.vfs.mustaudit
640 return self.vfs.mustaudit
642
641
643 @mustaudit.setter
642 @mustaudit.setter
644 def mustaudit(self, onoff):
643 def mustaudit(self, onoff):
645 self.vfs.mustaudit = onoff
644 self.vfs.mustaudit = onoff
646
645
647 @property
646 @property
648 def options(self):
647 def options(self):
649 return self.vfs.options
648 return self.vfs.options
650
649
651 @options.setter
650 @options.setter
652 def options(self, value):
651 def options(self, value):
653 self.vfs.options = value
652 self.vfs.options = value
654
653
655 class filtervfs(abstractvfs, auditvfs):
654 class filtervfs(abstractvfs, auditvfs):
656 '''Wrapper vfs for filtering filenames with a function.'''
655 '''Wrapper vfs for filtering filenames with a function.'''
657
656
658 def __init__(self, vfs, filter):
657 def __init__(self, vfs, filter):
659 auditvfs.__init__(self, vfs)
658 auditvfs.__init__(self, vfs)
660 self._filter = filter
659 self._filter = filter
661
660
662 def __call__(self, path, *args, **kwargs):
661 def __call__(self, path, *args, **kwargs):
663 return self.vfs(self._filter(path), *args, **kwargs)
662 return self.vfs(self._filter(path), *args, **kwargs)
664
663
665 def join(self, path, *insidef):
664 def join(self, path, *insidef):
666 if path:
665 if path:
667 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
666 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
668 else:
667 else:
669 return self.vfs.join(path)
668 return self.vfs.join(path)
670
669
671 filteropener = filtervfs
670 filteropener = filtervfs
672
671
673 class readonlyvfs(abstractvfs, auditvfs):
672 class readonlyvfs(abstractvfs, auditvfs):
674 '''Wrapper vfs preventing any writing.'''
673 '''Wrapper vfs preventing any writing.'''
675
674
676 def __init__(self, vfs):
675 def __init__(self, vfs):
677 auditvfs.__init__(self, vfs)
676 auditvfs.__init__(self, vfs)
678
677
679 def __call__(self, path, mode='r', *args, **kw):
678 def __call__(self, path, mode='r', *args, **kw):
680 if mode not in ('r', 'rb'):
679 if mode not in ('r', 'rb'):
681 raise error.Abort(_('this vfs is read only'))
680 raise error.Abort(_('this vfs is read only'))
682 return self.vfs(path, mode, *args, **kw)
681 return self.vfs(path, mode, *args, **kw)
683
682
684 def join(self, path, *insidef):
683 def join(self, path, *insidef):
685 return self.vfs.join(path, *insidef)
684 return self.vfs.join(path, *insidef)
686
685
687 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
686 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
688 '''yield every hg repository under path, always recursively.
687 '''yield every hg repository under path, always recursively.
689 The recurse flag will only control recursion into repo working dirs'''
688 The recurse flag will only control recursion into repo working dirs'''
690 def errhandler(err):
689 def errhandler(err):
691 if err.filename == path:
690 if err.filename == path:
692 raise err
691 raise err
693 samestat = getattr(os.path, 'samestat', None)
692 samestat = getattr(os.path, 'samestat', None)
694 if followsym and samestat is not None:
693 if followsym and samestat is not None:
695 def adddir(dirlst, dirname):
694 def adddir(dirlst, dirname):
696 match = False
695 match = False
697 dirstat = os.stat(dirname)
696 dirstat = os.stat(dirname)
698 for lstdirstat in dirlst:
697 for lstdirstat in dirlst:
699 if samestat(dirstat, lstdirstat):
698 if samestat(dirstat, lstdirstat):
700 match = True
699 match = True
701 break
700 break
702 if not match:
701 if not match:
703 dirlst.append(dirstat)
702 dirlst.append(dirstat)
704 return not match
703 return not match
705 else:
704 else:
706 followsym = False
705 followsym = False
707
706
708 if (seen_dirs is None) and followsym:
707 if (seen_dirs is None) and followsym:
709 seen_dirs = []
708 seen_dirs = []
710 adddir(seen_dirs, path)
709 adddir(seen_dirs, path)
711 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
710 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
712 dirs.sort()
711 dirs.sort()
713 if '.hg' in dirs:
712 if '.hg' in dirs:
714 yield root # found a repository
713 yield root # found a repository
715 qroot = os.path.join(root, '.hg', 'patches')
714 qroot = os.path.join(root, '.hg', 'patches')
716 if os.path.isdir(os.path.join(qroot, '.hg')):
715 if os.path.isdir(os.path.join(qroot, '.hg')):
717 yield qroot # we have a patch queue repo here
716 yield qroot # we have a patch queue repo here
718 if recurse:
717 if recurse:
719 # avoid recursing inside the .hg directory
718 # avoid recursing inside the .hg directory
720 dirs.remove('.hg')
719 dirs.remove('.hg')
721 else:
720 else:
722 dirs[:] = [] # don't descend further
721 dirs[:] = [] # don't descend further
723 elif followsym:
722 elif followsym:
724 newdirs = []
723 newdirs = []
725 for d in dirs:
724 for d in dirs:
726 fname = os.path.join(root, d)
725 fname = os.path.join(root, d)
727 if adddir(seen_dirs, fname):
726 if adddir(seen_dirs, fname):
728 if os.path.islink(fname):
727 if os.path.islink(fname):
729 for hgname in walkrepos(fname, True, seen_dirs):
728 for hgname in walkrepos(fname, True, seen_dirs):
730 yield hgname
729 yield hgname
731 else:
730 else:
732 newdirs.append(d)
731 newdirs.append(d)
733 dirs[:] = newdirs
732 dirs[:] = newdirs
734
733
735 def osrcpath():
734 def osrcpath():
736 '''return default os-specific hgrc search path'''
735 '''return default os-specific hgrc search path'''
737 path = []
736 path = []
738 defaultpath = os.path.join(util.datapath, 'default.d')
737 defaultpath = os.path.join(util.datapath, 'default.d')
739 if os.path.isdir(defaultpath):
738 if os.path.isdir(defaultpath):
740 for f, kind in osutil.listdir(defaultpath):
739 for f, kind in osutil.listdir(defaultpath):
741 if f.endswith('.rc'):
740 if f.endswith('.rc'):
742 path.append(os.path.join(defaultpath, f))
741 path.append(os.path.join(defaultpath, f))
743 path.extend(systemrcpath())
742 path.extend(systemrcpath())
744 path.extend(userrcpath())
743 path.extend(userrcpath())
745 path = [os.path.normpath(f) for f in path]
744 path = [os.path.normpath(f) for f in path]
746 return path
745 return path
747
746
748 _rcpath = None
747 _rcpath = None
749
748
750 def rcpath():
749 def rcpath():
751 '''return hgrc search path. if env var HGRCPATH is set, use it.
750 '''return hgrc search path. if env var HGRCPATH is set, use it.
752 for each item in path, if directory, use files ending in .rc,
751 for each item in path, if directory, use files ending in .rc,
753 else use item.
752 else use item.
754 make HGRCPATH empty to only look in .hg/hgrc of current repo.
753 make HGRCPATH empty to only look in .hg/hgrc of current repo.
755 if no HGRCPATH, use default os-specific path.'''
754 if no HGRCPATH, use default os-specific path.'''
756 global _rcpath
755 global _rcpath
757 if _rcpath is None:
756 if _rcpath is None:
758 if 'HGRCPATH' in encoding.environ:
757 if 'HGRCPATH' in encoding.environ:
759 _rcpath = []
758 _rcpath = []
760 for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
759 for p in encoding.environ['HGRCPATH'].split(pycompat.ospathsep):
761 if not p:
760 if not p:
762 continue
761 continue
763 p = util.expandpath(p)
762 p = util.expandpath(p)
764 if os.path.isdir(p):
763 if os.path.isdir(p):
765 for f, kind in osutil.listdir(p):
764 for f, kind in osutil.listdir(p):
766 if f.endswith('.rc'):
765 if f.endswith('.rc'):
767 _rcpath.append(os.path.join(p, f))
766 _rcpath.append(os.path.join(p, f))
768 else:
767 else:
769 _rcpath.append(p)
768 _rcpath.append(p)
770 else:
769 else:
771 _rcpath = osrcpath()
770 _rcpath = osrcpath()
772 return _rcpath
771 return _rcpath
773
772
774 def intrev(rev):
773 def intrev(rev):
775 """Return integer for a given revision that can be used in comparison or
774 """Return integer for a given revision that can be used in comparison or
776 arithmetic operation"""
775 arithmetic operation"""
777 if rev is None:
776 if rev is None:
778 return wdirrev
777 return wdirrev
779 return rev
778 return rev
780
779
781 def revsingle(repo, revspec, default='.'):
780 def revsingle(repo, revspec, default='.'):
782 if not revspec and revspec != 0:
781 if not revspec and revspec != 0:
783 return repo[default]
782 return repo[default]
784
783
785 l = revrange(repo, [revspec])
784 l = revrange(repo, [revspec])
786 if not l:
785 if not l:
787 raise error.Abort(_('empty revision set'))
786 raise error.Abort(_('empty revision set'))
788 return repo[l.last()]
787 return repo[l.last()]
789
788
790 def _pairspec(revspec):
789 def _pairspec(revspec):
791 tree = revset.parse(revspec)
790 tree = revset.parse(revspec)
792 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
791 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
793
792
794 def revpair(repo, revs):
793 def revpair(repo, revs):
795 if not revs:
794 if not revs:
796 return repo.dirstate.p1(), None
795 return repo.dirstate.p1(), None
797
796
798 l = revrange(repo, revs)
797 l = revrange(repo, revs)
799
798
800 if not l:
799 if not l:
801 first = second = None
800 first = second = None
802 elif l.isascending():
801 elif l.isascending():
803 first = l.min()
802 first = l.min()
804 second = l.max()
803 second = l.max()
805 elif l.isdescending():
804 elif l.isdescending():
806 first = l.max()
805 first = l.max()
807 second = l.min()
806 second = l.min()
808 else:
807 else:
809 first = l.first()
808 first = l.first()
810 second = l.last()
809 second = l.last()
811
810
812 if first is None:
811 if first is None:
813 raise error.Abort(_('empty revision range'))
812 raise error.Abort(_('empty revision range'))
814 if (first == second and len(revs) >= 2
813 if (first == second and len(revs) >= 2
815 and not all(revrange(repo, [r]) for r in revs)):
814 and not all(revrange(repo, [r]) for r in revs)):
816 raise error.Abort(_('empty revision on one side of range'))
815 raise error.Abort(_('empty revision on one side of range'))
817
816
818 # if top-level is range expression, the result must always be a pair
817 # if top-level is range expression, the result must always be a pair
819 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
818 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
820 return repo.lookup(first), None
819 return repo.lookup(first), None
821
820
822 return repo.lookup(first), repo.lookup(second)
821 return repo.lookup(first), repo.lookup(second)
823
822
824 def revrange(repo, specs):
823 def revrange(repo, specs):
825 """Execute 1 to many revsets and return the union.
824 """Execute 1 to many revsets and return the union.
826
825
827 This is the preferred mechanism for executing revsets using user-specified
826 This is the preferred mechanism for executing revsets using user-specified
828 config options, such as revset aliases.
827 config options, such as revset aliases.
829
828
830 The revsets specified by ``specs`` will be executed via a chained ``OR``
829 The revsets specified by ``specs`` will be executed via a chained ``OR``
831 expression. If ``specs`` is empty, an empty result is returned.
830 expression. If ``specs`` is empty, an empty result is returned.
832
831
833 ``specs`` can contain integers, in which case they are assumed to be
832 ``specs`` can contain integers, in which case they are assumed to be
834 revision numbers.
833 revision numbers.
835
834
836 It is assumed the revsets are already formatted. If you have arguments
835 It is assumed the revsets are already formatted. If you have arguments
837 that need to be expanded in the revset, call ``revset.formatspec()``
836 that need to be expanded in the revset, call ``revset.formatspec()``
838 and pass the result as an element of ``specs``.
837 and pass the result as an element of ``specs``.
839
838
840 Specifying a single revset is allowed.
839 Specifying a single revset is allowed.
841
840
842 Returns a ``revset.abstractsmartset`` which is a list-like interface over
841 Returns a ``revset.abstractsmartset`` which is a list-like interface over
843 integer revisions.
842 integer revisions.
844 """
843 """
845 allspecs = []
844 allspecs = []
846 for spec in specs:
845 for spec in specs:
847 if isinstance(spec, int):
846 if isinstance(spec, int):
848 spec = revset.formatspec('rev(%d)', spec)
847 spec = revset.formatspec('rev(%d)', spec)
849 allspecs.append(spec)
848 allspecs.append(spec)
850 m = revset.matchany(repo.ui, allspecs, repo)
849 m = revset.matchany(repo.ui, allspecs, repo)
851 return m(repo)
850 return m(repo)
852
851
853 def meaningfulparents(repo, ctx):
852 def meaningfulparents(repo, ctx):
854 """Return list of meaningful (or all if debug) parentrevs for rev.
853 """Return list of meaningful (or all if debug) parentrevs for rev.
855
854
856 For merges (two non-nullrev revisions) both parents are meaningful.
855 For merges (two non-nullrev revisions) both parents are meaningful.
857 Otherwise the first parent revision is considered meaningful if it
856 Otherwise the first parent revision is considered meaningful if it
858 is not the preceding revision.
857 is not the preceding revision.
859 """
858 """
860 parents = ctx.parents()
859 parents = ctx.parents()
861 if len(parents) > 1:
860 if len(parents) > 1:
862 return parents
861 return parents
863 if repo.ui.debugflag:
862 if repo.ui.debugflag:
864 return [parents[0], repo['null']]
863 return [parents[0], repo['null']]
865 if parents[0].rev() >= intrev(ctx.rev()) - 1:
864 if parents[0].rev() >= intrev(ctx.rev()) - 1:
866 return []
865 return []
867 return parents
866 return parents
868
867
869 def expandpats(pats):
868 def expandpats(pats):
870 '''Expand bare globs when running on windows.
869 '''Expand bare globs when running on windows.
871 On posix we assume it already has already been done by sh.'''
870 On posix we assume it already has already been done by sh.'''
872 if not util.expandglobs:
871 if not util.expandglobs:
873 return list(pats)
872 return list(pats)
874 ret = []
873 ret = []
875 for kindpat in pats:
874 for kindpat in pats:
876 kind, pat = matchmod._patsplit(kindpat, None)
875 kind, pat = matchmod._patsplit(kindpat, None)
877 if kind is None:
876 if kind is None:
878 try:
877 try:
879 globbed = glob.glob(pat)
878 globbed = glob.glob(pat)
880 except re.error:
879 except re.error:
881 globbed = [pat]
880 globbed = [pat]
882 if globbed:
881 if globbed:
883 ret.extend(globbed)
882 ret.extend(globbed)
884 continue
883 continue
885 ret.append(kindpat)
884 ret.append(kindpat)
886 return ret
885 return ret
887
886
888 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
887 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
889 badfn=None):
888 badfn=None):
890 '''Return a matcher and the patterns that were used.
889 '''Return a matcher and the patterns that were used.
891 The matcher will warn about bad matches, unless an alternate badfn callback
890 The matcher will warn about bad matches, unless an alternate badfn callback
892 is provided.'''
891 is provided.'''
893 if pats == ("",):
892 if pats == ("",):
894 pats = []
893 pats = []
895 if opts is None:
894 if opts is None:
896 opts = {}
895 opts = {}
897 if not globbed and default == 'relpath':
896 if not globbed and default == 'relpath':
898 pats = expandpats(pats or [])
897 pats = expandpats(pats or [])
899
898
900 def bad(f, msg):
899 def bad(f, msg):
901 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
900 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
902
901
903 if badfn is None:
902 if badfn is None:
904 badfn = bad
903 badfn = bad
905
904
906 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
905 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
907 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
906 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
908
907
909 if m.always():
908 if m.always():
910 pats = []
909 pats = []
911 return m, pats
910 return m, pats
912
911
913 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
912 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
914 badfn=None):
913 badfn=None):
915 '''Return a matcher that will warn about bad matches.'''
914 '''Return a matcher that will warn about bad matches.'''
916 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
915 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
917
916
918 def matchall(repo):
917 def matchall(repo):
919 '''Return a matcher that will efficiently match everything.'''
918 '''Return a matcher that will efficiently match everything.'''
920 return matchmod.always(repo.root, repo.getcwd())
919 return matchmod.always(repo.root, repo.getcwd())
921
920
922 def matchfiles(repo, files, badfn=None):
921 def matchfiles(repo, files, badfn=None):
923 '''Return a matcher that will efficiently match exactly these files.'''
922 '''Return a matcher that will efficiently match exactly these files.'''
924 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
923 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
925
924
926 def origpath(ui, repo, filepath):
925 def origpath(ui, repo, filepath):
927 '''customize where .orig files are created
926 '''customize where .orig files are created
928
927
929 Fetch user defined path from config file: [ui] origbackuppath = <path>
928 Fetch user defined path from config file: [ui] origbackuppath = <path>
930 Fall back to default (filepath) if not specified
929 Fall back to default (filepath) if not specified
931 '''
930 '''
932 origbackuppath = ui.config('ui', 'origbackuppath', None)
931 origbackuppath = ui.config('ui', 'origbackuppath', None)
933 if origbackuppath is None:
932 if origbackuppath is None:
934 return filepath + ".orig"
933 return filepath + ".orig"
935
934
936 filepathfromroot = os.path.relpath(filepath, start=repo.root)
935 filepathfromroot = os.path.relpath(filepath, start=repo.root)
937 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
936 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
938
937
939 origbackupdir = repo.vfs.dirname(fullorigpath)
938 origbackupdir = repo.vfs.dirname(fullorigpath)
940 if not repo.vfs.exists(origbackupdir):
939 if not repo.vfs.exists(origbackupdir):
941 ui.note(_('creating directory: %s\n') % origbackupdir)
940 ui.note(_('creating directory: %s\n') % origbackupdir)
942 util.makedirs(origbackupdir)
941 util.makedirs(origbackupdir)
943
942
944 return fullorigpath + ".orig"
943 return fullorigpath + ".orig"
945
944
946 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
945 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
947 if opts is None:
946 if opts is None:
948 opts = {}
947 opts = {}
949 m = matcher
948 m = matcher
950 if dry_run is None:
949 if dry_run is None:
951 dry_run = opts.get('dry_run')
950 dry_run = opts.get('dry_run')
952 if similarity is None:
951 if similarity is None:
953 similarity = float(opts.get('similarity') or 0)
952 similarity = float(opts.get('similarity') or 0)
954
953
955 ret = 0
954 ret = 0
956 join = lambda f: os.path.join(prefix, f)
955 join = lambda f: os.path.join(prefix, f)
957
956
958 wctx = repo[None]
957 wctx = repo[None]
959 for subpath in sorted(wctx.substate):
958 for subpath in sorted(wctx.substate):
960 submatch = matchmod.subdirmatcher(subpath, m)
959 submatch = matchmod.subdirmatcher(subpath, m)
961 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
960 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
962 sub = wctx.sub(subpath)
961 sub = wctx.sub(subpath)
963 try:
962 try:
964 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
963 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
965 ret = 1
964 ret = 1
966 except error.LookupError:
965 except error.LookupError:
967 repo.ui.status(_("skipping missing subrepository: %s\n")
966 repo.ui.status(_("skipping missing subrepository: %s\n")
968 % join(subpath))
967 % join(subpath))
969
968
970 rejected = []
969 rejected = []
971 def badfn(f, msg):
970 def badfn(f, msg):
972 if f in m.files():
971 if f in m.files():
973 m.bad(f, msg)
972 m.bad(f, msg)
974 rejected.append(f)
973 rejected.append(f)
975
974
976 badmatch = matchmod.badmatch(m, badfn)
975 badmatch = matchmod.badmatch(m, badfn)
977 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
976 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
978 badmatch)
977 badmatch)
979
978
980 unknownset = set(unknown + forgotten)
979 unknownset = set(unknown + forgotten)
981 toprint = unknownset.copy()
980 toprint = unknownset.copy()
982 toprint.update(deleted)
981 toprint.update(deleted)
983 for abs in sorted(toprint):
982 for abs in sorted(toprint):
984 if repo.ui.verbose or not m.exact(abs):
983 if repo.ui.verbose or not m.exact(abs):
985 if abs in unknownset:
984 if abs in unknownset:
986 status = _('adding %s\n') % m.uipath(abs)
985 status = _('adding %s\n') % m.uipath(abs)
987 else:
986 else:
988 status = _('removing %s\n') % m.uipath(abs)
987 status = _('removing %s\n') % m.uipath(abs)
989 repo.ui.status(status)
988 repo.ui.status(status)
990
989
991 renames = _findrenames(repo, m, added + unknown, removed + deleted,
990 renames = _findrenames(repo, m, added + unknown, removed + deleted,
992 similarity)
991 similarity)
993
992
994 if not dry_run:
993 if not dry_run:
995 _markchanges(repo, unknown + forgotten, deleted, renames)
994 _markchanges(repo, unknown + forgotten, deleted, renames)
996
995
997 for f in rejected:
996 for f in rejected:
998 if f in m.files():
997 if f in m.files():
999 return 1
998 return 1
1000 return ret
999 return ret
1001
1000
1002 def marktouched(repo, files, similarity=0.0):
1001 def marktouched(repo, files, similarity=0.0):
1003 '''Assert that files have somehow been operated upon. files are relative to
1002 '''Assert that files have somehow been operated upon. files are relative to
1004 the repo root.'''
1003 the repo root.'''
1005 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1004 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1006 rejected = []
1005 rejected = []
1007
1006
1008 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1007 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1009
1008
1010 if repo.ui.verbose:
1009 if repo.ui.verbose:
1011 unknownset = set(unknown + forgotten)
1010 unknownset = set(unknown + forgotten)
1012 toprint = unknownset.copy()
1011 toprint = unknownset.copy()
1013 toprint.update(deleted)
1012 toprint.update(deleted)
1014 for abs in sorted(toprint):
1013 for abs in sorted(toprint):
1015 if abs in unknownset:
1014 if abs in unknownset:
1016 status = _('adding %s\n') % abs
1015 status = _('adding %s\n') % abs
1017 else:
1016 else:
1018 status = _('removing %s\n') % abs
1017 status = _('removing %s\n') % abs
1019 repo.ui.status(status)
1018 repo.ui.status(status)
1020
1019
1021 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1020 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1022 similarity)
1021 similarity)
1023
1022
1024 _markchanges(repo, unknown + forgotten, deleted, renames)
1023 _markchanges(repo, unknown + forgotten, deleted, renames)
1025
1024
1026 for f in rejected:
1025 for f in rejected:
1027 if f in m.files():
1026 if f in m.files():
1028 return 1
1027 return 1
1029 return 0
1028 return 0
1030
1029
1031 def _interestingfiles(repo, matcher):
1030 def _interestingfiles(repo, matcher):
1032 '''Walk dirstate with matcher, looking for files that addremove would care
1031 '''Walk dirstate with matcher, looking for files that addremove would care
1033 about.
1032 about.
1034
1033
1035 This is different from dirstate.status because it doesn't care about
1034 This is different from dirstate.status because it doesn't care about
1036 whether files are modified or clean.'''
1035 whether files are modified or clean.'''
1037 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1036 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1038 audit_path = pathutil.pathauditor(repo.root)
1037 audit_path = pathutil.pathauditor(repo.root)
1039
1038
1040 ctx = repo[None]
1039 ctx = repo[None]
1041 dirstate = repo.dirstate
1040 dirstate = repo.dirstate
1042 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1041 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1043 full=False)
1042 full=False)
1044 for abs, st in walkresults.iteritems():
1043 for abs, st in walkresults.iteritems():
1045 dstate = dirstate[abs]
1044 dstate = dirstate[abs]
1046 if dstate == '?' and audit_path.check(abs):
1045 if dstate == '?' and audit_path.check(abs):
1047 unknown.append(abs)
1046 unknown.append(abs)
1048 elif dstate != 'r' and not st:
1047 elif dstate != 'r' and not st:
1049 deleted.append(abs)
1048 deleted.append(abs)
1050 elif dstate == 'r' and st:
1049 elif dstate == 'r' and st:
1051 forgotten.append(abs)
1050 forgotten.append(abs)
1052 # for finding renames
1051 # for finding renames
1053 elif dstate == 'r' and not st:
1052 elif dstate == 'r' and not st:
1054 removed.append(abs)
1053 removed.append(abs)
1055 elif dstate == 'a':
1054 elif dstate == 'a':
1056 added.append(abs)
1055 added.append(abs)
1057
1056
1058 return added, unknown, deleted, removed, forgotten
1057 return added, unknown, deleted, removed, forgotten
1059
1058
1060 def _findrenames(repo, matcher, added, removed, similarity):
1059 def _findrenames(repo, matcher, added, removed, similarity):
1061 '''Find renames from removed files to added ones.'''
1060 '''Find renames from removed files to added ones.'''
1062 renames = {}
1061 renames = {}
1063 if similarity > 0:
1062 if similarity > 0:
1064 for old, new, score in similar.findrenames(repo, added, removed,
1063 for old, new, score in similar.findrenames(repo, added, removed,
1065 similarity):
1064 similarity):
1066 if (repo.ui.verbose or not matcher.exact(old)
1065 if (repo.ui.verbose or not matcher.exact(old)
1067 or not matcher.exact(new)):
1066 or not matcher.exact(new)):
1068 repo.ui.status(_('recording removal of %s as rename to %s '
1067 repo.ui.status(_('recording removal of %s as rename to %s '
1069 '(%d%% similar)\n') %
1068 '(%d%% similar)\n') %
1070 (matcher.rel(old), matcher.rel(new),
1069 (matcher.rel(old), matcher.rel(new),
1071 score * 100))
1070 score * 100))
1072 renames[new] = old
1071 renames[new] = old
1073 return renames
1072 return renames
1074
1073
1075 def _markchanges(repo, unknown, deleted, renames):
1074 def _markchanges(repo, unknown, deleted, renames):
1076 '''Marks the files in unknown as added, the files in deleted as removed,
1075 '''Marks the files in unknown as added, the files in deleted as removed,
1077 and the files in renames as copied.'''
1076 and the files in renames as copied.'''
1078 wctx = repo[None]
1077 wctx = repo[None]
1079 with repo.wlock():
1078 with repo.wlock():
1080 wctx.forget(deleted)
1079 wctx.forget(deleted)
1081 wctx.add(unknown)
1080 wctx.add(unknown)
1082 for new, old in renames.iteritems():
1081 for new, old in renames.iteritems():
1083 wctx.copy(old, new)
1082 wctx.copy(old, new)
1084
1083
1085 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1084 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1086 """Update the dirstate to reflect the intent of copying src to dst. For
1085 """Update the dirstate to reflect the intent of copying src to dst. For
1087 different reasons it might not end with dst being marked as copied from src.
1086 different reasons it might not end with dst being marked as copied from src.
1088 """
1087 """
1089 origsrc = repo.dirstate.copied(src) or src
1088 origsrc = repo.dirstate.copied(src) or src
1090 if dst == origsrc: # copying back a copy?
1089 if dst == origsrc: # copying back a copy?
1091 if repo.dirstate[dst] not in 'mn' and not dryrun:
1090 if repo.dirstate[dst] not in 'mn' and not dryrun:
1092 repo.dirstate.normallookup(dst)
1091 repo.dirstate.normallookup(dst)
1093 else:
1092 else:
1094 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1093 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1095 if not ui.quiet:
1094 if not ui.quiet:
1096 ui.warn(_("%s has not been committed yet, so no copy "
1095 ui.warn(_("%s has not been committed yet, so no copy "
1097 "data will be stored for %s.\n")
1096 "data will be stored for %s.\n")
1098 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1097 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1099 if repo.dirstate[dst] in '?r' and not dryrun:
1098 if repo.dirstate[dst] in '?r' and not dryrun:
1100 wctx.add([dst])
1099 wctx.add([dst])
1101 elif not dryrun:
1100 elif not dryrun:
1102 wctx.copy(origsrc, dst)
1101 wctx.copy(origsrc, dst)
1103
1102
1104 def readrequires(opener, supported):
1103 def readrequires(opener, supported):
1105 '''Reads and parses .hg/requires and checks if all entries found
1104 '''Reads and parses .hg/requires and checks if all entries found
1106 are in the list of supported features.'''
1105 are in the list of supported features.'''
1107 requirements = set(opener.read("requires").splitlines())
1106 requirements = set(opener.read("requires").splitlines())
1108 missings = []
1107 missings = []
1109 for r in requirements:
1108 for r in requirements:
1110 if r not in supported:
1109 if r not in supported:
1111 if not r or not r[0].isalnum():
1110 if not r or not r[0].isalnum():
1112 raise error.RequirementError(_(".hg/requires file is corrupt"))
1111 raise error.RequirementError(_(".hg/requires file is corrupt"))
1113 missings.append(r)
1112 missings.append(r)
1114 missings.sort()
1113 missings.sort()
1115 if missings:
1114 if missings:
1116 raise error.RequirementError(
1115 raise error.RequirementError(
1117 _("repository requires features unknown to this Mercurial: %s")
1116 _("repository requires features unknown to this Mercurial: %s")
1118 % " ".join(missings),
1117 % " ".join(missings),
1119 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1118 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1120 " for more information"))
1119 " for more information"))
1121 return requirements
1120 return requirements
1122
1121
1123 def writerequires(opener, requirements):
1122 def writerequires(opener, requirements):
1124 with opener('requires', 'w') as fp:
1123 with opener('requires', 'w') as fp:
1125 for r in sorted(requirements):
1124 for r in sorted(requirements):
1126 fp.write("%s\n" % r)
1125 fp.write("%s\n" % r)
1127
1126
1128 class filecachesubentry(object):
1127 class filecachesubentry(object):
1129 def __init__(self, path, stat):
1128 def __init__(self, path, stat):
1130 self.path = path
1129 self.path = path
1131 self.cachestat = None
1130 self.cachestat = None
1132 self._cacheable = None
1131 self._cacheable = None
1133
1132
1134 if stat:
1133 if stat:
1135 self.cachestat = filecachesubentry.stat(self.path)
1134 self.cachestat = filecachesubentry.stat(self.path)
1136
1135
1137 if self.cachestat:
1136 if self.cachestat:
1138 self._cacheable = self.cachestat.cacheable()
1137 self._cacheable = self.cachestat.cacheable()
1139 else:
1138 else:
1140 # None means we don't know yet
1139 # None means we don't know yet
1141 self._cacheable = None
1140 self._cacheable = None
1142
1141
1143 def refresh(self):
1142 def refresh(self):
1144 if self.cacheable():
1143 if self.cacheable():
1145 self.cachestat = filecachesubentry.stat(self.path)
1144 self.cachestat = filecachesubentry.stat(self.path)
1146
1145
1147 def cacheable(self):
1146 def cacheable(self):
1148 if self._cacheable is not None:
1147 if self._cacheable is not None:
1149 return self._cacheable
1148 return self._cacheable
1150
1149
1151 # we don't know yet, assume it is for now
1150 # we don't know yet, assume it is for now
1152 return True
1151 return True
1153
1152
1154 def changed(self):
1153 def changed(self):
1155 # no point in going further if we can't cache it
1154 # no point in going further if we can't cache it
1156 if not self.cacheable():
1155 if not self.cacheable():
1157 return True
1156 return True
1158
1157
1159 newstat = filecachesubentry.stat(self.path)
1158 newstat = filecachesubentry.stat(self.path)
1160
1159
1161 # we may not know if it's cacheable yet, check again now
1160 # we may not know if it's cacheable yet, check again now
1162 if newstat and self._cacheable is None:
1161 if newstat and self._cacheable is None:
1163 self._cacheable = newstat.cacheable()
1162 self._cacheable = newstat.cacheable()
1164
1163
1165 # check again
1164 # check again
1166 if not self._cacheable:
1165 if not self._cacheable:
1167 return True
1166 return True
1168
1167
1169 if self.cachestat != newstat:
1168 if self.cachestat != newstat:
1170 self.cachestat = newstat
1169 self.cachestat = newstat
1171 return True
1170 return True
1172 else:
1171 else:
1173 return False
1172 return False
1174
1173
1175 @staticmethod
1174 @staticmethod
1176 def stat(path):
1175 def stat(path):
1177 try:
1176 try:
1178 return util.cachestat(path)
1177 return util.cachestat(path)
1179 except OSError as e:
1178 except OSError as e:
1180 if e.errno != errno.ENOENT:
1179 if e.errno != errno.ENOENT:
1181 raise
1180 raise
1182
1181
1183 class filecacheentry(object):
1182 class filecacheentry(object):
1184 def __init__(self, paths, stat=True):
1183 def __init__(self, paths, stat=True):
1185 self._entries = []
1184 self._entries = []
1186 for path in paths:
1185 for path in paths:
1187 self._entries.append(filecachesubentry(path, stat))
1186 self._entries.append(filecachesubentry(path, stat))
1188
1187
1189 def changed(self):
1188 def changed(self):
1190 '''true if any entry has changed'''
1189 '''true if any entry has changed'''
1191 for entry in self._entries:
1190 for entry in self._entries:
1192 if entry.changed():
1191 if entry.changed():
1193 return True
1192 return True
1194 return False
1193 return False
1195
1194
1196 def refresh(self):
1195 def refresh(self):
1197 for entry in self._entries:
1196 for entry in self._entries:
1198 entry.refresh()
1197 entry.refresh()
1199
1198
1200 class filecache(object):
1199 class filecache(object):
1201 '''A property like decorator that tracks files under .hg/ for updates.
1200 '''A property like decorator that tracks files under .hg/ for updates.
1202
1201
1203 Records stat info when called in _filecache.
1202 Records stat info when called in _filecache.
1204
1203
1205 On subsequent calls, compares old stat info with new info, and recreates the
1204 On subsequent calls, compares old stat info with new info, and recreates the
1206 object when any of the files changes, updating the new stat info in
1205 object when any of the files changes, updating the new stat info in
1207 _filecache.
1206 _filecache.
1208
1207
1209 Mercurial either atomic renames or appends for files under .hg,
1208 Mercurial either atomic renames or appends for files under .hg,
1210 so to ensure the cache is reliable we need the filesystem to be able
1209 so to ensure the cache is reliable we need the filesystem to be able
1211 to tell us if a file has been replaced. If it can't, we fallback to
1210 to tell us if a file has been replaced. If it can't, we fallback to
1212 recreating the object on every call (essentially the same behavior as
1211 recreating the object on every call (essentially the same behavior as
1213 propertycache).
1212 propertycache).
1214
1213
1215 '''
1214 '''
1216 def __init__(self, *paths):
1215 def __init__(self, *paths):
1217 self.paths = paths
1216 self.paths = paths
1218
1217
1219 def join(self, obj, fname):
1218 def join(self, obj, fname):
1220 """Used to compute the runtime path of a cached file.
1219 """Used to compute the runtime path of a cached file.
1221
1220
1222 Users should subclass filecache and provide their own version of this
1221 Users should subclass filecache and provide their own version of this
1223 function to call the appropriate join function on 'obj' (an instance
1222 function to call the appropriate join function on 'obj' (an instance
1224 of the class that its member function was decorated).
1223 of the class that its member function was decorated).
1225 """
1224 """
1226 return obj.join(fname)
1225 return obj.join(fname)
1227
1226
1228 def __call__(self, func):
1227 def __call__(self, func):
1229 self.func = func
1228 self.func = func
1230 self.name = func.__name__
1229 self.name = func.__name__
1231 return self
1230 return self
1232
1231
1233 def __get__(self, obj, type=None):
1232 def __get__(self, obj, type=None):
1234 # if accessed on the class, return the descriptor itself.
1233 # if accessed on the class, return the descriptor itself.
1235 if obj is None:
1234 if obj is None:
1236 return self
1235 return self
1237 # do we need to check if the file changed?
1236 # do we need to check if the file changed?
1238 if self.name in obj.__dict__:
1237 if self.name in obj.__dict__:
1239 assert self.name in obj._filecache, self.name
1238 assert self.name in obj._filecache, self.name
1240 return obj.__dict__[self.name]
1239 return obj.__dict__[self.name]
1241
1240
1242 entry = obj._filecache.get(self.name)
1241 entry = obj._filecache.get(self.name)
1243
1242
1244 if entry:
1243 if entry:
1245 if entry.changed():
1244 if entry.changed():
1246 entry.obj = self.func(obj)
1245 entry.obj = self.func(obj)
1247 else:
1246 else:
1248 paths = [self.join(obj, path) for path in self.paths]
1247 paths = [self.join(obj, path) for path in self.paths]
1249
1248
1250 # We stat -before- creating the object so our cache doesn't lie if
1249 # We stat -before- creating the object so our cache doesn't lie if
1251 # a writer modified between the time we read and stat
1250 # a writer modified between the time we read and stat
1252 entry = filecacheentry(paths, True)
1251 entry = filecacheentry(paths, True)
1253 entry.obj = self.func(obj)
1252 entry.obj = self.func(obj)
1254
1253
1255 obj._filecache[self.name] = entry
1254 obj._filecache[self.name] = entry
1256
1255
1257 obj.__dict__[self.name] = entry.obj
1256 obj.__dict__[self.name] = entry.obj
1258 return entry.obj
1257 return entry.obj
1259
1258
1260 def __set__(self, obj, value):
1259 def __set__(self, obj, value):
1261 if self.name not in obj._filecache:
1260 if self.name not in obj._filecache:
1262 # we add an entry for the missing value because X in __dict__
1261 # we add an entry for the missing value because X in __dict__
1263 # implies X in _filecache
1262 # implies X in _filecache
1264 paths = [self.join(obj, path) for path in self.paths]
1263 paths = [self.join(obj, path) for path in self.paths]
1265 ce = filecacheentry(paths, False)
1264 ce = filecacheentry(paths, False)
1266 obj._filecache[self.name] = ce
1265 obj._filecache[self.name] = ce
1267 else:
1266 else:
1268 ce = obj._filecache[self.name]
1267 ce = obj._filecache[self.name]
1269
1268
1270 ce.obj = value # update cached copy
1269 ce.obj = value # update cached copy
1271 obj.__dict__[self.name] = value # update copy returned by obj.x
1270 obj.__dict__[self.name] = value # update copy returned by obj.x
1272
1271
1273 def __delete__(self, obj):
1272 def __delete__(self, obj):
1274 try:
1273 try:
1275 del obj.__dict__[self.name]
1274 del obj.__dict__[self.name]
1276 except KeyError:
1275 except KeyError:
1277 raise AttributeError(self.name)
1276 raise AttributeError(self.name)
1278
1277
1279 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1278 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1280 if lock is None:
1279 if lock is None:
1281 raise error.LockInheritanceContractViolation(
1280 raise error.LockInheritanceContractViolation(
1282 'lock can only be inherited while held')
1281 'lock can only be inherited while held')
1283 if environ is None:
1282 if environ is None:
1284 environ = {}
1283 environ = {}
1285 with lock.inherit() as locker:
1284 with lock.inherit() as locker:
1286 environ[envvar] = locker
1285 environ[envvar] = locker
1287 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1286 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1288
1287
1289 def wlocksub(repo, cmd, *args, **kwargs):
1288 def wlocksub(repo, cmd, *args, **kwargs):
1290 """run cmd as a subprocess that allows inheriting repo's wlock
1289 """run cmd as a subprocess that allows inheriting repo's wlock
1291
1290
1292 This can only be called while the wlock is held. This takes all the
1291 This can only be called while the wlock is held. This takes all the
1293 arguments that ui.system does, and returns the exit code of the
1292 arguments that ui.system does, and returns the exit code of the
1294 subprocess."""
1293 subprocess."""
1295 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1294 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1296 **kwargs)
1295 **kwargs)
1297
1296
1298 def gdinitconfig(ui):
1297 def gdinitconfig(ui):
1299 """helper function to know if a repo should be created as general delta
1298 """helper function to know if a repo should be created as general delta
1300 """
1299 """
1301 # experimental config: format.generaldelta
1300 # experimental config: format.generaldelta
1302 return (ui.configbool('format', 'generaldelta', False)
1301 return (ui.configbool('format', 'generaldelta', False)
1303 or ui.configbool('format', 'usegeneraldelta', True))
1302 or ui.configbool('format', 'usegeneraldelta', True))
1304
1303
1305 def gddeltaconfig(ui):
1304 def gddeltaconfig(ui):
1306 """helper function to know if incoming delta should be optimised
1305 """helper function to know if incoming delta should be optimised
1307 """
1306 """
1308 # experimental config: format.generaldelta
1307 # experimental config: format.generaldelta
1309 return ui.configbool('format', 'generaldelta', False)
1308 return ui.configbool('format', 'generaldelta', False)
1310
1309
1311 class closewrapbase(object):
1310 class closewrapbase(object):
1312 """Base class of wrapper, which hooks closing
1311 """Base class of wrapper, which hooks closing
1313
1312
1314 Do not instantiate outside of the vfs layer.
1313 Do not instantiate outside of the vfs layer.
1315 """
1314 """
1316 def __init__(self, fh):
1315 def __init__(self, fh):
1317 object.__setattr__(self, '_origfh', fh)
1316 object.__setattr__(self, '_origfh', fh)
1318
1317
1319 def __getattr__(self, attr):
1318 def __getattr__(self, attr):
1320 return getattr(self._origfh, attr)
1319 return getattr(self._origfh, attr)
1321
1320
1322 def __setattr__(self, attr, value):
1321 def __setattr__(self, attr, value):
1323 return setattr(self._origfh, attr, value)
1322 return setattr(self._origfh, attr, value)
1324
1323
1325 def __delattr__(self, attr):
1324 def __delattr__(self, attr):
1326 return delattr(self._origfh, attr)
1325 return delattr(self._origfh, attr)
1327
1326
1328 def __enter__(self):
1327 def __enter__(self):
1329 return self._origfh.__enter__()
1328 return self._origfh.__enter__()
1330
1329
1331 def __exit__(self, exc_type, exc_value, exc_tb):
1330 def __exit__(self, exc_type, exc_value, exc_tb):
1332 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1331 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1333
1332
1334 def close(self):
1333 def close(self):
1335 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1334 raise NotImplementedError('attempted instantiating ' + str(type(self)))
1336
1335
1337 class delayclosedfile(closewrapbase):
1336 class delayclosedfile(closewrapbase):
1338 """Proxy for a file object whose close is delayed.
1337 """Proxy for a file object whose close is delayed.
1339
1338
1340 Do not instantiate outside of the vfs layer.
1339 Do not instantiate outside of the vfs layer.
1341 """
1340 """
1342 def __init__(self, fh, closer):
1341 def __init__(self, fh, closer):
1343 super(delayclosedfile, self).__init__(fh)
1342 super(delayclosedfile, self).__init__(fh)
1344 object.__setattr__(self, '_closer', closer)
1343 object.__setattr__(self, '_closer', closer)
1345
1344
1346 def __exit__(self, exc_type, exc_value, exc_tb):
1345 def __exit__(self, exc_type, exc_value, exc_tb):
1347 self._closer.close(self._origfh)
1346 self._closer.close(self._origfh)
1348
1347
1349 def close(self):
1348 def close(self):
1350 self._closer.close(self._origfh)
1349 self._closer.close(self._origfh)
1351
1350
class backgroundfilecloser(object):
    """Coordinates background closing of file handles on multiple threads."""
    def __init__(self, ui, expectedcount=-1):
        self._running = False
        self._entered = False
        self._threads = []
        self._threadexception = None

        # Only Windows/NTFS has slow file closing. So only enable by default
        # on that platform. But allow to be enabled elsewhere for testing.
        defaultenabled = os.name == 'nt'
        if not ui.configbool('worker', 'backgroundclose', defaultenabled):
            return

        # There is overhead to starting and stopping the background threads.
        # Don't do background processing unless the file count is large enough
        # to justify it.
        minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
                                    2048)
        # FUTURE dynamically start background threads after minfilecount
        # closes. (We don't currently have any callers that don't know their
        # file count.)
        if 0 < expectedcount < minfilecount:
            return

        # Windows defaults to a limit of 512 open files. A buffer of 128
        # should give us enough headway.
        maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
        threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)

        ui.debug('starting %d threads for background file closing\n' %
                 threadcount)

        self._queue = util.queue(maxsize=maxqueue)
        self._running = True

        for _i in range(threadcount):
            worker = threading.Thread(target=self._worker,
                                      name='backgroundcloser')
            self._threads.append(worker)
            worker.start()

    def __enter__(self):
        self._entered = True
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._running = False

        # Join the workers so no open file handle outlives the lifetime of
        # this context manager.
        for worker in self._threads:
            worker.join()

    def _worker(self):
        """Main routine for worker thread."""
        while True:
            try:
                fh = self._queue.get(block=True, timeout=0.100)
            except util.empty:
                if not self._running:
                    break
                continue
            # Catch everything: letting the thread die here could orphan
            # file descriptors still sitting in the queue.
            try:
                fh.close()
            except Exception as exc:
                # Stash so close() can re-raise from the main thread later.
                self._threadexception = exc

    def close(self, fh):
        """Schedule a file for closing."""
        if not self._entered:
            raise error.Abort(_('can only call close() when context manager '
                                'active'))

        # If a background thread encountered an exception, raise now so we
        # fail fast. Otherwise we may potentially go on for minutes until
        # the error is acted on.
        if self._threadexception:
            exc, self._threadexception = self._threadexception, None
            raise exc

        # If we're not actively running, close synchronously.
        if not self._running:
            fh.close()
            return

        self._queue.put(fh, block=True, timeout=None)
1442
1441
class checkambigatclosing(closewrapbase):
    """Proxy for a file object, to avoid ambiguity of file stat

    See also util.filestat for detail about "ambiguity of file stat".

    This proxy is useful only if the target file is guarded by any
    lock (e.g. repo.lock or repo.wlock)

    Do not instantiate outside of the vfs layer.
    """
    def __init__(self, fh):
        super(checkambigatclosing, self).__init__(fh)
        # Snapshot the stat before any writes, for comparison at close time.
        object.__setattr__(self, '_oldstat', util.filestat(fh.name))

    def _checkambig(self):
        oldstat = self._oldstat
        if not oldstat.stat:
            return
        newstat = util.filestat(self._origfh.name)
        if newstat.isambig(oldstat):
            # stat of changed file is ambiguous to original one
            newstat.avoidambig(self._origfh.name, oldstat)

    def __exit__(self, exc_type, exc_value, exc_tb):
        self._origfh.__exit__(exc_type, exc_value, exc_tb)
        self._checkambig()

    def close(self):
        self._origfh.close()
        self._checkambig()
@@ -1,3093 +1,3111 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import string
31 import string
32 import subprocess
32 import subprocess
33 import sys
33 import sys
34 import tempfile
34 import tempfile
35 import textwrap
35 import textwrap
36 import time
36 import time
37 import traceback
37 import traceback
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 osutil,
44 osutil,
45 parsers,
45 parsers,
46 pycompat,
46 pycompat,
47 )
47 )
48
48
# Re-export selected pycompat names at this module's top level so callers
# can write e.g. util.queue / util.stringio regardless of Python version.
for attr in (
    'empty',
    'httplib',
    'httpserver',
    'pickle',
    'queue',
    'urlerr',
    'urlparse',
    # we do import urlreq, but we do it outside the loop
    #'urlreq',
    'stringio',
    'socketserver',
    'xmlrpclib',
):
    a = pycompat.sysstr(attr)
    globals()[a] = getattr(pycompat, a)

# This line is to make pyflakes happy:
urlreq = pycompat.urlreq
68
68
# Pick the platform backend once; everything below aliases into it.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._

# Stable module-level names for the platform-specific implementations, so
# the rest of the codebase never has to care which OS module backs them.
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it provides one
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
130
130
# Python compatibility

# Private sentinel used by safehasattr() below to distinguish "attribute
# missing" from "attribute set to None".
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
# NOTE(review): os.stat_float_times was removed in Python 3.7 -- this line
# only works on the Python versions this file targets; verify before reuse.
os.stat_float_times(False)
139
139
def safehasattr(thing, attr):
    """Return True if *thing* has an attribute named *attr*.

    Implemented with getattr() and a private sentinel default rather
    than the builtin hasattr().
    """
    return getattr(thing, attr, _notset) is not _notset
142
142
# Registry of supported digest algorithms, keyed by their wire name.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: the strength ordering may only name registered algorithms.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
153
153
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create hashers for each name in ``digests``; abort on unknown
        names. ``s`` optionally seeds all hashers with initial data."""
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` into every tracked hasher."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for ``key``; abort if ``key`` is unknown."""
        if key not in DIGESTS:
            # BUG FIX: the message previously interpolated 'k' -- a stale
            # module-level loop variable -- so it reported the wrong digest
            # name. Use the actual lookup key.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""
        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
200
200
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # Every byte read is also folded into the running digests.
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Abort unless the byte count and all expected digests match."""
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            actual = self._digester[k]
            if v != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, actual))
232
232
# Provide a 'buffer' callable on interpreters that lack the builtin.
try:
    buffer = buffer
except NameError:
    if pycompat.ispy3:
        def buffer(sliceable, offset=0):
            # memoryview gives a zero-copy slice on Python 3
            return memoryview(sliceable)[offset:]
    else:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]

closefds = os.name == 'posix'

# Read granularity for bufferedinputpipe below.
_chunksize = 4096
246
246
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # Pull from the pipe until the request can be satisfied or the
        # stream ends, then hand back at most 'size' bytes.
        while self._lenbuf < size and not self._eof:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with
            # a _frombuffer call that collapses the chunks.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while lfi < 0 and not self._eof:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        if len(self._buffer) == 1:
            buf = self._buffer[0]
        else:
            buf = ''.join(self._buffer)

        data = buf[:size]
        rest = buf[len(data):]
        if rest:
            self._buffer = [rest]
            self._lenbuf = len(rest)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        chunk = os.read(self._input.fileno(), _chunksize)
        if chunk:
            self._lenbuf += len(chunk)
            self._buffer.append(chunk)
        else:
            self._eof = True
340
340
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` in a shell and return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
351
351
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but without the trailing process object."""
    return popen4(cmd, env, newlines)[:3]
355
355
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` in a shell; return (stdin, stdout, stderr, process)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
364
364
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # No generated version module (e.g. running from a raw checkout).
        return 'unknown'
    return __version__.version
372
372
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # Split the "extra" part off at the first '+' or '-'. The pattern is a
    # raw string: the previous non-raw '[\+-]' contained an invalid escape
    # sequence, which newer Python versions warn about.
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # Stop at the first non-numeric component ('rc', etc.).
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
441
441
# used by parsedate: every strptime() pattern tried, in order, when parsing
# a user-supplied date string.
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M', # without seconds
    '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
    '%Y-%m-%dT%H%M', # without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M', # without seconds
    '%Y-%m-%d %H%M%S', # without :
    '%Y-%m-%d %H%M', # without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# Coarser patterns accepted in addition to the defaults (e.g. bare years),
# used where a date *range* match is wanted.
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
483
483
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-argument functions only ever have one result; keep it in a
        # one-element list so the closure can mutate it
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f
    results = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in results:
                results[arg] = func(arg)
            return results[arg]
    else:
        def f(*args):
            if args not in results:
                results[args] = func(*args)
            return results[args]

    return f
509
509
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; assigning to an existing key
    moves it to the end (see __setitem__). insert() places a key at an
    explicit position.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # re-appending an existing key moves it to the end
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # Return the popped value; the previous implementation discarded
        # dict.pop's result, so pop() always returned None, breaking the
        # standard dict.pop contract.
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent (pop fell back to the supplied default)
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an explicit position in the ordering
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
558
558
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps per-node memory small; caches hold many nodes.
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.prev = None
        self.next = None
        # _notset (module-level sentinel) marks a node with no entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
577
577
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # max: maximum number of entries retained before recycling
        self._cache = {}

        # Circular list starts as a single empty node pointing at itself.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        # _size tracks allocated nodes (grown lazily up to _capacity),
        # not the number of populated entries (that is len(self._cache)).
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        # membership test does NOT refresh LRU order
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # item access refreshes LRU order (unlike get())
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # NOTE: unlike __getitem__, get() does not call _movetohead(), so
        # it does not refresh the entry's LRU position.
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # Empty every node (nodes themselves are kept for reuse) and drop
        # the backing dict entries.
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
736
736
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    results = {}
    order = collections.deque()
    argcount = func.__code__.co_argcount
    if argcount == 1:
        def f(arg):
            if arg in results:
                # refresh recency for a hit
                order.remove(arg)
            else:
                # evict the least recently used entry once past 20 items
                if len(results) > 20:
                    del results[order.popleft()]
                results[arg] = func(arg)
            order.append(arg)
            return results[arg]
    else:
        def f(*args):
            if args in results:
                order.remove(args)
            else:
                if len(results) > 20:
                    del results[order.popleft()]
                results[args] = func(*args)
            order.append(args)
            return results[args]

    return f
763
763
class propertycache(object):
    """Non-data descriptor that computes an attribute once and caches it.

    The first access runs the wrapped function and stores the result in
    the instance __dict__ under the function's name, so subsequent
    accesses bypass this descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
776
776
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate(s)
    # stderr is not captured (no stderr=PIPE), so it is always None here
    return stdout
783
783
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    infilename = outfilename = None
    try:
        # write the input to a temp file the command can read
        infd, infilename = tempfile.mkstemp(prefix='hg-filter-in-')
        infile = os.fdopen(infd, 'wb')
        infile.write(s)
        infile.close()
        # pre-create the output file; the command will overwrite it
        outfd, outfilename = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', infilename)
        cmd = cmd.replace('OUTFILE', outfilename)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS reports success with the low bit set
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outfilename)
    finally:
        # best-effort cleanup of both temp files
        for name in (infilename, outfilename):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
817
817
# Maps a filter-command prefix to the helper implementing it; commands
# with no recognized prefix fall through to pipefilter (see filter()).
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
822
822
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # NOTE: intentionally shadows the builtin 'filter' within this module
    for prefix, fn in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        return fn(s, cmd[len(prefix):].lstrip())
    # no explicit prefix: treat the whole command as a pipe filter
    return pipefilter(s, cmd)
829
829
def binary(s):
    """return true if a string is binary data"""
    # empty/None input is never considered binary; otherwise the presence
    # of a NUL byte is the heuristic
    if not s:
        return False
    return '\0' in s
833
833
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # position of the highest set bit (0 for x == 0)
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            continue
        if min < max:
            # grow the threshold: at least double, or jump to the
            # magnitude of what we just accumulated, capped at max
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        # flush whatever remains, even if below the threshold
        yield ''.join(pending)
864
864
# convenience re-export so util users need not import the error module
Abort = error.Abort
866
866
def always(fn):
    """Return True regardless of *fn* (constant-true predicate)."""
    return True
869
869
def never(fn):
    """Return False regardless of *fn* (constant-false predicate)."""
    return False
872
872
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    # on 2.7+ the workaround is unnecessary; return the function untouched
    if sys.version_info >= (2, 7):
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had GC on to begin with
            if wasenabled:
                gc.enable()
    return wrapper
896
896
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # on a different drive, a relative path is impossible: anchor n2
        # at root instead
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    fromparts.reverse()
    toparts.reverse()
    # strip the common leading components (compared at the reversed tails)
    while fromparts and toparts and fromparts[-1] == toparts[-1]:
        fromparts.pop()
        toparts.pop()
    toparts.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join(['..'] * len(fromparts) + toparts) or '.'
922
922
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    frozen = (safehasattr(sys, "frozen") or # new py2exe
              safehasattr(sys, "importers") or # old py2exe
              imp.is_frozen(u"__main__")) # tools/freeze
    return frozen
932
932
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# normalize to bytes before handing the path to i18n (on Python 3,
# __file__ is a unicode str)
if not isinstance(datapath, bytes):
    datapath = pycompat.fsencode(datapath)

i18n.setdatapath(datapath)

# cached path of the 'hg' executable; written by _sethgexecutable() and
# lazily populated by hgexecutable()
_hgexecutable = None
946
946
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hgenv = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hgenv:
            # explicit override always wins
            _sethgexecutable(hgenv)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            # last resort: search PATH, then fall back to argv[0]'s name
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
969
969
def _sethgexecutable(path):
    """Record *path* as the location of the 'hg' executable."""
    global _hgexecutable
    _hgexecutable = path
974
974
975 def _isstdout(f):
975 def _isstdout(f):
976 fileno = getattr(f, 'fileno', None)
976 fileno = getattr(f, 'fileno', None)
977 return fileno and fileno() == sys.__stdout__.fileno()
977 return fileno and fileno() == sys.__stdout__.fileno()
978
978
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # flush our own stdout so the child's output is not interleaved with
    # buffered output of ours; best-effort only
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    # keep the unquoted command for error reporting below
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # pass the caller's environment additions through py2shell and
        # advertise our own executable to child hg processes via $HG
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # caller wants output captured: merge stderr into stdout and
            # copy the child's output line by line into 'out'
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    # OpenVMS signals success with the low bit set
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1034
1034
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def checked(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback exactly one frame deep means the TypeError came
            # from the call itself (bad argument list), not from inside
            # the function body.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return checked
1046
1046
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the destination's stat so ambiguity can be
            # detected after the copy replaces it
            oldstat = filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one:
                    # push mtime forward one second so callers comparing
                    # timestamps see a change
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1091
1091
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # only attempt hardlinking when source and destination share a
        # device; cross-device links are impossible
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    topic = _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by files already processed
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed; stop trying for the rest of the tree
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)

    progress(topic, None)

    return hardlink, num
1128
1128
# names and characters that Windows refuses in any path component
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (0-31) are never valid on Windows
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # only the part before the first dot counts for reserved names
        # (so "con.xml" is rejected but "xml.con" is fine)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # trailing dot or space is disallowed, except for "." and ".."
        # (substring test against '..' covers both special names)
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t

if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1184
1184
def makelock(info, pathname):
    """Create a lock file at pathname holding info.

    A symlink whose target is info is preferred (atomic and readable
    without opening); when symlinks are unsupported, fall back to an
    exclusively-created regular file containing info.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # the lock already existing is an error; any other failure
        # (e.g. filesystem without symlinks) falls through to the
        # regular-file path below
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # O_EXCL keeps creation atomic for the fallback as well
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
1197
1197
def readlock(pathname):
    """Return the contents of the lock at pathname.

    Mirrors makelock: try the symlink target first, then fall back to
    reading a regular lock file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported -- both
        # mean "use the regular-file fallback"
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
1210
1210
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # file-like object without a real descriptor: stat by name
        return os.stat(fp.name)
1217
1217
# File system features

def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    # build a differently-cased sibling name to probe with
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
    if b == b2:
        return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        # identical stat result means both spellings hit the same file,
        # i.e. the filesystem folds case
        if s2 == s1:
            return False
        return True
    except OSError:
        return True
1242
1242
try:
    import re2
    # re2 importable: leave _re2 as None so _checkre2() can verify at
    # first use that matching actually works (see issue3964)
    _re2 = None
except ImportError:
    _re2 = False
1248
1248
class _re(object):
    """Facade over the re module that transparently prefers re2."""

    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        # re2 only understands inline flags, and only these two
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()
1293
1293
# per-directory cache of normcased name -> on-disk name mappings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE: str.replace returns a new string -- the previous code
    # discarded the result, leaving '\' unescaped inside the character
    # classes built below.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1336
1336
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # couldn't even create the probe file; clean up and give up
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        # always remove both probe files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1372
1372
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # note: when os.altsep is None this deliberately returns None,
    # matching the truthiness of the original boolean expression
    return os.altsep and path.endswith(os.altsep)
1376
1376
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
1384
1384
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1399
1399
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # a missing source is fine: the empty temp file stands in
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1438
1438
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file: record None instead of raising
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            mine = (self.stat.st_size, self.stat.st_ctime,
                    self.stat.st_mtime)
            theirs = (old.stat.st_size, old.stat.st_ctime,
                      old.stat.st_mtime)
            return mine == theirs
        except AttributeError:
            # either side has stat == None (nonexistent file)
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'.
        """
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return
            raise

    def __ne__(self, other):
        return not self == other
1504
1522
1505 class atomictempfile(object):
1523 class atomictempfile(object):
1506 '''writable file object that atomically updates a file
1524 '''writable file object that atomically updates a file
1507
1525
1508 All writes will go to a temporary copy of the original file. Call
1526 All writes will go to a temporary copy of the original file. Call
1509 close() when you are done writing, and atomictempfile will rename
1527 close() when you are done writing, and atomictempfile will rename
1510 the temporary copy to the original name, making the changes
1528 the temporary copy to the original name, making the changes
1511 visible. If the object is destroyed without being closed, all your
1529 visible. If the object is destroyed without being closed, all your
1512 writes are discarded.
1530 writes are discarded.
1513
1531
1514 checkambig argument of constructor is used with filestat, and is
1532 checkambig argument of constructor is used with filestat, and is
1515 useful only if target file is guarded by any lock (e.g. repo.lock
1533 useful only if target file is guarded by any lock (e.g. repo.lock
1516 or repo.wlock).
1534 or repo.wlock).
1517 '''
1535 '''
1518 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1536 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
1519 self.__name = name # permanent name
1537 self.__name = name # permanent name
1520 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1538 self._tempname = mktempcopy(name, emptyok=('w' in mode),
1521 createmode=createmode)
1539 createmode=createmode)
1522 self._fp = posixfile(self._tempname, mode)
1540 self._fp = posixfile(self._tempname, mode)
1523 self._checkambig = checkambig
1541 self._checkambig = checkambig
1524
1542
1525 # delegated methods
1543 # delegated methods
1526 self.read = self._fp.read
1544 self.read = self._fp.read
1527 self.write = self._fp.write
1545 self.write = self._fp.write
1528 self.seek = self._fp.seek
1546 self.seek = self._fp.seek
1529 self.tell = self._fp.tell
1547 self.tell = self._fp.tell
1530 self.fileno = self._fp.fileno
1548 self.fileno = self._fp.fileno
1531
1549
    def close(self):
        """Commit the writes: rename the temporary copy over the target.

        With checkambig enabled, if the new file's stat is ambiguous
        with the old one (per filestat.isambig — presumably same
        mtime/size), the mtime is advanced by one second so stat-based
        caches notice the change.
        """
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                # stat the pre-existing target before replacing it so the
                # ambiguity check below has something to compare against
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)
1546
1564
1547 def discard(self):
1565 def discard(self):
1548 if not self._fp.closed:
1566 if not self._fp.closed:
1549 try:
1567 try:
1550 os.unlink(self._tempname)
1568 os.unlink(self._tempname)
1551 except OSError:
1569 except OSError:
1552 pass
1570 pass
1553 self._fp.close()
1571 self._fp.close()
1554
1572
1555 def __del__(self):
1573 def __del__(self):
1556 if safehasattr(self, '_fp'): # constructor actually did something
1574 if safehasattr(self, '_fp'): # constructor actually did something
1557 self.discard()
1575 self.discard()
1558
1576
    def __enter__(self):
        """Context-manager entry: the object itself is the file-like."""
        return self
1561
1579
1562 def __exit__(self, exctype, excvalue, traceback):
1580 def __exit__(self, exctype, excvalue, traceback):
1563 if exctype is not None:
1581 if exctype is not None:
1564 self.discard()
1582 self.discard()
1565 else:
1583 else:
1566 self.close()
1584 self.close()
1567
1585
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # already present; nothing to create and no mode to apply
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without progress; give up
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # catch EEXIST to handle races with concurrent creators
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1595
1613
def readfile(path):
    """Return the entire binary content of *path*."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1599
1617
def writefile(path, text):
    """Replace the content of *path* with the bytes *text*."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1603
1621
def appendfile(path, text):
    """Append the bytes *text* to the end of *path*."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1607
1625
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # re-yield any chunk bigger than 1MB in 256KB slices so a
            # single huge chunk never dominates the queue
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read offset into the chunk at the head of self._queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue with roughly 256KB of buffered chunks
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # iterator exhausted: return whatever we collected
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left may go negative here; the while condition only
                # needs it to be <= 0 to terminate
                left -= chunkremaining

        return ''.join(buf)
1688
1706
def filechunkiter(f, size=131072, limit=None):
    """Yield the data in file-like *f* in chunks of *size* bytes
    (default 131072), reading at most *limit* bytes in total when a
    limit is given. Chunks may be shorter than *size* if the chunk is
    the last one, or if *f* is a socket or similar object that can
    return fewer bytes than requested.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        # a zero-byte budget ends iteration without touching f
        if nbytes:
            s = f.read(nbytes)
        else:
            s = ''
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1709
1727
def makedate(timestamp=None):
    '''Return a (unixtime, offset) tuple for *timestamp* (or the
    current time) based off the local timezone.

    Raises Abort on negative timestamps.
    '''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the local-vs-UTC interpretation difference of the same timestamp
    # is exactly the local timezone offset
    utcoffset = (datetime.datetime.utcfromtimestamp(timestamp) -
                 datetime.datetime.fromtimestamp(timestamp))
    tz = utcoffset.days * 86400 + utcoffset.seconds
    return timestamp, tz
1722
1740
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if any(spec in format for spec in ("%1", "%2", "%z")):
        # expand the hg-specific zone specifiers: %1 is the signed
        # hours part, %2 the minutes part, %z shorthand for both
        sign = "-" if tz > 0 else "+"
        zhours, zminutes = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, zhours))
        format = format.replace("%2", "%02d" % zminutes)
    # clamp to the signed 32-bit range strftime can represent everywhere
    d = t - tz
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    when = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return when.strftime(format)
1758
1776
def shortdate(date=None):
    """turn a (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1762
1780
def parsetimezone(s):
    """Split a trailing timezone, if any, off string *s*.

    Returns an (offset, remainder) pair; offset is None when no
    timezone was recognized. Offsets are in seconds west-negative
    (so "+0530" yields -19800).
    """
    # named zones: only the two fixed UTC aliases are understood
    if s.endswith(("GMT", "UTC")):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours = int(s[-4:-2])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z (only when preceded by a digit)
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
            s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours = int(s[-5:-3])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    return None, s
1790
1808
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    *defaults* maps specifier groups ("S", "M", "HI", "d", "mb", "yY")
    to (biased, now) fallback value pairs used for elements missing
    from *format*.
    """
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # fill in missing elements from defaults, most specific first
    usenow = False  # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"):
        if any(("%" + p) in format for p in part):
            # We've found a specific time element; less specific time
            # elements default to "now" instead of the biased value
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone given: interpret in the local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1818
1836
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed; pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates are rewritten into concrete ones before parsing
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset" internal representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format in turn; for/else fires only when
        # none of them matched
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1895
1913
def matchdate(date):
    """Return a predicate on unixtime for the date match specifier *date*.

    Supported forms:

      '{date}'      match a given date to the accuracy provided
      '<{date}'     on or before a given date
      '>{date}'     on or after a given date
      '-{days}'     within the last {days} days
      '{a} to {b}'  between the two dates, inclusive

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    """
    def lower(date):
        # round an underspecified date down (start of month/day)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round an underspecified date up (end of day/month), trying
        # shrinking day-of-month values until one is valid
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        cutoff = upper(date[1:])
        return lambda x: x <= cutoff
    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        cutoff = lower(date[1:])
        return lambda x: x >= cutoff
    if date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        cutoff = makedate()[0] - days * 3600 * 24
        return lambda x: x >= cutoff
    if " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: start <= x <= stop
    start, stop = lower(date), upper(date)
    return lambda x: start <= x <= stop
1971
1989
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = remod.compile(pattern)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no (or unrecognized) prefix: exact string equality
    return 'literal', pattern, pattern.__eq__
2010
2028
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # successively strip: the mail domain, anything up to and including
    # '<', anything from the first space on, anything from the first
    # '.' on -- each step only if its separator is present
    for sep, keephead in (('@', True), ('<', False), (' ', True), ('.', True)):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx] if keephead else user[idx + 1:]
    return user
2026
2044
def emailuser(user):
    """Return the user (local) portion of an email address."""
    user = user.partition('@')[0]
    lt = user.find('<')
    if lt != -1:
        # drop a leading display name such as "Real Name <"
        user = user[lt + 1:]
    return user
2036
2054
def email(author):
    '''Return the email part of an author string like "Name <addr>".

    Without angle brackets the whole string is returned unchanged.
    '''
    stop = author.find('>')
    if stop == -1:
        stop = None
    return author[author.find('<') + 1:stop]
2043
2061
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display,
    appending '...' when trimmed (column counting is delegated to
    encoding.trim)."""
    return encoding.trim(text, maxlength, ellipsis='...')
2047
2065
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    *unittable* rows are (multiplier, divisor, format) triples ordered
    from largest to smallest unit; a row applies once the count reaches
    multiplier * divisor, and the last row's format is the fallback.
    '''

    def render(count):
        for factor, divisor, fmt in unittable:
            if count >= divisor * factor:
                return fmt % (count / float(divisor))
        # nothing matched: fall back to the smallest unit
        return unittable[-1][2] % count

    return render
2058
2076
2059 bytecount = unitcountfn(
2077 bytecount = unitcountfn(
2060 (100, 1 << 30, _('%.0f GB')),
2078 (100, 1 << 30, _('%.0f GB')),
2061 (10, 1 << 30, _('%.1f GB')),
2079 (10, 1 << 30, _('%.1f GB')),
2062 (1, 1 << 30, _('%.2f GB')),
2080 (1, 1 << 30, _('%.2f GB')),
2063 (100, 1 << 20, _('%.0f MB')),
2081 (100, 1 << 20, _('%.0f MB')),
2064 (10, 1 << 20, _('%.1f MB')),
2082 (10, 1 << 20, _('%.1f MB')),
2065 (1, 1 << 20, _('%.2f MB')),
2083 (1, 1 << 20, _('%.2f MB')),
2066 (100, 1 << 10, _('%.0f KB')),
2084 (100, 1 << 10, _('%.0f KB')),
2067 (10, 1 << 10, _('%.1f KB')),
2085 (10, 1 << 10, _('%.1f KB')),
2068 (1, 1 << 10, _('%.2f KB')),
2086 (1, 1 << 10, _('%.2f KB')),
2069 (1, 1, _('%.0f bytes')),
2087 (1, 1, _('%.0f bytes')),
2070 )
2088 )
2071
2089
2072 def uirepr(s):
2090 def uirepr(s):
2073 # Avoid double backslash in Windows path repr()
2091 # Avoid double backslash in Windows path repr()
2074 return repr(s).replace('\\\\', '\\')
2092 return repr(s).replace('\\\\', '\\')
2075
2093
2076 # delay import of textwrap
2094 # delay import of textwrap
2077 def MBTextWrapper(**kwargs):
2095 def MBTextWrapper(**kwargs):
2078 class tw(textwrap.TextWrapper):
2096 class tw(textwrap.TextWrapper):
2079 """
2097 """
2080 Extend TextWrapper for width-awareness.
2098 Extend TextWrapper for width-awareness.
2081
2099
2082 Neither number of 'bytes' in any encoding nor 'characters' is
2100 Neither number of 'bytes' in any encoding nor 'characters' is
2083 appropriate to calculate terminal columns for specified string.
2101 appropriate to calculate terminal columns for specified string.
2084
2102
2085 Original TextWrapper implementation uses built-in 'len()' directly,
2103 Original TextWrapper implementation uses built-in 'len()' directly,
2086 so overriding is needed to use width information of each characters.
2104 so overriding is needed to use width information of each characters.
2087
2105
2088 In addition, characters classified into 'ambiguous' width are
2106 In addition, characters classified into 'ambiguous' width are
2089 treated as wide in East Asian area, but as narrow in other.
2107 treated as wide in East Asian area, but as narrow in other.
2090
2108
2091 This requires use decision to determine width of such characters.
2109 This requires use decision to determine width of such characters.
2092 """
2110 """
2093 def _cutdown(self, ucstr, space_left):
2111 def _cutdown(self, ucstr, space_left):
2094 l = 0
2112 l = 0
2095 colwidth = encoding.ucolwidth
2113 colwidth = encoding.ucolwidth
2096 for i in xrange(len(ucstr)):
2114 for i in xrange(len(ucstr)):
2097 l += colwidth(ucstr[i])
2115 l += colwidth(ucstr[i])
2098 if space_left < l:
2116 if space_left < l:
2099 return (ucstr[:i], ucstr[i:])
2117 return (ucstr[:i], ucstr[i:])
2100 return ucstr, ''
2118 return ucstr, ''
2101
2119
2102 # overriding of base class
2120 # overriding of base class
2103 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2121 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2104 space_left = max(width - cur_len, 1)
2122 space_left = max(width - cur_len, 1)
2105
2123
2106 if self.break_long_words:
2124 if self.break_long_words:
2107 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2125 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2108 cur_line.append(cut)
2126 cur_line.append(cut)
2109 reversed_chunks[-1] = res
2127 reversed_chunks[-1] = res
2110 elif not cur_line:
2128 elif not cur_line:
2111 cur_line.append(reversed_chunks.pop())
2129 cur_line.append(reversed_chunks.pop())
2112
2130
2113 # this overriding code is imported from TextWrapper of Python 2.6
2131 # this overriding code is imported from TextWrapper of Python 2.6
2114 # to calculate columns of string by 'encoding.ucolwidth()'
2132 # to calculate columns of string by 'encoding.ucolwidth()'
2115 def _wrap_chunks(self, chunks):
2133 def _wrap_chunks(self, chunks):
2116 colwidth = encoding.ucolwidth
2134 colwidth = encoding.ucolwidth
2117
2135
2118 lines = []
2136 lines = []
2119 if self.width <= 0:
2137 if self.width <= 0:
2120 raise ValueError("invalid width %r (must be > 0)" % self.width)
2138 raise ValueError("invalid width %r (must be > 0)" % self.width)
2121
2139
2122 # Arrange in reverse order so items can be efficiently popped
2140 # Arrange in reverse order so items can be efficiently popped
2123 # from a stack of chucks.
2141 # from a stack of chucks.
2124 chunks.reverse()
2142 chunks.reverse()
2125
2143
2126 while chunks:
2144 while chunks:
2127
2145
2128 # Start the list of chunks that will make up the current line.
2146 # Start the list of chunks that will make up the current line.
2129 # cur_len is just the length of all the chunks in cur_line.
2147 # cur_len is just the length of all the chunks in cur_line.
2130 cur_line = []
2148 cur_line = []
2131 cur_len = 0
2149 cur_len = 0
2132
2150
2133 # Figure out which static string will prefix this line.
2151 # Figure out which static string will prefix this line.
2134 if lines:
2152 if lines:
2135 indent = self.subsequent_indent
2153 indent = self.subsequent_indent
2136 else:
2154 else:
2137 indent = self.initial_indent
2155 indent = self.initial_indent
2138
2156
2139 # Maximum width for this line.
2157 # Maximum width for this line.
2140 width = self.width - len(indent)
2158 width = self.width - len(indent)
2141
2159
2142 # First chunk on line is whitespace -- drop it, unless this
2160 # First chunk on line is whitespace -- drop it, unless this
2143 # is the very beginning of the text (i.e. no lines started yet).
2161 # is the very beginning of the text (i.e. no lines started yet).
2144 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
2162 if self.drop_whitespace and chunks[-1].strip() == '' and lines:
2145 del chunks[-1]
2163 del chunks[-1]
2146
2164
2147 while chunks:
2165 while chunks:
2148 l = colwidth(chunks[-1])
2166 l = colwidth(chunks[-1])
2149
2167
2150 # Can at least squeeze this chunk onto the current line.
2168 # Can at least squeeze this chunk onto the current line.
2151 if cur_len + l <= width:
2169 if cur_len + l <= width:
2152 cur_line.append(chunks.pop())
2170 cur_line.append(chunks.pop())
2153 cur_len += l
2171 cur_len += l
2154
2172
2155 # Nope, this line is full.
2173 # Nope, this line is full.
2156 else:
2174 else:
2157 break
2175 break
2158
2176
2159 # The current line is full, and the next chunk is too big to
2177 # The current line is full, and the next chunk is too big to
2160 # fit on *any* line (not just this one).
2178 # fit on *any* line (not just this one).
2161 if chunks and colwidth(chunks[-1]) > width:
2179 if chunks and colwidth(chunks[-1]) > width:
2162 self._handle_long_word(chunks, cur_line, cur_len, width)
2180 self._handle_long_word(chunks, cur_line, cur_len, width)
2163
2181
2164 # If the last chunk on this line is all whitespace, drop it.
2182 # If the last chunk on this line is all whitespace, drop it.
2165 if (self.drop_whitespace and
2183 if (self.drop_whitespace and
2166 cur_line and cur_line[-1].strip() == ''):
2184 cur_line and cur_line[-1].strip() == ''):
2167 del cur_line[-1]
2185 del cur_line[-1]
2168
2186
2169 # Convert current line back to a string and store it in list
2187 # Convert current line back to a string and store it in list
2170 # of all lines (return value).
2188 # of all lines (return value).
2171 if cur_line:
2189 if cur_line:
2172 lines.append(indent + ''.join(cur_line))
2190 lines.append(indent + ''.join(cur_line))
2173
2191
2174 return lines
2192 return lines
2175
2193
2176 global MBTextWrapper
2194 global MBTextWrapper
2177 MBTextWrapper = tw
2195 MBTextWrapper = tw
2178 return tw(**kwargs)
2196 return tw(**kwargs)
2179
2197
2180 def wrap(line, width, initindent='', hangindent=''):
2198 def wrap(line, width, initindent='', hangindent=''):
2181 maxindent = max(len(hangindent), len(initindent))
2199 maxindent = max(len(hangindent), len(initindent))
2182 if width <= maxindent:
2200 if width <= maxindent:
2183 # adjust for weird terminal size
2201 # adjust for weird terminal size
2184 width = max(78, maxindent + 1)
2202 width = max(78, maxindent + 1)
2185 line = line.decode(encoding.encoding, encoding.encodingmode)
2203 line = line.decode(encoding.encoding, encoding.encodingmode)
2186 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
2204 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
2187 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
2205 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
2188 wrapper = MBTextWrapper(width=width,
2206 wrapper = MBTextWrapper(width=width,
2189 initial_indent=initindent,
2207 initial_indent=initindent,
2190 subsequent_indent=hangindent)
2208 subsequent_indent=hangindent)
2191 return wrapper.fill(line).encode(encoding.encoding)
2209 return wrapper.fill(line).encode(encoding.encoding)
2192
2210
2193 def iterfile(fp):
2211 def iterfile(fp):
2194 """like fp.__iter__ but does not have issues with EINTR. Python 2.7.12 is
2212 """like fp.__iter__ but does not have issues with EINTR. Python 2.7.12 is
2195 known to have such issues."""
2213 known to have such issues."""
2196 return iter(fp.readline, '')
2214 return iter(fp.readline, '')
2197
2215
2198 def iterlines(iterator):
2216 def iterlines(iterator):
2199 for chunk in iterator:
2217 for chunk in iterator:
2200 for line in chunk.splitlines():
2218 for line in chunk.splitlines():
2201 yield line
2219 yield line
2202
2220
2203 def expandpath(path):
2221 def expandpath(path):
2204 return os.path.expanduser(os.path.expandvars(path))
2222 return os.path.expanduser(os.path.expandvars(path))
2205
2223
2206 def hgcmd():
2224 def hgcmd():
2207 """Return the command used to execute current hg
2225 """Return the command used to execute current hg
2208
2226
2209 This is different from hgexecutable() because on Windows we want
2227 This is different from hgexecutable() because on Windows we want
2210 to avoid things opening new shell windows like batch files, so we
2228 to avoid things opening new shell windows like batch files, so we
2211 get either the python call or current executable.
2229 get either the python call or current executable.
2212 """
2230 """
2213 if mainfrozen():
2231 if mainfrozen():
2214 if getattr(sys, 'frozen', None) == 'macosx_app':
2232 if getattr(sys, 'frozen', None) == 'macosx_app':
2215 # Env variable set by py2app
2233 # Env variable set by py2app
2216 return [os.environ['EXECUTABLEPATH']]
2234 return [os.environ['EXECUTABLEPATH']]
2217 else:
2235 else:
2218 return [sys.executable]
2236 return [sys.executable]
2219 return gethgcmd()
2237 return gethgcmd()
2220
2238
2221 def rundetached(args, condfn):
2239 def rundetached(args, condfn):
2222 """Execute the argument list in a detached process.
2240 """Execute the argument list in a detached process.
2223
2241
2224 condfn is a callable which is called repeatedly and should return
2242 condfn is a callable which is called repeatedly and should return
2225 True once the child process is known to have started successfully.
2243 True once the child process is known to have started successfully.
2226 At this point, the child process PID is returned. If the child
2244 At this point, the child process PID is returned. If the child
2227 process fails to start or finishes before condfn() evaluates to
2245 process fails to start or finishes before condfn() evaluates to
2228 True, return -1.
2246 True, return -1.
2229 """
2247 """
2230 # Windows case is easier because the child process is either
2248 # Windows case is easier because the child process is either
2231 # successfully starting and validating the condition or exiting
2249 # successfully starting and validating the condition or exiting
2232 # on failure. We just poll on its PID. On Unix, if the child
2250 # on failure. We just poll on its PID. On Unix, if the child
2233 # process fails to start, it will be left in a zombie state until
2251 # process fails to start, it will be left in a zombie state until
2234 # the parent wait on it, which we cannot do since we expect a long
2252 # the parent wait on it, which we cannot do since we expect a long
2235 # running process on success. Instead we listen for SIGCHLD telling
2253 # running process on success. Instead we listen for SIGCHLD telling
2236 # us our child process terminated.
2254 # us our child process terminated.
2237 terminated = set()
2255 terminated = set()
2238 def handler(signum, frame):
2256 def handler(signum, frame):
2239 terminated.add(os.wait())
2257 terminated.add(os.wait())
2240 prevhandler = None
2258 prevhandler = None
2241 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2259 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2242 if SIGCHLD is not None:
2260 if SIGCHLD is not None:
2243 prevhandler = signal.signal(SIGCHLD, handler)
2261 prevhandler = signal.signal(SIGCHLD, handler)
2244 try:
2262 try:
2245 pid = spawndetached(args)
2263 pid = spawndetached(args)
2246 while not condfn():
2264 while not condfn():
2247 if ((pid in terminated or not testpid(pid))
2265 if ((pid in terminated or not testpid(pid))
2248 and not condfn()):
2266 and not condfn()):
2249 return -1
2267 return -1
2250 time.sleep(0.1)
2268 time.sleep(0.1)
2251 return pid
2269 return pid
2252 finally:
2270 finally:
2253 if prevhandler is not None:
2271 if prevhandler is not None:
2254 signal.signal(signal.SIGCHLD, prevhandler)
2272 signal.signal(signal.SIGCHLD, prevhandler)
2255
2273
2256 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2274 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2257 """Return the result of interpolating items in the mapping into string s.
2275 """Return the result of interpolating items in the mapping into string s.
2258
2276
2259 prefix is a single character string, or a two character string with
2277 prefix is a single character string, or a two character string with
2260 a backslash as the first character if the prefix needs to be escaped in
2278 a backslash as the first character if the prefix needs to be escaped in
2261 a regular expression.
2279 a regular expression.
2262
2280
2263 fn is an optional function that will be applied to the replacement text
2281 fn is an optional function that will be applied to the replacement text
2264 just before replacement.
2282 just before replacement.
2265
2283
2266 escape_prefix is an optional flag that allows using doubled prefix for
2284 escape_prefix is an optional flag that allows using doubled prefix for
2267 its escaping.
2285 its escaping.
2268 """
2286 """
2269 fn = fn or (lambda s: s)
2287 fn = fn or (lambda s: s)
2270 patterns = '|'.join(mapping.keys())
2288 patterns = '|'.join(mapping.keys())
2271 if escape_prefix:
2289 if escape_prefix:
2272 patterns += '|' + prefix
2290 patterns += '|' + prefix
2273 if len(prefix) > 1:
2291 if len(prefix) > 1:
2274 prefix_char = prefix[1:]
2292 prefix_char = prefix[1:]
2275 else:
2293 else:
2276 prefix_char = prefix
2294 prefix_char = prefix
2277 mapping[prefix_char] = prefix_char
2295 mapping[prefix_char] = prefix_char
2278 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2296 r = remod.compile(r'%s(%s)' % (prefix, patterns))
2279 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2297 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2280
2298
2281 def getport(port):
2299 def getport(port):
2282 """Return the port for a given network service.
2300 """Return the port for a given network service.
2283
2301
2284 If port is an integer, it's returned as is. If it's a string, it's
2302 If port is an integer, it's returned as is. If it's a string, it's
2285 looked up using socket.getservbyname(). If there's no matching
2303 looked up using socket.getservbyname(). If there's no matching
2286 service, error.Abort is raised.
2304 service, error.Abort is raised.
2287 """
2305 """
2288 try:
2306 try:
2289 return int(port)
2307 return int(port)
2290 except ValueError:
2308 except ValueError:
2291 pass
2309 pass
2292
2310
2293 try:
2311 try:
2294 return socket.getservbyname(port)
2312 return socket.getservbyname(port)
2295 except socket.error:
2313 except socket.error:
2296 raise Abort(_("no port number associated with service '%s'") % port)
2314 raise Abort(_("no port number associated with service '%s'") % port)
2297
2315
2298 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2316 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
2299 '0': False, 'no': False, 'false': False, 'off': False,
2317 '0': False, 'no': False, 'false': False, 'off': False,
2300 'never': False}
2318 'never': False}
2301
2319
2302 def parsebool(s):
2320 def parsebool(s):
2303 """Parse s into a boolean.
2321 """Parse s into a boolean.
2304
2322
2305 If s is not a valid boolean, returns None.
2323 If s is not a valid boolean, returns None.
2306 """
2324 """
2307 return _booleans.get(s.lower(), None)
2325 return _booleans.get(s.lower(), None)
2308
2326
2309 _hextochr = dict((a + b, chr(int(a + b, 16)))
2327 _hextochr = dict((a + b, chr(int(a + b, 16)))
2310 for a in string.hexdigits for b in string.hexdigits)
2328 for a in string.hexdigits for b in string.hexdigits)
2311
2329
2312 class url(object):
2330 class url(object):
2313 r"""Reliable URL parser.
2331 r"""Reliable URL parser.
2314
2332
2315 This parses URLs and provides attributes for the following
2333 This parses URLs and provides attributes for the following
2316 components:
2334 components:
2317
2335
2318 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2336 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2319
2337
2320 Missing components are set to None. The only exception is
2338 Missing components are set to None. The only exception is
2321 fragment, which is set to '' if present but empty.
2339 fragment, which is set to '' if present but empty.
2322
2340
2323 If parsefragment is False, fragment is included in query. If
2341 If parsefragment is False, fragment is included in query. If
2324 parsequery is False, query is included in path. If both are
2342 parsequery is False, query is included in path. If both are
2325 False, both fragment and query are included in path.
2343 False, both fragment and query are included in path.
2326
2344
2327 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2345 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2328
2346
2329 Note that for backward compatibility reasons, bundle URLs do not
2347 Note that for backward compatibility reasons, bundle URLs do not
2330 take host names. That means 'bundle://../' has a path of '../'.
2348 take host names. That means 'bundle://../' has a path of '../'.
2331
2349
2332 Examples:
2350 Examples:
2333
2351
2334 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2352 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2335 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2353 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2336 >>> url('ssh://[::1]:2200//home/joe/repo')
2354 >>> url('ssh://[::1]:2200//home/joe/repo')
2337 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2355 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2338 >>> url('file:///home/joe/repo')
2356 >>> url('file:///home/joe/repo')
2339 <url scheme: 'file', path: '/home/joe/repo'>
2357 <url scheme: 'file', path: '/home/joe/repo'>
2340 >>> url('file:///c:/temp/foo/')
2358 >>> url('file:///c:/temp/foo/')
2341 <url scheme: 'file', path: 'c:/temp/foo/'>
2359 <url scheme: 'file', path: 'c:/temp/foo/'>
2342 >>> url('bundle:foo')
2360 >>> url('bundle:foo')
2343 <url scheme: 'bundle', path: 'foo'>
2361 <url scheme: 'bundle', path: 'foo'>
2344 >>> url('bundle://../foo')
2362 >>> url('bundle://../foo')
2345 <url scheme: 'bundle', path: '../foo'>
2363 <url scheme: 'bundle', path: '../foo'>
2346 >>> url(r'c:\foo\bar')
2364 >>> url(r'c:\foo\bar')
2347 <url path: 'c:\\foo\\bar'>
2365 <url path: 'c:\\foo\\bar'>
2348 >>> url(r'\\blah\blah\blah')
2366 >>> url(r'\\blah\blah\blah')
2349 <url path: '\\\\blah\\blah\\blah'>
2367 <url path: '\\\\blah\\blah\\blah'>
2350 >>> url(r'\\blah\blah\blah#baz')
2368 >>> url(r'\\blah\blah\blah#baz')
2351 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2369 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2352 >>> url(r'file:///C:\users\me')
2370 >>> url(r'file:///C:\users\me')
2353 <url scheme: 'file', path: 'C:\\users\\me'>
2371 <url scheme: 'file', path: 'C:\\users\\me'>
2354
2372
2355 Authentication credentials:
2373 Authentication credentials:
2356
2374
2357 >>> url('ssh://joe:xyz@x/repo')
2375 >>> url('ssh://joe:xyz@x/repo')
2358 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2376 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2359 >>> url('ssh://joe@x/repo')
2377 >>> url('ssh://joe@x/repo')
2360 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2378 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2361
2379
2362 Query strings and fragments:
2380 Query strings and fragments:
2363
2381
2364 >>> url('http://host/a?b#c')
2382 >>> url('http://host/a?b#c')
2365 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2383 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2366 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2384 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2367 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2385 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2368
2386
2369 Empty path:
2387 Empty path:
2370
2388
2371 >>> url('')
2389 >>> url('')
2372 <url path: ''>
2390 <url path: ''>
2373 >>> url('#a')
2391 >>> url('#a')
2374 <url path: '', fragment: 'a'>
2392 <url path: '', fragment: 'a'>
2375 >>> url('http://host/')
2393 >>> url('http://host/')
2376 <url scheme: 'http', host: 'host', path: ''>
2394 <url scheme: 'http', host: 'host', path: ''>
2377 >>> url('http://host/#a')
2395 >>> url('http://host/#a')
2378 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2396 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2379
2397
2380 Only scheme:
2398 Only scheme:
2381
2399
2382 >>> url('http:')
2400 >>> url('http:')
2383 <url scheme: 'http'>
2401 <url scheme: 'http'>
2384 """
2402 """
2385
2403
2386 _safechars = "!~*'()+"
2404 _safechars = "!~*'()+"
2387 _safepchars = "/!~*'()+:\\"
2405 _safepchars = "/!~*'()+:\\"
2388 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2406 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2389
2407
2390 def __init__(self, path, parsequery=True, parsefragment=True):
2408 def __init__(self, path, parsequery=True, parsefragment=True):
2391 # We slowly chomp away at path until we have only the path left
2409 # We slowly chomp away at path until we have only the path left
2392 self.scheme = self.user = self.passwd = self.host = None
2410 self.scheme = self.user = self.passwd = self.host = None
2393 self.port = self.path = self.query = self.fragment = None
2411 self.port = self.path = self.query = self.fragment = None
2394 self._localpath = True
2412 self._localpath = True
2395 self._hostport = ''
2413 self._hostport = ''
2396 self._origpath = path
2414 self._origpath = path
2397
2415
2398 if parsefragment and '#' in path:
2416 if parsefragment and '#' in path:
2399 path, self.fragment = path.split('#', 1)
2417 path, self.fragment = path.split('#', 1)
2400
2418
2401 # special case for Windows drive letters and UNC paths
2419 # special case for Windows drive letters and UNC paths
2402 if hasdriveletter(path) or path.startswith('\\\\'):
2420 if hasdriveletter(path) or path.startswith('\\\\'):
2403 self.path = path
2421 self.path = path
2404 return
2422 return
2405
2423
2406 # For compatibility reasons, we can't handle bundle paths as
2424 # For compatibility reasons, we can't handle bundle paths as
2407 # normal URLS
2425 # normal URLS
2408 if path.startswith('bundle:'):
2426 if path.startswith('bundle:'):
2409 self.scheme = 'bundle'
2427 self.scheme = 'bundle'
2410 path = path[7:]
2428 path = path[7:]
2411 if path.startswith('//'):
2429 if path.startswith('//'):
2412 path = path[2:]
2430 path = path[2:]
2413 self.path = path
2431 self.path = path
2414 return
2432 return
2415
2433
2416 if self._matchscheme(path):
2434 if self._matchscheme(path):
2417 parts = path.split(':', 1)
2435 parts = path.split(':', 1)
2418 if parts[0]:
2436 if parts[0]:
2419 self.scheme, path = parts
2437 self.scheme, path = parts
2420 self._localpath = False
2438 self._localpath = False
2421
2439
2422 if not path:
2440 if not path:
2423 path = None
2441 path = None
2424 if self._localpath:
2442 if self._localpath:
2425 self.path = ''
2443 self.path = ''
2426 return
2444 return
2427 else:
2445 else:
2428 if self._localpath:
2446 if self._localpath:
2429 self.path = path
2447 self.path = path
2430 return
2448 return
2431
2449
2432 if parsequery and '?' in path:
2450 if parsequery and '?' in path:
2433 path, self.query = path.split('?', 1)
2451 path, self.query = path.split('?', 1)
2434 if not path:
2452 if not path:
2435 path = None
2453 path = None
2436 if not self.query:
2454 if not self.query:
2437 self.query = None
2455 self.query = None
2438
2456
2439 # // is required to specify a host/authority
2457 # // is required to specify a host/authority
2440 if path and path.startswith('//'):
2458 if path and path.startswith('//'):
2441 parts = path[2:].split('/', 1)
2459 parts = path[2:].split('/', 1)
2442 if len(parts) > 1:
2460 if len(parts) > 1:
2443 self.host, path = parts
2461 self.host, path = parts
2444 else:
2462 else:
2445 self.host = parts[0]
2463 self.host = parts[0]
2446 path = None
2464 path = None
2447 if not self.host:
2465 if not self.host:
2448 self.host = None
2466 self.host = None
2449 # path of file:///d is /d
2467 # path of file:///d is /d
2450 # path of file:///d:/ is d:/, not /d:/
2468 # path of file:///d:/ is d:/, not /d:/
2451 if path and not hasdriveletter(path):
2469 if path and not hasdriveletter(path):
2452 path = '/' + path
2470 path = '/' + path
2453
2471
2454 if self.host and '@' in self.host:
2472 if self.host and '@' in self.host:
2455 self.user, self.host = self.host.rsplit('@', 1)
2473 self.user, self.host = self.host.rsplit('@', 1)
2456 if ':' in self.user:
2474 if ':' in self.user:
2457 self.user, self.passwd = self.user.split(':', 1)
2475 self.user, self.passwd = self.user.split(':', 1)
2458 if not self.host:
2476 if not self.host:
2459 self.host = None
2477 self.host = None
2460
2478
2461 # Don't split on colons in IPv6 addresses without ports
2479 # Don't split on colons in IPv6 addresses without ports
2462 if (self.host and ':' in self.host and
2480 if (self.host and ':' in self.host and
2463 not (self.host.startswith('[') and self.host.endswith(']'))):
2481 not (self.host.startswith('[') and self.host.endswith(']'))):
2464 self._hostport = self.host
2482 self._hostport = self.host
2465 self.host, self.port = self.host.rsplit(':', 1)
2483 self.host, self.port = self.host.rsplit(':', 1)
2466 if not self.host:
2484 if not self.host:
2467 self.host = None
2485 self.host = None
2468
2486
2469 if (self.host and self.scheme == 'file' and
2487 if (self.host and self.scheme == 'file' and
2470 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2488 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2471 raise Abort(_('file:// URLs can only refer to localhost'))
2489 raise Abort(_('file:// URLs can only refer to localhost'))
2472
2490
2473 self.path = path
2491 self.path = path
2474
2492
2475 # leave the query string escaped
2493 # leave the query string escaped
2476 for a in ('user', 'passwd', 'host', 'port',
2494 for a in ('user', 'passwd', 'host', 'port',
2477 'path', 'fragment'):
2495 'path', 'fragment'):
2478 v = getattr(self, a)
2496 v = getattr(self, a)
2479 if v is not None:
2497 if v is not None:
2480 setattr(self, a, pycompat.urlunquote(v))
2498 setattr(self, a, pycompat.urlunquote(v))
2481
2499
2482 def __repr__(self):
2500 def __repr__(self):
2483 attrs = []
2501 attrs = []
2484 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2502 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2485 'query', 'fragment'):
2503 'query', 'fragment'):
2486 v = getattr(self, a)
2504 v = getattr(self, a)
2487 if v is not None:
2505 if v is not None:
2488 attrs.append('%s: %r' % (a, v))
2506 attrs.append('%s: %r' % (a, v))
2489 return '<url %s>' % ', '.join(attrs)
2507 return '<url %s>' % ', '.join(attrs)
2490
2508
2491 def __str__(self):
2509 def __str__(self):
2492 r"""Join the URL's components back into a URL string.
2510 r"""Join the URL's components back into a URL string.
2493
2511
2494 Examples:
2512 Examples:
2495
2513
2496 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2514 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2497 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2515 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2498 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2516 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2499 'http://user:pw@host:80/?foo=bar&baz=42'
2517 'http://user:pw@host:80/?foo=bar&baz=42'
2500 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2518 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2501 'http://user:pw@host:80/?foo=bar%3dbaz'
2519 'http://user:pw@host:80/?foo=bar%3dbaz'
2502 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2520 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2503 'ssh://user:pw@[::1]:2200//home/joe#'
2521 'ssh://user:pw@[::1]:2200//home/joe#'
2504 >>> str(url('http://localhost:80//'))
2522 >>> str(url('http://localhost:80//'))
2505 'http://localhost:80//'
2523 'http://localhost:80//'
2506 >>> str(url('http://localhost:80/'))
2524 >>> str(url('http://localhost:80/'))
2507 'http://localhost:80/'
2525 'http://localhost:80/'
2508 >>> str(url('http://localhost:80'))
2526 >>> str(url('http://localhost:80'))
2509 'http://localhost:80/'
2527 'http://localhost:80/'
2510 >>> str(url('bundle:foo'))
2528 >>> str(url('bundle:foo'))
2511 'bundle:foo'
2529 'bundle:foo'
2512 >>> str(url('bundle://../foo'))
2530 >>> str(url('bundle://../foo'))
2513 'bundle:../foo'
2531 'bundle:../foo'
2514 >>> str(url('path'))
2532 >>> str(url('path'))
2515 'path'
2533 'path'
2516 >>> str(url('file:///tmp/foo/bar'))
2534 >>> str(url('file:///tmp/foo/bar'))
2517 'file:///tmp/foo/bar'
2535 'file:///tmp/foo/bar'
2518 >>> str(url('file:///c:/tmp/foo/bar'))
2536 >>> str(url('file:///c:/tmp/foo/bar'))
2519 'file:///c:/tmp/foo/bar'
2537 'file:///c:/tmp/foo/bar'
2520 >>> print url(r'bundle:foo\bar')
2538 >>> print url(r'bundle:foo\bar')
2521 bundle:foo\bar
2539 bundle:foo\bar
2522 >>> print url(r'file:///D:\data\hg')
2540 >>> print url(r'file:///D:\data\hg')
2523 file:///D:\data\hg
2541 file:///D:\data\hg
2524 """
2542 """
2525 if self._localpath:
2543 if self._localpath:
2526 s = self.path
2544 s = self.path
2527 if self.scheme == 'bundle':
2545 if self.scheme == 'bundle':
2528 s = 'bundle:' + s
2546 s = 'bundle:' + s
2529 if self.fragment:
2547 if self.fragment:
2530 s += '#' + self.fragment
2548 s += '#' + self.fragment
2531 return s
2549 return s
2532
2550
2533 s = self.scheme + ':'
2551 s = self.scheme + ':'
2534 if self.user or self.passwd or self.host:
2552 if self.user or self.passwd or self.host:
2535 s += '//'
2553 s += '//'
2536 elif self.scheme and (not self.path or self.path.startswith('/')
2554 elif self.scheme and (not self.path or self.path.startswith('/')
2537 or hasdriveletter(self.path)):
2555 or hasdriveletter(self.path)):
2538 s += '//'
2556 s += '//'
2539 if hasdriveletter(self.path):
2557 if hasdriveletter(self.path):
2540 s += '/'
2558 s += '/'
2541 if self.user:
2559 if self.user:
2542 s += urlreq.quote(self.user, safe=self._safechars)
2560 s += urlreq.quote(self.user, safe=self._safechars)
2543 if self.passwd:
2561 if self.passwd:
2544 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2562 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2545 if self.user or self.passwd:
2563 if self.user or self.passwd:
2546 s += '@'
2564 s += '@'
2547 if self.host:
2565 if self.host:
2548 if not (self.host.startswith('[') and self.host.endswith(']')):
2566 if not (self.host.startswith('[') and self.host.endswith(']')):
2549 s += urlreq.quote(self.host)
2567 s += urlreq.quote(self.host)
2550 else:
2568 else:
2551 s += self.host
2569 s += self.host
2552 if self.port:
2570 if self.port:
2553 s += ':' + urlreq.quote(self.port)
2571 s += ':' + urlreq.quote(self.port)
2554 if self.host:
2572 if self.host:
2555 s += '/'
2573 s += '/'
2556 if self.path:
2574 if self.path:
2557 # TODO: similar to the query string, we should not unescape the
2575 # TODO: similar to the query string, we should not unescape the
2558 # path when we store it, the path might contain '%2f' = '/',
2576 # path when we store it, the path might contain '%2f' = '/',
2559 # which we should *not* escape.
2577 # which we should *not* escape.
2560 s += urlreq.quote(self.path, safe=self._safepchars)
2578 s += urlreq.quote(self.path, safe=self._safepchars)
2561 if self.query:
2579 if self.query:
2562 # we store the query in escaped form.
2580 # we store the query in escaped form.
2563 s += '?' + self.query
2581 s += '?' + self.query
2564 if self.fragment is not None:
2582 if self.fragment is not None:
2565 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2583 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2566 return s
2584 return s
2567
2585
def authinfo(self):
    """Return (url-without-credentials, authinfo-tuple-or-None).

    The first element is this URL rendered with user/password blanked
    out; the second is the tuple fed to urllib2's password manager, or
    None when no user is set.
    """
    saved = (self.user, self.passwd)
    try:
        self.user, self.passwd = None, None
        stripped = str(self)
    finally:
        self.user, self.passwd = saved
    if not self.user:
        return (stripped, None)
    # authinfo[1] is passed to urllib2 password manager, and its
    # URIs must not contain credentials. The host is passed in the
    # URIs list because Python < 2.4.3 uses only that to search for
    # a password.
    return (stripped, (None, (stripped, self.host),
                       self.user, self.passwd or ''))
2583
2601
def isabs(self):
    """Report whether this URL is absolute, i.e. cannot be joined()."""
    # Any scheme other than 'file' points at a remote location.
    if self.scheme and self.scheme != 'file':
        return True
    p = self.path
    # Drive letters, Windows UNC shares and rooted paths are absolute
    # for our purposes locally.
    return bool(hasdriveletter(p)
                or p.startswith(r'\\')
                or p.startswith('/'))
2594
2612
def localpath(self):
    """Return the filesystem path this URL names (file/bundle schemes),
    or the original unparsed path for any other scheme."""
    if self.scheme != 'file' and self.scheme != 'bundle':
        return self._origpath
    path = self.path or '/'
    # For Windows, we need to promote hosts containing drive
    # letters to paths with drive letters.
    if hasdriveletter(self._hostport):
        path = self._hostport + '/' + self.path
    elif (self.host is not None and self.path
          and not hasdriveletter(path)):
        path = '/' + path
    return path
2607
2625
def islocal(self):
    '''whether localpath will return something that posixfile can open'''
    # an empty scheme means a plain filesystem path
    return not self.scheme or self.scheme in ('file', 'bundle')
2612
2630
def hasscheme(path):
    """True when *path* carries an explicit URL scheme."""
    parsed = url(path)
    return bool(parsed.scheme)
2615
2633
def hasdriveletter(path):
    """Does *path* start with a Windows drive letter ('c:...')?

    Falsy inputs ('' or None) fall through unchanged, matching the
    short-circuit behavior callers rely on in boolean context.
    """
    return path and path[0:1].isalpha() and path[1:2] == ':'
2618
2636
def urllocalpath(path):
    """Return the local filesystem path for *path*, parsed as a URL
    with query/fragment parsing disabled."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2621
2639
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2628
2646
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2634
2652
def isatty(fp):
    """Best-effort fp.isatty(); False for objects lacking the method."""
    try:
        return fp.isatty()
    except AttributeError:
        # not every file-like object implements isatty()
        return False
2640
2658
# Duration formatter built from (threshold, divisor, format) rows.
# NOTE(review): assumes unitcountfn picks the first row whose scaled
# value meets the threshold - confirm against its definition.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# Current indentation (in spaces) of nested @timed reports; kept in a
# one-element list so nested calls can mutate it in place.
_timenesting = [0]
2658
2676
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

        @util.timed
        def foo(a, b, c):
            pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            # report after un-indenting so the line aligns with the caller
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2683
2701
# Suffix -> multiplier table for sizetoint.  The bare 'b' entry comes
# last so that 'kb'/'mb'/'gb' are matched first by endswith().
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no recognized suffix: a plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2705
2723
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, callable) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort in place by source name so registration order is irrelevant
        self._hooks.sort(key=lambda pair: pair[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2723
2741
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (filename, lineno), funcname)
               for filename, lineno, funcname, _text in frames]
    if not entries:
        return
    # pad every location to the widest one so the output columns align
    width = max(len(loc) for loc, _func in entries)
    for loc, funcname in entries:
        if line is None:
            yield (width, loc, funcname)
        else:
            yield line % (width, loc, funcname)
2745
2763
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides this helper's own frame
    for entry in getstackframes(skip + 1):
        f.write(entry)
    f.flush()
2758
2776
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: honor the 'skip' state byte
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # all ancestors of an existing entry were already
                # counted, so stop walking upward
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2794
2812
# Prefer the C implementation of dirs when the parsers module provides one.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2797
2815
def finddirs(path):
    """Yield every ancestor directory of *path*, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path without '/' yields nothing.
    """
    cut = path.rfind('/')
    while cut >= 0:
        yield path[:cut]
        cut = path.rfind('/', 0, cut)
2803
2821
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        results = []
        for factory in self._pending:
            ctx = factory()
            results.append(ctx.__enter__())
            self._atexit.append(ctx.__exit__)
        del self._pending
        return results

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            # exception details are deliberately not passed to the callback
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for handler in self._atexit:
            try:
                if handler(exc_type, exc_val, exc_tb):
                    # this manager swallowed the exception; outer
                    # managers now see a clean exit
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2863
2881
2864 # compression code
2882 # compression code
2865
2883
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()
        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # An engine may decline an external facing name by leaving
            # the first element of bundletype() empty.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.
        """
        return self._engines[self._bundlenames[bundlename]]

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.
        """
        return self._engines[self._bundletypes[bundletype]]

# The global registry all engines below attach themselves to.
compengines = compressormanager()

class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and `decompressorreader`.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        return 'gzip', 'GZ'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not every compress() call emits output; it is cheaper to
            # skip empty results here than to push empty chunks through
            # the generator.
            if data:
                yield data
        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)
        return chunkbuffer(gen())

compengines.register(_zlibengine())

class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        return 'bzip2', 'BZ'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data
        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)
        return chunkbuffer(gen())

compengines.register(_bz2engine())

class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        # No user-facing spec name: this format only occurs internally.
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            d = bz2.BZ2Decompressor()
            d.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)
        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())

class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        return 'none', 'UN'

    def compressstream(self, it, opts=None):
        # pass-through: the input iterator already is the output
        return it

    def decompressorreader(self, fh):
        return fh

compengines.register(_noopengine())
3091
3109
# convenient shorthand for sprinkling stack traces while developing
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now