##// END OF EJS Templates
doc: describe detail about checkambig optional argument...
FUJIWARA Katsunori -
r29367:4e6e280e default
parent child Browse files
Show More
@@ -1,1391 +1,1399 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import glob
12 import glob
13 import hashlib
13 import hashlib
14 import os
14 import os
15 import re
15 import re
16 import shutil
16 import shutil
17 import stat
17 import stat
18 import tempfile
18 import tempfile
19 import threading
19 import threading
20
20
21 from .i18n import _
21 from .i18n import _
22 from .node import wdirrev
22 from .node import wdirrev
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 osutil,
27 osutil,
28 pathutil,
28 pathutil,
29 phases,
29 phases,
30 revset,
30 revset,
31 similar,
31 similar,
32 util,
32 util,
33 )
33 )
34
34
35 if os.name == 'nt':
35 if os.name == 'nt':
36 from . import scmwindows as scmplatform
36 from . import scmwindows as scmplatform
37 else:
37 else:
38 from . import scmposix as scmplatform
38 from . import scmposix as scmplatform
39
39
40 systemrcpath = scmplatform.systemrcpath
40 systemrcpath = scmplatform.systemrcpath
41 userrcpath = scmplatform.userrcpath
41 userrcpath = scmplatform.userrcpath
42
42
class status(tuple):
    '''Immutable 7-tuple holding one list of files per status category.

    The 'deleted', 'unknown' and 'ignored' categories are only
    meaningful when inspecting the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # fixed category order; the properties below index into it
        categories = (modified, added, removed, deleted, unknown,
                      ignored, clean)
        return tuple.__new__(cls, categories)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files still listed in the dirstate but gone from the working
        copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
95
95
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map each subpath to the context providing it; entries from ctx1
    # win because .hgsub may have been modified in ctx2 without the
    # change being committed in ctx1 yet.
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    # subrepo paths that exist only in ctx2
    missing = set(ctx2.substate) - set(ctx1.substate)
    for subpath in missing:
        del subpaths[subpath]

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2.
    # That way, status and diff will have an accurate result when it
    # does 'sub.{status|diff}(rev2)'.  Otherwise, the ctx2 subrepo is
    # compared against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
120
120
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
141
141
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not acceptable as a new label name.'''
    # Deliberately avoid interpolating "kind" into the ui output; doing
    # so makes the strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # the label parsed as an integer, which would shadow a revision
        raise error.Abort(_("cannot use an integer as a name"))
155
155
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # newline characters break the dirstate/manifest line format
    if '\n' in f or '\r' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
160
160
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not abort and not warn:
        return
    msg = util.checkwinfilename(f)
    if not msg:
        # nothing non-portable about this name
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
172
172
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    value = ui.config('ui', 'portablefilenames', 'warn')
    lowered = value.lower()
    boolvalue = util.parsebool(value)
    # non-portable names are always fatal on Windows
    abort = os.name == 'nt' or lowered == 'abort'
    warn = boolvalue or lowered == 'warn'
    if boolvalue is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
185
185
class casecollisionauditor(object):
    '''Detect new filenames that collide case-insensitively with files
    already tracked in the dirstate, warning or aborting per ``abort``.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # fold the case of every tracked filename in one pass
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        if folded in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
209
209
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        # nothing filtered: no key to compute
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%s;' % rev)
    return s.digest()
233
233
class abstractvfs(object):
    """Abstract base class; cannot be instantiated"""
    # NOTE: concrete subclasses are expected to provide ``join()`` and
    # ``__call__()`` -- both are used throughout the methods below.

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def tryread(self, path):
        '''gracefully return an empty string for missing files'''
        try:
            return self.read(path)
        except IOError as inst:
            # only a missing file is tolerated; any other I/O error
            # (e.g. permission denied) still propagates
            if inst.errno != errno.ENOENT:
                raise
        return ""

    def tryreadlines(self, path, mode='rb'):
        '''gracefully return an empty array for missing files'''
        try:
            return self.readlines(path, mode=mode)
        except IOError as inst:
            # only a missing file is tolerated; other errors propagate
            if inst.errno != errno.ENOENT:
                raise
        return []

    def open(self, path, mode="r", text=False, atomictemp=False,
             notindexed=False, backgroundclose=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        # rebind self.open on the instance so later calls go straight
        # to __call__ without re-entering this shim
        self.open = self.__call__
        return self.__call__(path, mode, text, atomictemp, notindexed,
                             backgroundclose=backgroundclose)

    def read(self, path):
        '''Return the entire content of ``path`` (opened in binary mode).'''
        with self(path, 'rb') as fp:
            return fp.read()

    def readlines(self, path, mode='rb'):
        '''Return the content of ``path`` as a list of lines.'''
        with self(path, mode=mode) as fp:
            return fp.readlines()

    def write(self, path, data, backgroundclose=False):
        '''Write ``data`` to ``path``, replacing any existing content.'''
        with self(path, 'wb', backgroundclose=backgroundclose) as fp:
            return fp.write(data)

    def writelines(self, path, data, mode='wb', notindexed=False):
        '''Write the sequence of lines ``data`` to ``path``.'''
        with self(path, mode=mode, notindexed=notindexed) as fp:
            return fp.writelines(data)

    def append(self, path, data):
        '''Append ``data`` to ``path``.'''
        with self(path, 'ab') as fp:
            return fp.write(data)

    def basename(self, path):
        """return base element of a path (as os.path.basename would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.basename(path)

    def chmod(self, path, mode):
        return os.chmod(self.join(path), mode)

    def dirname(self, path):
        """return dirname element of a path (as os.path.dirname would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.dirname(path)

    def exists(self, path=None):
        return os.path.exists(self.join(path))

    def fstat(self, fp):
        return util.fstat(fp)

    def isdir(self, path=None):
        return os.path.isdir(self.join(path))

    def isfile(self, path=None):
        return os.path.isfile(self.join(path))

    def islink(self, path=None):
        return os.path.islink(self.join(path))

    def isfileorlink(self, path=None):
        '''return whether path is a regular file or a symlink

        Unlike isfile, this doesn't follow symlinks.'''
        try:
            st = self.lstat(path)
        except OSError:
            # missing (or otherwise unstattable) paths are simply "no"
            return False
        mode = st.st_mode
        return stat.S_ISREG(mode) or stat.S_ISLNK(mode)

    def reljoin(self, *paths):
        """join various elements of a path together (as os.path.join would do)

        The vfs base is not injected so that path stay relative. This exists
        to allow handling of strange encoding if needed."""
        return os.path.join(*paths)

    def split(self, path):
        """split top-most element of a path (as os.path.split would do)

        This exists to allow handling of strange encoding if needed."""
        return os.path.split(path)

    def lexists(self, path=None):
        return os.path.lexists(self.join(path))

    def lstat(self, path=None):
        return os.lstat(self.join(path))

    def listdir(self, path=None):
        return os.listdir(self.join(path))

    def makedir(self, path=None, notindexed=True):
        return util.makedir(self.join(path), notindexed)

    def makedirs(self, path=None, mode=None):
        return util.makedirs(self.join(path), mode)

    def makelock(self, info, path):
        return util.makelock(info, self.join(path))

    def mkdir(self, path=None):
        return os.mkdir(self.join(path))

    def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
        '''Create a temp file under the vfs (in ``dir`` if given) and
        return ``(fd, name)``; ``name`` has the vfs base stripped off.'''
        fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
                                    dir=self.join(dir), text=text)
        dname, fname = util.split(name)
        if dir:
            return fd, os.path.join(dir, fname)
        else:
            return fd, fname

    def readdir(self, path=None, stat=None, skip=None):
        return osutil.listdir(self.join(path), stat, skip)

    def readlock(self, path):
        return util.readlock(self.join(path))

    def rename(self, src, dst, checkambig=False):
        """Rename from src to dst

        checkambig argument is used with util.filestat, and is useful
        only if destination file is guarded by any lock
        (e.g. repo.lock or repo.wlock).
        """
        dstpath = self.join(dst)
        # only stat the old destination when ambiguity checking is on
        oldstat = checkambig and util.filestat(dstpath)
        if oldstat and oldstat.stat:
            ret = util.rename(self.join(src), dstpath)
            newstat = util.filestat(dstpath)
            if newstat.isambig(oldstat):
                # stat of renamed file is ambiguous to original one;
                # bump mtime by one second (kept within 31 bits) so the
                # two stats can no longer be confused
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(dstpath, (advanced, advanced))
            return ret
        return util.rename(self.join(src), dstpath)

    def readlink(self, path):
        return os.readlink(self.join(path))

    def removedirs(self, path=None):
        """Remove a leaf directory and all empty intermediate ones
        """
        return util.removedirs(self.join(path))

    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
        """Remove a directory tree recursively

        If ``forcibly``, this tries to remove READ-ONLY files, too.
        """
        if forcibly:
            def onerror(function, path, excinfo):
                # only retry failed os.remove() calls
                if function is not os.remove:
                    raise
                # read-only files cannot be unlinked under Windows
                s = os.stat(path)
                if (s.st_mode & stat.S_IWRITE) != 0:
                    # writable already, so the failure was something else
                    raise
                os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
                os.remove(path)
        else:
            onerror = None
        return shutil.rmtree(self.join(path),
                             ignore_errors=ignore_errors, onerror=onerror)

    def setflags(self, path, l, x):
        return util.setflags(self.join(path), l, x)

    def stat(self, path=None):
        return os.stat(self.join(path))

    def unlink(self, path=None):
        return util.unlink(self.join(path))

    def unlinkpath(self, path=None, ignoremissing=False):
        return util.unlinkpath(self.join(path), ignoremissing)

    def utime(self, path=None, t=None):
        return os.utime(self.join(path), t)

    def walk(self, path=None, onerror=None):
        """Yield (dirpath, dirs, files) tuple for each directories under path

        ``dirpath`` is relative one from the root of this vfs. This
        uses ``os.sep`` as path separator, even you specify POSIX
        style ``path``.

        "The root of this vfs" is represented as empty ``dirpath``.
        """
        root = os.path.normpath(self.join(None))
        # when dirpath == root, dirpath[prefixlen:] becomes empty
        # because len(dirpath) < prefixlen.
        prefixlen = len(pathutil.normasprefix(root))
        for dirpath, dirs, files in os.walk(self.join(path), onerror=onerror):
            yield (dirpath[prefixlen:], dirs, files)

    @contextlib.contextmanager
    def backgroundclosing(self, ui, expectedcount=-1):
        """Allow files to be closed asynchronously.

        When this context manager is active, ``backgroundclose`` can be passed
        to ``__call__``/``open`` to result in the file possibly being closed
        asynchronously, on a background thread.
        """
        # This is an arbitrary restriction and could be changed if we ever
        # have a use case.
        vfs = getattr(self, 'vfs', self)
        if getattr(vfs, '_backgroundfilecloser', None):
            raise error.Abort('can only have 1 active background file closer')

        with backgroundfilecloser(ui, expectedcount=expectedcount) as bfc:
            try:
                vfs._backgroundfilecloser = bfc
                yield bfc
            finally:
                # always clear the slot so another closer can be started
                vfs._backgroundfilecloser = None
473
479
474 class vfs(abstractvfs):
480 class vfs(abstractvfs):
475 '''Operate files relative to a base directory
481 '''Operate files relative to a base directory
476
482
477 This class is used to hide the details of COW semantics and
483 This class is used to hide the details of COW semantics and
478 remote file access from higher level code.
484 remote file access from higher level code.
479 '''
485 '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # optionally expand "~"/environment references in the base path
        if expandpath:
            base = util.expandpath(base)
        # optionally canonicalize the base path (resolving symlinks)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        # assigning mustaudit installs (or disables) the path auditor
        # via the property setter
        self.mustaudit = audit
        # mode bits for newly created files; None means "leave as created"
        self.createmode = None
        self._trustnlink = None
489
495
    @property
    def mustaudit(self):
        # whether path auditing is currently enabled
        return self._audit

    @mustaudit.setter
    def mustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            # enable auditing: paths are checked by a pathauditor
            # rooted at self.base
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: util.always accepts any path
            self.audit = util.always
501
507
    @util.propertycache
    def _cansymlink(self):
        # cached result of util.checklink for the vfs root (presumably
        # whether symlinks are usable there -- defined in util)
        return util.checklink(self.base)
505
511
    @util.propertycache
    def _chmod(self):
        # cached result of util.checkexec for the vfs root (presumably
        # whether the exec bit is honored there -- defined in util)
        return util.checkexec(self.base)
509
515
    def _fixfilemode(self, name):
        # nothing to do unless a createmode is set and _chmod is true
        if self.createmode is None or not self._chmod:
            return
        # apply createmode masked to 0o666 (never grants exec/suid bits)
        os.chmod(name, self.createmode & 0o666)
514
520
515 def __call__(self, path, mode="r", text=False, atomictemp=False,
521 def __call__(self, path, mode="r", text=False, atomictemp=False,
516 notindexed=False, backgroundclose=False, checkambig=False):
522 notindexed=False, backgroundclose=False, checkambig=False):
517 '''Open ``path`` file, which is relative to vfs root.
523 '''Open ``path`` file, which is relative to vfs root.
518
524
519 Newly created directories are marked as "not to be indexed by
525 Newly created directories are marked as "not to be indexed by
520 the content indexing service", if ``notindexed`` is specified
526 the content indexing service", if ``notindexed`` is specified
521 for "write" mode access.
527 for "write" mode access.
522
528
523 If ``backgroundclose`` is passed, the file may be closed asynchronously.
529 If ``backgroundclose`` is passed, the file may be closed asynchronously.
524 It can only be used if the ``self.backgroundclosing()`` context manager
530 It can only be used if the ``self.backgroundclosing()`` context manager
525 is active. This should only be specified if the following criteria hold:
531 is active. This should only be specified if the following criteria hold:
526
532
527 1. There is a potential for writing thousands of files. Unless you
533 1. There is a potential for writing thousands of files. Unless you
528 are writing thousands of files, the performance benefits of
534 are writing thousands of files, the performance benefits of
529 asynchronously closing files is not realized.
535 asynchronously closing files is not realized.
530 2. Files are opened exactly once for the ``backgroundclosing``
536 2. Files are opened exactly once for the ``backgroundclosing``
531 active duration and are therefore free of race conditions between
537 active duration and are therefore free of race conditions between
532 closing a file on a background thread and reopening it. (If the
538 closing a file on a background thread and reopening it. (If the
533 file were opened multiple times, there could be unflushed data
539 file were opened multiple times, there could be unflushed data
534 because the original file handle hasn't been flushed/closed yet.)
540 because the original file handle hasn't been flushed/closed yet.)
535
541
536 ``checkambig`` is passed to atomictempfile (valid only for writing).
542 ``checkambig`` argument is passed to atomictemplfile (valid
543 only for writing), and is useful only if target file is
544 guarded by any lock (e.g. repo.lock or repo.wlock).
537 '''
545 '''
538 if self._audit:
546 if self._audit:
539 r = util.checkosfilename(path)
547 r = util.checkosfilename(path)
540 if r:
548 if r:
541 raise error.Abort("%s: %r" % (r, path))
549 raise error.Abort("%s: %r" % (r, path))
542 self.audit(path)
550 self.audit(path)
543 f = self.join(path)
551 f = self.join(path)
544
552
545 if not text and "b" not in mode:
553 if not text and "b" not in mode:
546 mode += "b" # for that other OS
554 mode += "b" # for that other OS
547
555
548 nlink = -1
556 nlink = -1
549 if mode not in ('r', 'rb'):
557 if mode not in ('r', 'rb'):
550 dirname, basename = util.split(f)
558 dirname, basename = util.split(f)
551 # If basename is empty, then the path is malformed because it points
559 # If basename is empty, then the path is malformed because it points
552 # to a directory. Let the posixfile() call below raise IOError.
560 # to a directory. Let the posixfile() call below raise IOError.
553 if basename:
561 if basename:
554 if atomictemp:
562 if atomictemp:
555 util.makedirs(dirname, self.createmode, notindexed)
563 util.makedirs(dirname, self.createmode, notindexed)
556 return util.atomictempfile(f, mode, self.createmode,
564 return util.atomictempfile(f, mode, self.createmode,
557 checkambig=checkambig)
565 checkambig=checkambig)
558 try:
566 try:
559 if 'w' in mode:
567 if 'w' in mode:
560 util.unlink(f)
568 util.unlink(f)
561 nlink = 0
569 nlink = 0
562 else:
570 else:
563 # nlinks() may behave differently for files on Windows
571 # nlinks() may behave differently for files on Windows
564 # shares if the file is open.
572 # shares if the file is open.
565 with util.posixfile(f):
573 with util.posixfile(f):
566 nlink = util.nlinks(f)
574 nlink = util.nlinks(f)
567 if nlink < 1:
575 if nlink < 1:
568 nlink = 2 # force mktempcopy (issue1922)
576 nlink = 2 # force mktempcopy (issue1922)
569 except (OSError, IOError) as e:
577 except (OSError, IOError) as e:
570 if e.errno != errno.ENOENT:
578 if e.errno != errno.ENOENT:
571 raise
579 raise
572 nlink = 0
580 nlink = 0
573 util.makedirs(dirname, self.createmode, notindexed)
581 util.makedirs(dirname, self.createmode, notindexed)
574 if nlink > 0:
582 if nlink > 0:
575 if self._trustnlink is None:
583 if self._trustnlink is None:
576 self._trustnlink = nlink > 1 or util.checknlink(f)
584 self._trustnlink = nlink > 1 or util.checknlink(f)
577 if nlink > 1 or not self._trustnlink:
585 if nlink > 1 or not self._trustnlink:
578 util.rename(util.mktempcopy(f), f)
586 util.rename(util.mktempcopy(f), f)
579 fp = util.posixfile(f, mode)
587 fp = util.posixfile(f, mode)
580 if nlink == 0:
588 if nlink == 0:
581 self._fixfilemode(f)
589 self._fixfilemode(f)
582
590
583 if backgroundclose:
591 if backgroundclose:
584 if not self._backgroundfilecloser:
592 if not self._backgroundfilecloser:
585 raise error.Abort('backgroundclose can only be used when a '
593 raise error.Abort('backgroundclose can only be used when a '
586 'backgroundclosing context manager is active')
594 'backgroundclosing context manager is active')
587
595
588 fp = delayclosedfile(fp, self._backgroundfilecloser)
596 fp = delayclosedfile(fp, self._backgroundfilecloser)
589
597
590 return fp
598 return fp
591
599
592 def symlink(self, src, dst):
600 def symlink(self, src, dst):
593 self.audit(dst)
601 self.audit(dst)
594 linkname = self.join(dst)
602 linkname = self.join(dst)
595 try:
603 try:
596 os.unlink(linkname)
604 os.unlink(linkname)
597 except OSError:
605 except OSError:
598 pass
606 pass
599
607
600 util.makedirs(os.path.dirname(linkname), self.createmode)
608 util.makedirs(os.path.dirname(linkname), self.createmode)
601
609
602 if self._cansymlink:
610 if self._cansymlink:
603 try:
611 try:
604 os.symlink(src, linkname)
612 os.symlink(src, linkname)
605 except OSError as err:
613 except OSError as err:
606 raise OSError(err.errno, _('could not symlink to %r: %s') %
614 raise OSError(err.errno, _('could not symlink to %r: %s') %
607 (src, err.strerror), linkname)
615 (src, err.strerror), linkname)
608 else:
616 else:
609 self.write(dst, src)
617 self.write(dst, src)
610
618
611 def join(self, path, *insidef):
619 def join(self, path, *insidef):
612 if path:
620 if path:
613 return os.path.join(self.base, path, *insidef)
621 return os.path.join(self.base, path, *insidef)
614 else:
622 else:
615 return self.base
623 return self.base
616
624
617 opener = vfs
625 opener = vfs
618
626
619 class auditvfs(object):
627 class auditvfs(object):
620 def __init__(self, vfs):
628 def __init__(self, vfs):
621 self.vfs = vfs
629 self.vfs = vfs
622
630
623 @property
631 @property
624 def mustaudit(self):
632 def mustaudit(self):
625 return self.vfs.mustaudit
633 return self.vfs.mustaudit
626
634
627 @mustaudit.setter
635 @mustaudit.setter
628 def mustaudit(self, onoff):
636 def mustaudit(self, onoff):
629 self.vfs.mustaudit = onoff
637 self.vfs.mustaudit = onoff
630
638
631 class filtervfs(abstractvfs, auditvfs):
639 class filtervfs(abstractvfs, auditvfs):
632 '''Wrapper vfs for filtering filenames with a function.'''
640 '''Wrapper vfs for filtering filenames with a function.'''
633
641
634 def __init__(self, vfs, filter):
642 def __init__(self, vfs, filter):
635 auditvfs.__init__(self, vfs)
643 auditvfs.__init__(self, vfs)
636 self._filter = filter
644 self._filter = filter
637
645
638 def __call__(self, path, *args, **kwargs):
646 def __call__(self, path, *args, **kwargs):
639 return self.vfs(self._filter(path), *args, **kwargs)
647 return self.vfs(self._filter(path), *args, **kwargs)
640
648
641 def join(self, path, *insidef):
649 def join(self, path, *insidef):
642 if path:
650 if path:
643 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
651 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
644 else:
652 else:
645 return self.vfs.join(path)
653 return self.vfs.join(path)
646
654
647 filteropener = filtervfs
655 filteropener = filtervfs
648
656
649 class readonlyvfs(abstractvfs, auditvfs):
657 class readonlyvfs(abstractvfs, auditvfs):
650 '''Wrapper vfs preventing any writing.'''
658 '''Wrapper vfs preventing any writing.'''
651
659
652 def __init__(self, vfs):
660 def __init__(self, vfs):
653 auditvfs.__init__(self, vfs)
661 auditvfs.__init__(self, vfs)
654
662
655 def __call__(self, path, mode='r', *args, **kw):
663 def __call__(self, path, mode='r', *args, **kw):
656 if mode not in ('r', 'rb'):
664 if mode not in ('r', 'rb'):
657 raise error.Abort('this vfs is read only')
665 raise error.Abort('this vfs is read only')
658 return self.vfs(path, mode, *args, **kw)
666 return self.vfs(path, mode, *args, **kw)
659
667
660 def join(self, path, *insidef):
668 def join(self, path, *insidef):
661 return self.vfs.join(path, *insidef)
669 return self.vfs.join(path, *insidef)
662
670
663 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
671 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
664 '''yield every hg repository under path, always recursively.
672 '''yield every hg repository under path, always recursively.
665 The recurse flag will only control recursion into repo working dirs'''
673 The recurse flag will only control recursion into repo working dirs'''
666 def errhandler(err):
674 def errhandler(err):
667 if err.filename == path:
675 if err.filename == path:
668 raise err
676 raise err
669 samestat = getattr(os.path, 'samestat', None)
677 samestat = getattr(os.path, 'samestat', None)
670 if followsym and samestat is not None:
678 if followsym and samestat is not None:
671 def adddir(dirlst, dirname):
679 def adddir(dirlst, dirname):
672 match = False
680 match = False
673 dirstat = os.stat(dirname)
681 dirstat = os.stat(dirname)
674 for lstdirstat in dirlst:
682 for lstdirstat in dirlst:
675 if samestat(dirstat, lstdirstat):
683 if samestat(dirstat, lstdirstat):
676 match = True
684 match = True
677 break
685 break
678 if not match:
686 if not match:
679 dirlst.append(dirstat)
687 dirlst.append(dirstat)
680 return not match
688 return not match
681 else:
689 else:
682 followsym = False
690 followsym = False
683
691
684 if (seen_dirs is None) and followsym:
692 if (seen_dirs is None) and followsym:
685 seen_dirs = []
693 seen_dirs = []
686 adddir(seen_dirs, path)
694 adddir(seen_dirs, path)
687 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
695 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
688 dirs.sort()
696 dirs.sort()
689 if '.hg' in dirs:
697 if '.hg' in dirs:
690 yield root # found a repository
698 yield root # found a repository
691 qroot = os.path.join(root, '.hg', 'patches')
699 qroot = os.path.join(root, '.hg', 'patches')
692 if os.path.isdir(os.path.join(qroot, '.hg')):
700 if os.path.isdir(os.path.join(qroot, '.hg')):
693 yield qroot # we have a patch queue repo here
701 yield qroot # we have a patch queue repo here
694 if recurse:
702 if recurse:
695 # avoid recursing inside the .hg directory
703 # avoid recursing inside the .hg directory
696 dirs.remove('.hg')
704 dirs.remove('.hg')
697 else:
705 else:
698 dirs[:] = [] # don't descend further
706 dirs[:] = [] # don't descend further
699 elif followsym:
707 elif followsym:
700 newdirs = []
708 newdirs = []
701 for d in dirs:
709 for d in dirs:
702 fname = os.path.join(root, d)
710 fname = os.path.join(root, d)
703 if adddir(seen_dirs, fname):
711 if adddir(seen_dirs, fname):
704 if os.path.islink(fname):
712 if os.path.islink(fname):
705 for hgname in walkrepos(fname, True, seen_dirs):
713 for hgname in walkrepos(fname, True, seen_dirs):
706 yield hgname
714 yield hgname
707 else:
715 else:
708 newdirs.append(d)
716 newdirs.append(d)
709 dirs[:] = newdirs
717 dirs[:] = newdirs
710
718
711 def osrcpath():
719 def osrcpath():
712 '''return default os-specific hgrc search path'''
720 '''return default os-specific hgrc search path'''
713 path = []
721 path = []
714 defaultpath = os.path.join(util.datapath, 'default.d')
722 defaultpath = os.path.join(util.datapath, 'default.d')
715 if os.path.isdir(defaultpath):
723 if os.path.isdir(defaultpath):
716 for f, kind in osutil.listdir(defaultpath):
724 for f, kind in osutil.listdir(defaultpath):
717 if f.endswith('.rc'):
725 if f.endswith('.rc'):
718 path.append(os.path.join(defaultpath, f))
726 path.append(os.path.join(defaultpath, f))
719 path.extend(systemrcpath())
727 path.extend(systemrcpath())
720 path.extend(userrcpath())
728 path.extend(userrcpath())
721 path = [os.path.normpath(f) for f in path]
729 path = [os.path.normpath(f) for f in path]
722 return path
730 return path
723
731
724 _rcpath = None
732 _rcpath = None
725
733
726 def rcpath():
734 def rcpath():
727 '''return hgrc search path. if env var HGRCPATH is set, use it.
735 '''return hgrc search path. if env var HGRCPATH is set, use it.
728 for each item in path, if directory, use files ending in .rc,
736 for each item in path, if directory, use files ending in .rc,
729 else use item.
737 else use item.
730 make HGRCPATH empty to only look in .hg/hgrc of current repo.
738 make HGRCPATH empty to only look in .hg/hgrc of current repo.
731 if no HGRCPATH, use default os-specific path.'''
739 if no HGRCPATH, use default os-specific path.'''
732 global _rcpath
740 global _rcpath
733 if _rcpath is None:
741 if _rcpath is None:
734 if 'HGRCPATH' in os.environ:
742 if 'HGRCPATH' in os.environ:
735 _rcpath = []
743 _rcpath = []
736 for p in os.environ['HGRCPATH'].split(os.pathsep):
744 for p in os.environ['HGRCPATH'].split(os.pathsep):
737 if not p:
745 if not p:
738 continue
746 continue
739 p = util.expandpath(p)
747 p = util.expandpath(p)
740 if os.path.isdir(p):
748 if os.path.isdir(p):
741 for f, kind in osutil.listdir(p):
749 for f, kind in osutil.listdir(p):
742 if f.endswith('.rc'):
750 if f.endswith('.rc'):
743 _rcpath.append(os.path.join(p, f))
751 _rcpath.append(os.path.join(p, f))
744 else:
752 else:
745 _rcpath.append(p)
753 _rcpath.append(p)
746 else:
754 else:
747 _rcpath = osrcpath()
755 _rcpath = osrcpath()
748 return _rcpath
756 return _rcpath
749
757
750 def intrev(rev):
758 def intrev(rev):
751 """Return integer for a given revision that can be used in comparison or
759 """Return integer for a given revision that can be used in comparison or
752 arithmetic operation"""
760 arithmetic operation"""
753 if rev is None:
761 if rev is None:
754 return wdirrev
762 return wdirrev
755 return rev
763 return rev
756
764
757 def revsingle(repo, revspec, default='.'):
765 def revsingle(repo, revspec, default='.'):
758 if not revspec and revspec != 0:
766 if not revspec and revspec != 0:
759 return repo[default]
767 return repo[default]
760
768
761 l = revrange(repo, [revspec])
769 l = revrange(repo, [revspec])
762 if not l:
770 if not l:
763 raise error.Abort(_('empty revision set'))
771 raise error.Abort(_('empty revision set'))
764 return repo[l.last()]
772 return repo[l.last()]
765
773
766 def _pairspec(revspec):
774 def _pairspec(revspec):
767 tree = revset.parse(revspec)
775 tree = revset.parse(revspec)
768 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
776 tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
769 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
777 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
770
778
771 def revpair(repo, revs):
779 def revpair(repo, revs):
772 if not revs:
780 if not revs:
773 return repo.dirstate.p1(), None
781 return repo.dirstate.p1(), None
774
782
775 l = revrange(repo, revs)
783 l = revrange(repo, revs)
776
784
777 if not l:
785 if not l:
778 first = second = None
786 first = second = None
779 elif l.isascending():
787 elif l.isascending():
780 first = l.min()
788 first = l.min()
781 second = l.max()
789 second = l.max()
782 elif l.isdescending():
790 elif l.isdescending():
783 first = l.max()
791 first = l.max()
784 second = l.min()
792 second = l.min()
785 else:
793 else:
786 first = l.first()
794 first = l.first()
787 second = l.last()
795 second = l.last()
788
796
789 if first is None:
797 if first is None:
790 raise error.Abort(_('empty revision range'))
798 raise error.Abort(_('empty revision range'))
791 if (first == second and len(revs) >= 2
799 if (first == second and len(revs) >= 2
792 and not all(revrange(repo, [r]) for r in revs)):
800 and not all(revrange(repo, [r]) for r in revs)):
793 raise error.Abort(_('empty revision on one side of range'))
801 raise error.Abort(_('empty revision on one side of range'))
794
802
795 # if top-level is range expression, the result must always be a pair
803 # if top-level is range expression, the result must always be a pair
796 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
804 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
797 return repo.lookup(first), None
805 return repo.lookup(first), None
798
806
799 return repo.lookup(first), repo.lookup(second)
807 return repo.lookup(first), repo.lookup(second)
800
808
801 def revrange(repo, revs):
809 def revrange(repo, revs):
802 """Yield revision as strings from a list of revision specifications."""
810 """Yield revision as strings from a list of revision specifications."""
803 allspecs = []
811 allspecs = []
804 for spec in revs:
812 for spec in revs:
805 if isinstance(spec, int):
813 if isinstance(spec, int):
806 spec = revset.formatspec('rev(%d)', spec)
814 spec = revset.formatspec('rev(%d)', spec)
807 allspecs.append(spec)
815 allspecs.append(spec)
808 m = revset.matchany(repo.ui, allspecs, repo)
816 m = revset.matchany(repo.ui, allspecs, repo)
809 return m(repo)
817 return m(repo)
810
818
811 def meaningfulparents(repo, ctx):
819 def meaningfulparents(repo, ctx):
812 """Return list of meaningful (or all if debug) parentrevs for rev.
820 """Return list of meaningful (or all if debug) parentrevs for rev.
813
821
814 For merges (two non-nullrev revisions) both parents are meaningful.
822 For merges (two non-nullrev revisions) both parents are meaningful.
815 Otherwise the first parent revision is considered meaningful if it
823 Otherwise the first parent revision is considered meaningful if it
816 is not the preceding revision.
824 is not the preceding revision.
817 """
825 """
818 parents = ctx.parents()
826 parents = ctx.parents()
819 if len(parents) > 1:
827 if len(parents) > 1:
820 return parents
828 return parents
821 if repo.ui.debugflag:
829 if repo.ui.debugflag:
822 return [parents[0], repo['null']]
830 return [parents[0], repo['null']]
823 if parents[0].rev() >= intrev(ctx.rev()) - 1:
831 if parents[0].rev() >= intrev(ctx.rev()) - 1:
824 return []
832 return []
825 return parents
833 return parents
826
834
827 def expandpats(pats):
835 def expandpats(pats):
828 '''Expand bare globs when running on windows.
836 '''Expand bare globs when running on windows.
829 On posix we assume it already has already been done by sh.'''
837 On posix we assume it already has already been done by sh.'''
830 if not util.expandglobs:
838 if not util.expandglobs:
831 return list(pats)
839 return list(pats)
832 ret = []
840 ret = []
833 for kindpat in pats:
841 for kindpat in pats:
834 kind, pat = matchmod._patsplit(kindpat, None)
842 kind, pat = matchmod._patsplit(kindpat, None)
835 if kind is None:
843 if kind is None:
836 try:
844 try:
837 globbed = glob.glob(pat)
845 globbed = glob.glob(pat)
838 except re.error:
846 except re.error:
839 globbed = [pat]
847 globbed = [pat]
840 if globbed:
848 if globbed:
841 ret.extend(globbed)
849 ret.extend(globbed)
842 continue
850 continue
843 ret.append(kindpat)
851 ret.append(kindpat)
844 return ret
852 return ret
845
853
846 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
854 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
847 badfn=None):
855 badfn=None):
848 '''Return a matcher and the patterns that were used.
856 '''Return a matcher and the patterns that were used.
849 The matcher will warn about bad matches, unless an alternate badfn callback
857 The matcher will warn about bad matches, unless an alternate badfn callback
850 is provided.'''
858 is provided.'''
851 if pats == ("",):
859 if pats == ("",):
852 pats = []
860 pats = []
853 if opts is None:
861 if opts is None:
854 opts = {}
862 opts = {}
855 if not globbed and default == 'relpath':
863 if not globbed and default == 'relpath':
856 pats = expandpats(pats or [])
864 pats = expandpats(pats or [])
857
865
858 def bad(f, msg):
866 def bad(f, msg):
859 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
867 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
860
868
861 if badfn is None:
869 if badfn is None:
862 badfn = bad
870 badfn = bad
863
871
864 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
872 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
865 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
873 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
866
874
867 if m.always():
875 if m.always():
868 pats = []
876 pats = []
869 return m, pats
877 return m, pats
870
878
871 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
879 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
872 badfn=None):
880 badfn=None):
873 '''Return a matcher that will warn about bad matches.'''
881 '''Return a matcher that will warn about bad matches.'''
874 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
882 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
875
883
876 def matchall(repo):
884 def matchall(repo):
877 '''Return a matcher that will efficiently match everything.'''
885 '''Return a matcher that will efficiently match everything.'''
878 return matchmod.always(repo.root, repo.getcwd())
886 return matchmod.always(repo.root, repo.getcwd())
879
887
880 def matchfiles(repo, files, badfn=None):
888 def matchfiles(repo, files, badfn=None):
881 '''Return a matcher that will efficiently match exactly these files.'''
889 '''Return a matcher that will efficiently match exactly these files.'''
882 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
890 return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
883
891
884 def origpath(ui, repo, filepath):
892 def origpath(ui, repo, filepath):
885 '''customize where .orig files are created
893 '''customize where .orig files are created
886
894
887 Fetch user defined path from config file: [ui] origbackuppath = <path>
895 Fetch user defined path from config file: [ui] origbackuppath = <path>
888 Fall back to default (filepath) if not specified
896 Fall back to default (filepath) if not specified
889 '''
897 '''
890 origbackuppath = ui.config('ui', 'origbackuppath', None)
898 origbackuppath = ui.config('ui', 'origbackuppath', None)
891 if origbackuppath is None:
899 if origbackuppath is None:
892 return filepath + ".orig"
900 return filepath + ".orig"
893
901
894 filepathfromroot = os.path.relpath(filepath, start=repo.root)
902 filepathfromroot = os.path.relpath(filepath, start=repo.root)
895 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
903 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
896
904
897 origbackupdir = repo.vfs.dirname(fullorigpath)
905 origbackupdir = repo.vfs.dirname(fullorigpath)
898 if not repo.vfs.exists(origbackupdir):
906 if not repo.vfs.exists(origbackupdir):
899 ui.note(_('creating directory: %s\n') % origbackupdir)
907 ui.note(_('creating directory: %s\n') % origbackupdir)
900 util.makedirs(origbackupdir)
908 util.makedirs(origbackupdir)
901
909
902 return fullorigpath + ".orig"
910 return fullorigpath + ".orig"
903
911
904 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
912 def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
905 if opts is None:
913 if opts is None:
906 opts = {}
914 opts = {}
907 m = matcher
915 m = matcher
908 if dry_run is None:
916 if dry_run is None:
909 dry_run = opts.get('dry_run')
917 dry_run = opts.get('dry_run')
910 if similarity is None:
918 if similarity is None:
911 similarity = float(opts.get('similarity') or 0)
919 similarity = float(opts.get('similarity') or 0)
912
920
913 ret = 0
921 ret = 0
914 join = lambda f: os.path.join(prefix, f)
922 join = lambda f: os.path.join(prefix, f)
915
923
916 def matchessubrepo(matcher, subpath):
924 def matchessubrepo(matcher, subpath):
917 if matcher.exact(subpath):
925 if matcher.exact(subpath):
918 return True
926 return True
919 for f in matcher.files():
927 for f in matcher.files():
920 if f.startswith(subpath):
928 if f.startswith(subpath):
921 return True
929 return True
922 return False
930 return False
923
931
924 wctx = repo[None]
932 wctx = repo[None]
925 for subpath in sorted(wctx.substate):
933 for subpath in sorted(wctx.substate):
926 if opts.get('subrepos') or matchessubrepo(m, subpath):
934 if opts.get('subrepos') or matchessubrepo(m, subpath):
927 sub = wctx.sub(subpath)
935 sub = wctx.sub(subpath)
928 try:
936 try:
929 submatch = matchmod.subdirmatcher(subpath, m)
937 submatch = matchmod.subdirmatcher(subpath, m)
930 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
938 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
931 ret = 1
939 ret = 1
932 except error.LookupError:
940 except error.LookupError:
933 repo.ui.status(_("skipping missing subrepository: %s\n")
941 repo.ui.status(_("skipping missing subrepository: %s\n")
934 % join(subpath))
942 % join(subpath))
935
943
936 rejected = []
944 rejected = []
937 def badfn(f, msg):
945 def badfn(f, msg):
938 if f in m.files():
946 if f in m.files():
939 m.bad(f, msg)
947 m.bad(f, msg)
940 rejected.append(f)
948 rejected.append(f)
941
949
942 badmatch = matchmod.badmatch(m, badfn)
950 badmatch = matchmod.badmatch(m, badfn)
943 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
951 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
944 badmatch)
952 badmatch)
945
953
946 unknownset = set(unknown + forgotten)
954 unknownset = set(unknown + forgotten)
947 toprint = unknownset.copy()
955 toprint = unknownset.copy()
948 toprint.update(deleted)
956 toprint.update(deleted)
949 for abs in sorted(toprint):
957 for abs in sorted(toprint):
950 if repo.ui.verbose or not m.exact(abs):
958 if repo.ui.verbose or not m.exact(abs):
951 if abs in unknownset:
959 if abs in unknownset:
952 status = _('adding %s\n') % m.uipath(abs)
960 status = _('adding %s\n') % m.uipath(abs)
953 else:
961 else:
954 status = _('removing %s\n') % m.uipath(abs)
962 status = _('removing %s\n') % m.uipath(abs)
955 repo.ui.status(status)
963 repo.ui.status(status)
956
964
957 renames = _findrenames(repo, m, added + unknown, removed + deleted,
965 renames = _findrenames(repo, m, added + unknown, removed + deleted,
958 similarity)
966 similarity)
959
967
960 if not dry_run:
968 if not dry_run:
961 _markchanges(repo, unknown + forgotten, deleted, renames)
969 _markchanges(repo, unknown + forgotten, deleted, renames)
962
970
963 for f in rejected:
971 for f in rejected:
964 if f in m.files():
972 if f in m.files():
965 return 1
973 return 1
966 return ret
974 return ret
967
975
968 def marktouched(repo, files, similarity=0.0):
976 def marktouched(repo, files, similarity=0.0):
969 '''Assert that files have somehow been operated upon. files are relative to
977 '''Assert that files have somehow been operated upon. files are relative to
970 the repo root.'''
978 the repo root.'''
971 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
979 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
972 rejected = []
980 rejected = []
973
981
974 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
982 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
975
983
976 if repo.ui.verbose:
984 if repo.ui.verbose:
977 unknownset = set(unknown + forgotten)
985 unknownset = set(unknown + forgotten)
978 toprint = unknownset.copy()
986 toprint = unknownset.copy()
979 toprint.update(deleted)
987 toprint.update(deleted)
980 for abs in sorted(toprint):
988 for abs in sorted(toprint):
981 if abs in unknownset:
989 if abs in unknownset:
982 status = _('adding %s\n') % abs
990 status = _('adding %s\n') % abs
983 else:
991 else:
984 status = _('removing %s\n') % abs
992 status = _('removing %s\n') % abs
985 repo.ui.status(status)
993 repo.ui.status(status)
986
994
987 renames = _findrenames(repo, m, added + unknown, removed + deleted,
995 renames = _findrenames(repo, m, added + unknown, removed + deleted,
988 similarity)
996 similarity)
989
997
990 _markchanges(repo, unknown + forgotten, deleted, renames)
998 _markchanges(repo, unknown + forgotten, deleted, renames)
991
999
992 for f in rejected:
1000 for f in rejected:
993 if f in m.files():
1001 if f in m.files():
994 return 1
1002 return 1
995 return 0
1003 return 0
996
1004
997 def _interestingfiles(repo, matcher):
1005 def _interestingfiles(repo, matcher):
998 '''Walk dirstate with matcher, looking for files that addremove would care
1006 '''Walk dirstate with matcher, looking for files that addremove would care
999 about.
1007 about.
1000
1008
1001 This is different from dirstate.status because it doesn't care about
1009 This is different from dirstate.status because it doesn't care about
1002 whether files are modified or clean.'''
1010 whether files are modified or clean.'''
1003 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1011 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1004 audit_path = pathutil.pathauditor(repo.root)
1012 audit_path = pathutil.pathauditor(repo.root)
1005
1013
1006 ctx = repo[None]
1014 ctx = repo[None]
1007 dirstate = repo.dirstate
1015 dirstate = repo.dirstate
1008 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1016 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
1009 full=False)
1017 full=False)
1010 for abs, st in walkresults.iteritems():
1018 for abs, st in walkresults.iteritems():
1011 dstate = dirstate[abs]
1019 dstate = dirstate[abs]
1012 if dstate == '?' and audit_path.check(abs):
1020 if dstate == '?' and audit_path.check(abs):
1013 unknown.append(abs)
1021 unknown.append(abs)
1014 elif dstate != 'r' and not st:
1022 elif dstate != 'r' and not st:
1015 deleted.append(abs)
1023 deleted.append(abs)
1016 elif dstate == 'r' and st:
1024 elif dstate == 'r' and st:
1017 forgotten.append(abs)
1025 forgotten.append(abs)
1018 # for finding renames
1026 # for finding renames
1019 elif dstate == 'r' and not st:
1027 elif dstate == 'r' and not st:
1020 removed.append(abs)
1028 removed.append(abs)
1021 elif dstate == 'a':
1029 elif dstate == 'a':
1022 added.append(abs)
1030 added.append(abs)
1023
1031
1024 return added, unknown, deleted, removed, forgotten
1032 return added, unknown, deleted, removed, forgotten
1025
1033
1026 def _findrenames(repo, matcher, added, removed, similarity):
1034 def _findrenames(repo, matcher, added, removed, similarity):
1027 '''Find renames from removed files to added ones.'''
1035 '''Find renames from removed files to added ones.'''
1028 renames = {}
1036 renames = {}
1029 if similarity > 0:
1037 if similarity > 0:
1030 for old, new, score in similar.findrenames(repo, added, removed,
1038 for old, new, score in similar.findrenames(repo, added, removed,
1031 similarity):
1039 similarity):
1032 if (repo.ui.verbose or not matcher.exact(old)
1040 if (repo.ui.verbose or not matcher.exact(old)
1033 or not matcher.exact(new)):
1041 or not matcher.exact(new)):
1034 repo.ui.status(_('recording removal of %s as rename to %s '
1042 repo.ui.status(_('recording removal of %s as rename to %s '
1035 '(%d%% similar)\n') %
1043 '(%d%% similar)\n') %
1036 (matcher.rel(old), matcher.rel(new),
1044 (matcher.rel(old), matcher.rel(new),
1037 score * 100))
1045 score * 100))
1038 renames[new] = old
1046 renames[new] = old
1039 return renames
1047 return renames
1040
1048
1041 def _markchanges(repo, unknown, deleted, renames):
1049 def _markchanges(repo, unknown, deleted, renames):
1042 '''Marks the files in unknown as added, the files in deleted as removed,
1050 '''Marks the files in unknown as added, the files in deleted as removed,
1043 and the files in renames as copied.'''
1051 and the files in renames as copied.'''
1044 wctx = repo[None]
1052 wctx = repo[None]
1045 with repo.wlock():
1053 with repo.wlock():
1046 wctx.forget(deleted)
1054 wctx.forget(deleted)
1047 wctx.add(unknown)
1055 wctx.add(unknown)
1048 for new, old in renames.iteritems():
1056 for new, old in renames.iteritems():
1049 wctx.copy(old, new)
1057 wctx.copy(old, new)
1050
1058
1051 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1059 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1052 """Update the dirstate to reflect the intent of copying src to dst. For
1060 """Update the dirstate to reflect the intent of copying src to dst. For
1053 different reasons it might not end with dst being marked as copied from src.
1061 different reasons it might not end with dst being marked as copied from src.
1054 """
1062 """
1055 origsrc = repo.dirstate.copied(src) or src
1063 origsrc = repo.dirstate.copied(src) or src
1056 if dst == origsrc: # copying back a copy?
1064 if dst == origsrc: # copying back a copy?
1057 if repo.dirstate[dst] not in 'mn' and not dryrun:
1065 if repo.dirstate[dst] not in 'mn' and not dryrun:
1058 repo.dirstate.normallookup(dst)
1066 repo.dirstate.normallookup(dst)
1059 else:
1067 else:
1060 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1068 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1061 if not ui.quiet:
1069 if not ui.quiet:
1062 ui.warn(_("%s has not been committed yet, so no copy "
1070 ui.warn(_("%s has not been committed yet, so no copy "
1063 "data will be stored for %s.\n")
1071 "data will be stored for %s.\n")
1064 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1072 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1065 if repo.dirstate[dst] in '?r' and not dryrun:
1073 if repo.dirstate[dst] in '?r' and not dryrun:
1066 wctx.add([dst])
1074 wctx.add([dst])
1067 elif not dryrun:
1075 elif not dryrun:
1068 wctx.copy(origsrc, dst)
1076 wctx.copy(origsrc, dst)
1069
1077
1070 def readrequires(opener, supported):
1078 def readrequires(opener, supported):
1071 '''Reads and parses .hg/requires and checks if all entries found
1079 '''Reads and parses .hg/requires and checks if all entries found
1072 are in the list of supported features.'''
1080 are in the list of supported features.'''
1073 requirements = set(opener.read("requires").splitlines())
1081 requirements = set(opener.read("requires").splitlines())
1074 missings = []
1082 missings = []
1075 for r in requirements:
1083 for r in requirements:
1076 if r not in supported:
1084 if r not in supported:
1077 if not r or not r[0].isalnum():
1085 if not r or not r[0].isalnum():
1078 raise error.RequirementError(_(".hg/requires file is corrupt"))
1086 raise error.RequirementError(_(".hg/requires file is corrupt"))
1079 missings.append(r)
1087 missings.append(r)
1080 missings.sort()
1088 missings.sort()
1081 if missings:
1089 if missings:
1082 raise error.RequirementError(
1090 raise error.RequirementError(
1083 _("repository requires features unknown to this Mercurial: %s")
1091 _("repository requires features unknown to this Mercurial: %s")
1084 % " ".join(missings),
1092 % " ".join(missings),
1085 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1093 hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
1086 " for more information"))
1094 " for more information"))
1087 return requirements
1095 return requirements
1088
1096
1089 def writerequires(opener, requirements):
1097 def writerequires(opener, requirements):
1090 with opener('requires', 'w') as fp:
1098 with opener('requires', 'w') as fp:
1091 for r in sorted(requirements):
1099 for r in sorted(requirements):
1092 fp.write("%s\n" % r)
1100 fp.write("%s\n" % r)
1093
1101
1094 class filecachesubentry(object):
1102 class filecachesubentry(object):
1095 def __init__(self, path, stat):
1103 def __init__(self, path, stat):
1096 self.path = path
1104 self.path = path
1097 self.cachestat = None
1105 self.cachestat = None
1098 self._cacheable = None
1106 self._cacheable = None
1099
1107
1100 if stat:
1108 if stat:
1101 self.cachestat = filecachesubentry.stat(self.path)
1109 self.cachestat = filecachesubentry.stat(self.path)
1102
1110
1103 if self.cachestat:
1111 if self.cachestat:
1104 self._cacheable = self.cachestat.cacheable()
1112 self._cacheable = self.cachestat.cacheable()
1105 else:
1113 else:
1106 # None means we don't know yet
1114 # None means we don't know yet
1107 self._cacheable = None
1115 self._cacheable = None
1108
1116
1109 def refresh(self):
1117 def refresh(self):
1110 if self.cacheable():
1118 if self.cacheable():
1111 self.cachestat = filecachesubentry.stat(self.path)
1119 self.cachestat = filecachesubentry.stat(self.path)
1112
1120
1113 def cacheable(self):
1121 def cacheable(self):
1114 if self._cacheable is not None:
1122 if self._cacheable is not None:
1115 return self._cacheable
1123 return self._cacheable
1116
1124
1117 # we don't know yet, assume it is for now
1125 # we don't know yet, assume it is for now
1118 return True
1126 return True
1119
1127
1120 def changed(self):
1128 def changed(self):
1121 # no point in going further if we can't cache it
1129 # no point in going further if we can't cache it
1122 if not self.cacheable():
1130 if not self.cacheable():
1123 return True
1131 return True
1124
1132
1125 newstat = filecachesubentry.stat(self.path)
1133 newstat = filecachesubentry.stat(self.path)
1126
1134
1127 # we may not know if it's cacheable yet, check again now
1135 # we may not know if it's cacheable yet, check again now
1128 if newstat and self._cacheable is None:
1136 if newstat and self._cacheable is None:
1129 self._cacheable = newstat.cacheable()
1137 self._cacheable = newstat.cacheable()
1130
1138
1131 # check again
1139 # check again
1132 if not self._cacheable:
1140 if not self._cacheable:
1133 return True
1141 return True
1134
1142
1135 if self.cachestat != newstat:
1143 if self.cachestat != newstat:
1136 self.cachestat = newstat
1144 self.cachestat = newstat
1137 return True
1145 return True
1138 else:
1146 else:
1139 return False
1147 return False
1140
1148
1141 @staticmethod
1149 @staticmethod
1142 def stat(path):
1150 def stat(path):
1143 try:
1151 try:
1144 return util.cachestat(path)
1152 return util.cachestat(path)
1145 except OSError as e:
1153 except OSError as e:
1146 if e.errno != errno.ENOENT:
1154 if e.errno != errno.ENOENT:
1147 raise
1155 raise
1148
1156
1149 class filecacheentry(object):
1157 class filecacheentry(object):
1150 def __init__(self, paths, stat=True):
1158 def __init__(self, paths, stat=True):
1151 self._entries = []
1159 self._entries = []
1152 for path in paths:
1160 for path in paths:
1153 self._entries.append(filecachesubentry(path, stat))
1161 self._entries.append(filecachesubentry(path, stat))
1154
1162
1155 def changed(self):
1163 def changed(self):
1156 '''true if any entry has changed'''
1164 '''true if any entry has changed'''
1157 for entry in self._entries:
1165 for entry in self._entries:
1158 if entry.changed():
1166 if entry.changed():
1159 return True
1167 return True
1160 return False
1168 return False
1161
1169
1162 def refresh(self):
1170 def refresh(self):
1163 for entry in self._entries:
1171 for entry in self._entries:
1164 entry.refresh()
1172 entry.refresh()
1165
1173
1166 class filecache(object):
1174 class filecache(object):
1167 '''A property like decorator that tracks files under .hg/ for updates.
1175 '''A property like decorator that tracks files under .hg/ for updates.
1168
1176
1169 Records stat info when called in _filecache.
1177 Records stat info when called in _filecache.
1170
1178
1171 On subsequent calls, compares old stat info with new info, and recreates the
1179 On subsequent calls, compares old stat info with new info, and recreates the
1172 object when any of the files changes, updating the new stat info in
1180 object when any of the files changes, updating the new stat info in
1173 _filecache.
1181 _filecache.
1174
1182
1175 Mercurial either atomic renames or appends for files under .hg,
1183 Mercurial either atomic renames or appends for files under .hg,
1176 so to ensure the cache is reliable we need the filesystem to be able
1184 so to ensure the cache is reliable we need the filesystem to be able
1177 to tell us if a file has been replaced. If it can't, we fallback to
1185 to tell us if a file has been replaced. If it can't, we fallback to
1178 recreating the object on every call (essentially the same behavior as
1186 recreating the object on every call (essentially the same behavior as
1179 propertycache).
1187 propertycache).
1180
1188
1181 '''
1189 '''
1182 def __init__(self, *paths):
1190 def __init__(self, *paths):
1183 self.paths = paths
1191 self.paths = paths
1184
1192
1185 def join(self, obj, fname):
1193 def join(self, obj, fname):
1186 """Used to compute the runtime path of a cached file.
1194 """Used to compute the runtime path of a cached file.
1187
1195
1188 Users should subclass filecache and provide their own version of this
1196 Users should subclass filecache and provide their own version of this
1189 function to call the appropriate join function on 'obj' (an instance
1197 function to call the appropriate join function on 'obj' (an instance
1190 of the class that its member function was decorated).
1198 of the class that its member function was decorated).
1191 """
1199 """
1192 return obj.join(fname)
1200 return obj.join(fname)
1193
1201
1194 def __call__(self, func):
1202 def __call__(self, func):
1195 self.func = func
1203 self.func = func
1196 self.name = func.__name__
1204 self.name = func.__name__
1197 return self
1205 return self
1198
1206
1199 def __get__(self, obj, type=None):
1207 def __get__(self, obj, type=None):
1200 # do we need to check if the file changed?
1208 # do we need to check if the file changed?
1201 if self.name in obj.__dict__:
1209 if self.name in obj.__dict__:
1202 assert self.name in obj._filecache, self.name
1210 assert self.name in obj._filecache, self.name
1203 return obj.__dict__[self.name]
1211 return obj.__dict__[self.name]
1204
1212
1205 entry = obj._filecache.get(self.name)
1213 entry = obj._filecache.get(self.name)
1206
1214
1207 if entry:
1215 if entry:
1208 if entry.changed():
1216 if entry.changed():
1209 entry.obj = self.func(obj)
1217 entry.obj = self.func(obj)
1210 else:
1218 else:
1211 paths = [self.join(obj, path) for path in self.paths]
1219 paths = [self.join(obj, path) for path in self.paths]
1212
1220
1213 # We stat -before- creating the object so our cache doesn't lie if
1221 # We stat -before- creating the object so our cache doesn't lie if
1214 # a writer modified between the time we read and stat
1222 # a writer modified between the time we read and stat
1215 entry = filecacheentry(paths, True)
1223 entry = filecacheentry(paths, True)
1216 entry.obj = self.func(obj)
1224 entry.obj = self.func(obj)
1217
1225
1218 obj._filecache[self.name] = entry
1226 obj._filecache[self.name] = entry
1219
1227
1220 obj.__dict__[self.name] = entry.obj
1228 obj.__dict__[self.name] = entry.obj
1221 return entry.obj
1229 return entry.obj
1222
1230
1223 def __set__(self, obj, value):
1231 def __set__(self, obj, value):
1224 if self.name not in obj._filecache:
1232 if self.name not in obj._filecache:
1225 # we add an entry for the missing value because X in __dict__
1233 # we add an entry for the missing value because X in __dict__
1226 # implies X in _filecache
1234 # implies X in _filecache
1227 paths = [self.join(obj, path) for path in self.paths]
1235 paths = [self.join(obj, path) for path in self.paths]
1228 ce = filecacheentry(paths, False)
1236 ce = filecacheentry(paths, False)
1229 obj._filecache[self.name] = ce
1237 obj._filecache[self.name] = ce
1230 else:
1238 else:
1231 ce = obj._filecache[self.name]
1239 ce = obj._filecache[self.name]
1232
1240
1233 ce.obj = value # update cached copy
1241 ce.obj = value # update cached copy
1234 obj.__dict__[self.name] = value # update copy returned by obj.x
1242 obj.__dict__[self.name] = value # update copy returned by obj.x
1235
1243
1236 def __delete__(self, obj):
1244 def __delete__(self, obj):
1237 try:
1245 try:
1238 del obj.__dict__[self.name]
1246 del obj.__dict__[self.name]
1239 except KeyError:
1247 except KeyError:
1240 raise AttributeError(self.name)
1248 raise AttributeError(self.name)
1241
1249
1242 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1250 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1243 if lock is None:
1251 if lock is None:
1244 raise error.LockInheritanceContractViolation(
1252 raise error.LockInheritanceContractViolation(
1245 'lock can only be inherited while held')
1253 'lock can only be inherited while held')
1246 if environ is None:
1254 if environ is None:
1247 environ = {}
1255 environ = {}
1248 with lock.inherit() as locker:
1256 with lock.inherit() as locker:
1249 environ[envvar] = locker
1257 environ[envvar] = locker
1250 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1258 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1251
1259
1252 def wlocksub(repo, cmd, *args, **kwargs):
1260 def wlocksub(repo, cmd, *args, **kwargs):
1253 """run cmd as a subprocess that allows inheriting repo's wlock
1261 """run cmd as a subprocess that allows inheriting repo's wlock
1254
1262
1255 This can only be called while the wlock is held. This takes all the
1263 This can only be called while the wlock is held. This takes all the
1256 arguments that ui.system does, and returns the exit code of the
1264 arguments that ui.system does, and returns the exit code of the
1257 subprocess."""
1265 subprocess."""
1258 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1266 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1259 **kwargs)
1267 **kwargs)
1260
1268
1261 def gdinitconfig(ui):
1269 def gdinitconfig(ui):
1262 """helper function to know if a repo should be created as general delta
1270 """helper function to know if a repo should be created as general delta
1263 """
1271 """
1264 # experimental config: format.generaldelta
1272 # experimental config: format.generaldelta
1265 return (ui.configbool('format', 'generaldelta', False)
1273 return (ui.configbool('format', 'generaldelta', False)
1266 or ui.configbool('format', 'usegeneraldelta', True))
1274 or ui.configbool('format', 'usegeneraldelta', True))
1267
1275
1268 def gddeltaconfig(ui):
1276 def gddeltaconfig(ui):
1269 """helper function to know if incoming delta should be optimised
1277 """helper function to know if incoming delta should be optimised
1270 """
1278 """
1271 # experimental config: format.generaldelta
1279 # experimental config: format.generaldelta
1272 return ui.configbool('format', 'generaldelta', False)
1280 return ui.configbool('format', 'generaldelta', False)
1273
1281
1274 class delayclosedfile(object):
1282 class delayclosedfile(object):
1275 """Proxy for a file object whose close is delayed.
1283 """Proxy for a file object whose close is delayed.
1276
1284
1277 Do not instantiate outside of the vfs layer.
1285 Do not instantiate outside of the vfs layer.
1278 """
1286 """
1279
1287
1280 def __init__(self, fh, closer):
1288 def __init__(self, fh, closer):
1281 object.__setattr__(self, '_origfh', fh)
1289 object.__setattr__(self, '_origfh', fh)
1282 object.__setattr__(self, '_closer', closer)
1290 object.__setattr__(self, '_closer', closer)
1283
1291
1284 def __getattr__(self, attr):
1292 def __getattr__(self, attr):
1285 return getattr(self._origfh, attr)
1293 return getattr(self._origfh, attr)
1286
1294
1287 def __setattr__(self, attr, value):
1295 def __setattr__(self, attr, value):
1288 return setattr(self._origfh, attr, value)
1296 return setattr(self._origfh, attr, value)
1289
1297
1290 def __delattr__(self, attr):
1298 def __delattr__(self, attr):
1291 return delattr(self._origfh, attr)
1299 return delattr(self._origfh, attr)
1292
1300
1293 def __enter__(self):
1301 def __enter__(self):
1294 return self._origfh.__enter__()
1302 return self._origfh.__enter__()
1295
1303
1296 def __exit__(self, exc_type, exc_value, exc_tb):
1304 def __exit__(self, exc_type, exc_value, exc_tb):
1297 self._closer.close(self._origfh)
1305 self._closer.close(self._origfh)
1298
1306
1299 def close(self):
1307 def close(self):
1300 self._closer.close(self._origfh)
1308 self._closer.close(self._origfh)
1301
1309
1302 class backgroundfilecloser(object):
1310 class backgroundfilecloser(object):
1303 """Coordinates background closing of file handles on multiple threads."""
1311 """Coordinates background closing of file handles on multiple threads."""
1304 def __init__(self, ui, expectedcount=-1):
1312 def __init__(self, ui, expectedcount=-1):
1305 self._running = False
1313 self._running = False
1306 self._entered = False
1314 self._entered = False
1307 self._threads = []
1315 self._threads = []
1308 self._threadexception = None
1316 self._threadexception = None
1309
1317
1310 # Only Windows/NTFS has slow file closing. So only enable by default
1318 # Only Windows/NTFS has slow file closing. So only enable by default
1311 # on that platform. But allow to be enabled elsewhere for testing.
1319 # on that platform. But allow to be enabled elsewhere for testing.
1312 defaultenabled = os.name == 'nt'
1320 defaultenabled = os.name == 'nt'
1313 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1321 enabled = ui.configbool('worker', 'backgroundclose', defaultenabled)
1314
1322
1315 if not enabled:
1323 if not enabled:
1316 return
1324 return
1317
1325
1318 # There is overhead to starting and stopping the background threads.
1326 # There is overhead to starting and stopping the background threads.
1319 # Don't do background processing unless the file count is large enough
1327 # Don't do background processing unless the file count is large enough
1320 # to justify it.
1328 # to justify it.
1321 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1329 minfilecount = ui.configint('worker', 'backgroundcloseminfilecount',
1322 2048)
1330 2048)
1323 # FUTURE dynamically start background threads after minfilecount closes.
1331 # FUTURE dynamically start background threads after minfilecount closes.
1324 # (We don't currently have any callers that don't know their file count)
1332 # (We don't currently have any callers that don't know their file count)
1325 if expectedcount > 0 and expectedcount < minfilecount:
1333 if expectedcount > 0 and expectedcount < minfilecount:
1326 return
1334 return
1327
1335
1328 # Windows defaults to a limit of 512 open files. A buffer of 128
1336 # Windows defaults to a limit of 512 open files. A buffer of 128
1329 # should give us enough headway.
1337 # should give us enough headway.
1330 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1338 maxqueue = ui.configint('worker', 'backgroundclosemaxqueue', 384)
1331 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1339 threadcount = ui.configint('worker', 'backgroundclosethreadcount', 4)
1332
1340
1333 ui.debug('starting %d threads for background file closing\n' %
1341 ui.debug('starting %d threads for background file closing\n' %
1334 threadcount)
1342 threadcount)
1335
1343
1336 self._queue = util.queue(maxsize=maxqueue)
1344 self._queue = util.queue(maxsize=maxqueue)
1337 self._running = True
1345 self._running = True
1338
1346
1339 for i in range(threadcount):
1347 for i in range(threadcount):
1340 t = threading.Thread(target=self._worker, name='backgroundcloser')
1348 t = threading.Thread(target=self._worker, name='backgroundcloser')
1341 self._threads.append(t)
1349 self._threads.append(t)
1342 t.start()
1350 t.start()
1343
1351
1344 def __enter__(self):
1352 def __enter__(self):
1345 self._entered = True
1353 self._entered = True
1346 return self
1354 return self
1347
1355
1348 def __exit__(self, exc_type, exc_value, exc_tb):
1356 def __exit__(self, exc_type, exc_value, exc_tb):
1349 self._running = False
1357 self._running = False
1350
1358
1351 # Wait for threads to finish closing so open files don't linger for
1359 # Wait for threads to finish closing so open files don't linger for
1352 # longer than lifetime of context manager.
1360 # longer than lifetime of context manager.
1353 for t in self._threads:
1361 for t in self._threads:
1354 t.join()
1362 t.join()
1355
1363
1356 def _worker(self):
1364 def _worker(self):
1357 """Main routine for worker thread."""
1365 """Main routine for worker thread."""
1358 while True:
1366 while True:
1359 try:
1367 try:
1360 fh = self._queue.get(block=True, timeout=0.100)
1368 fh = self._queue.get(block=True, timeout=0.100)
1361 # Need to catch or the thread will terminate and
1369 # Need to catch or the thread will terminate and
1362 # we could orphan file descriptors.
1370 # we could orphan file descriptors.
1363 try:
1371 try:
1364 fh.close()
1372 fh.close()
1365 except Exception as e:
1373 except Exception as e:
1366 # Stash so can re-raise from main thread later.
1374 # Stash so can re-raise from main thread later.
1367 self._threadexception = e
1375 self._threadexception = e
1368 except util.empty:
1376 except util.empty:
1369 if not self._running:
1377 if not self._running:
1370 break
1378 break
1371
1379
1372 def close(self, fh):
1380 def close(self, fh):
1373 """Schedule a file for closing."""
1381 """Schedule a file for closing."""
1374 if not self._entered:
1382 if not self._entered:
1375 raise error.Abort('can only call close() when context manager '
1383 raise error.Abort('can only call close() when context manager '
1376 'active')
1384 'active')
1377
1385
1378 # If a background thread encountered an exception, raise now so we fail
1386 # If a background thread encountered an exception, raise now so we fail
1379 # fast. Otherwise we may potentially go on for minutes until the error
1387 # fast. Otherwise we may potentially go on for minutes until the error
1380 # is acted on.
1388 # is acted on.
1381 if self._threadexception:
1389 if self._threadexception:
1382 e = self._threadexception
1390 e = self._threadexception
1383 self._threadexception = None
1391 self._threadexception = None
1384 raise e
1392 raise e
1385
1393
1386 # If we're not actively running, close synchronously.
1394 # If we're not actively running, close synchronously.
1387 if not self._running:
1395 if not self._running:
1388 fh.close()
1396 fh.close()
1389 return
1397 return
1390
1398
1391 self._queue.put(fh, block=True, timeout=None)
1399 self._queue.put(fh, block=True, timeout=None)
@@ -1,2832 +1,2843 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import re as remod
27 import re as remod
28 import shutil
28 import shutil
29 import signal
29 import signal
30 import socket
30 import socket
31 import subprocess
31 import subprocess
32 import sys
32 import sys
33 import tempfile
33 import tempfile
34 import textwrap
34 import textwrap
35 import time
35 import time
36 import traceback
36 import traceback
37 import zlib
37 import zlib
38
38
39 from . import (
39 from . import (
40 encoding,
40 encoding,
41 error,
41 error,
42 i18n,
42 i18n,
43 osutil,
43 osutil,
44 parsers,
44 parsers,
45 pycompat,
45 pycompat,
46 )
46 )
47
47
48 for attr in (
48 for attr in (
49 'empty',
49 'empty',
50 'pickle',
50 'pickle',
51 'queue',
51 'queue',
52 'urlerr',
52 'urlerr',
53 # we do import urlreq, but we do it outside the loop
53 # we do import urlreq, but we do it outside the loop
54 #'urlreq',
54 #'urlreq',
55 'stringio',
55 'stringio',
56 ):
56 ):
57 globals()[attr] = getattr(pycompat, attr)
57 globals()[attr] = getattr(pycompat, attr)
58
58
59 # This line is to make pyflakes happy:
59 # This line is to make pyflakes happy:
60 urlreq = pycompat.urlreq
60 urlreq = pycompat.urlreq
61
61
62 if os.name == 'nt':
62 if os.name == 'nt':
63 from . import windows as platform
63 from . import windows as platform
64 else:
64 else:
65 from . import posix as platform
65 from . import posix as platform
66
66
67 _ = i18n._
67 _ = i18n._
68
68
69 cachestat = platform.cachestat
69 cachestat = platform.cachestat
70 checkexec = platform.checkexec
70 checkexec = platform.checkexec
71 checklink = platform.checklink
71 checklink = platform.checklink
72 copymode = platform.copymode
72 copymode = platform.copymode
73 executablepath = platform.executablepath
73 executablepath = platform.executablepath
74 expandglobs = platform.expandglobs
74 expandglobs = platform.expandglobs
75 explainexit = platform.explainexit
75 explainexit = platform.explainexit
76 findexe = platform.findexe
76 findexe = platform.findexe
77 gethgcmd = platform.gethgcmd
77 gethgcmd = platform.gethgcmd
78 getuser = platform.getuser
78 getuser = platform.getuser
79 getpid = os.getpid
79 getpid = os.getpid
80 groupmembers = platform.groupmembers
80 groupmembers = platform.groupmembers
81 groupname = platform.groupname
81 groupname = platform.groupname
82 hidewindow = platform.hidewindow
82 hidewindow = platform.hidewindow
83 isexec = platform.isexec
83 isexec = platform.isexec
84 isowner = platform.isowner
84 isowner = platform.isowner
85 localpath = platform.localpath
85 localpath = platform.localpath
86 lookupreg = platform.lookupreg
86 lookupreg = platform.lookupreg
87 makedir = platform.makedir
87 makedir = platform.makedir
88 nlinks = platform.nlinks
88 nlinks = platform.nlinks
89 normpath = platform.normpath
89 normpath = platform.normpath
90 normcase = platform.normcase
90 normcase = platform.normcase
91 normcasespec = platform.normcasespec
91 normcasespec = platform.normcasespec
92 normcasefallback = platform.normcasefallback
92 normcasefallback = platform.normcasefallback
93 openhardlinks = platform.openhardlinks
93 openhardlinks = platform.openhardlinks
94 oslink = platform.oslink
94 oslink = platform.oslink
95 parsepatchoutput = platform.parsepatchoutput
95 parsepatchoutput = platform.parsepatchoutput
96 pconvert = platform.pconvert
96 pconvert = platform.pconvert
97 poll = platform.poll
97 poll = platform.poll
98 popen = platform.popen
98 popen = platform.popen
99 posixfile = platform.posixfile
99 posixfile = platform.posixfile
100 quotecommand = platform.quotecommand
100 quotecommand = platform.quotecommand
101 readpipe = platform.readpipe
101 readpipe = platform.readpipe
102 rename = platform.rename
102 rename = platform.rename
103 removedirs = platform.removedirs
103 removedirs = platform.removedirs
104 samedevice = platform.samedevice
104 samedevice = platform.samedevice
105 samefile = platform.samefile
105 samefile = platform.samefile
106 samestat = platform.samestat
106 samestat = platform.samestat
107 setbinary = platform.setbinary
107 setbinary = platform.setbinary
108 setflags = platform.setflags
108 setflags = platform.setflags
109 setsignalhandler = platform.setsignalhandler
109 setsignalhandler = platform.setsignalhandler
110 shellquote = platform.shellquote
110 shellquote = platform.shellquote
111 spawndetached = platform.spawndetached
111 spawndetached = platform.spawndetached
112 split = platform.split
112 split = platform.split
113 sshargs = platform.sshargs
113 sshargs = platform.sshargs
114 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
114 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
115 statisexec = platform.statisexec
115 statisexec = platform.statisexec
116 statislink = platform.statislink
116 statislink = platform.statislink
117 termwidth = platform.termwidth
117 termwidth = platform.termwidth
118 testpid = platform.testpid
118 testpid = platform.testpid
119 umask = platform.umask
119 umask = platform.umask
120 unlink = platform.unlink
120 unlink = platform.unlink
121 unlinkpath = platform.unlinkpath
121 unlinkpath = platform.unlinkpath
122 username = platform.username
122 username = platform.username
123
123
124 # Python compatibility
124 # Python compatibility
125
125
126 _notset = object()
126 _notset = object()
127
127
128 # disable Python's problematic floating point timestamps (issue4836)
128 # disable Python's problematic floating point timestamps (issue4836)
129 # (Python hypocritically says you shouldn't change this behavior in
129 # (Python hypocritically says you shouldn't change this behavior in
130 # libraries, and sure enough Mercurial is not a library.)
130 # libraries, and sure enough Mercurial is not a library.)
131 os.stat_float_times(False)
131 os.stat_float_times(False)
132
132
133 def safehasattr(thing, attr):
133 def safehasattr(thing, attr):
134 return getattr(thing, attr, _notset) is not _notset
134 return getattr(thing, attr, _notset) is not _notset
135
135
136 DIGESTS = {
136 DIGESTS = {
137 'md5': hashlib.md5,
137 'md5': hashlib.md5,
138 'sha1': hashlib.sha1,
138 'sha1': hashlib.sha1,
139 'sha512': hashlib.sha512,
139 'sha512': hashlib.sha512,
140 }
140 }
141 # List of digest types from strongest to weakest
141 # List of digest types from strongest to weakest
142 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
142 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
143
143
144 for k in DIGESTS_BY_STRENGTH:
144 for k in DIGESTS_BY_STRENGTH:
145 assert k in DIGESTS
145 assert k in DIGESTS
146
146
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create one hash object per name in ``digests``.

        ``s`` is optional initial data fed to every digest.
        Raises Abort for names missing from DIGESTS.
        """
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` to every tracked hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the current hex digest for digest type ``key``."""
        if key not in DIGESTS:
            # fix: report the requested key; the previous code interpolated
            # the stale loop variable 'k' from an unrelated scope
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
193
193
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        # wrapped handle plus the expectations checked by validate()
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # forward the read, while accounting bytes and updating digests
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        # check the total byte count first, then each requested digest
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, wanted in self._digests.items():
            computed = self._digester[name]
            if wanted != computed:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, wanted, computed))
225
225
# Provide a module-level 'buffer' callable even where the builtin is gone:
# on Python 2 fall back to a copying slice, on Python 3 use a zero-copy
# memoryview slice.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

# only close inherited file descriptors in children on POSIX
closefds = os.name == 'posix'

# read size used by bufferedinputpipe._fillbuffer
_chunksize = 4096
239
239
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []   # queue of buffered chunks, oldest first
        self._eof = False   # set once os.read() returns no data
        self._lenbuf = 0    # total number of buffered bytes over all chunks

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we can serve `size` bytes or hit end of file
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline within the newest chunk (-1 if none)
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse chunks so one slice below serves the whole request
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
333
333
def popen2(cmd, env=None, newlines=False):
    """Run ``cmd`` through a shell and return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
344
344
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but only expose the (stdin, stdout, stderr) pipes."""
    pipes = popen4(cmd, env, newlines)
    # drop the Popen object returned as the fourth element
    return pipes[0], pipes[1], pipes[2]
348
348
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run ``cmd`` through a shell.

    Returns (stdin, stdout, stderr, proc) where ``proc`` is the
    underlying subprocess.Popen object.
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
357
357
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated version module (e.g. running from a plain checkout)
        return 'unknown'
    return __version__.version
365
365
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()

    # split off everything after the first '+' as the "extra" part
    vparts, plus, extra = v.partition('+')
    if not plus:
        extra = None

    # collect leading integer components; stop at the first non-integer
    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 2:
        return (vints[0], vints[1])
418
418
# used by parsedate
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# lower-precision formats (year/month only) additionally accepted when the
# caller opts into extended parsing
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
453
453
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-argument case: a one-slot list is enough
        slot = []
        def wrapper():
            if len(slot) == 0:
                slot.append(func())
            return slot[0]
        return wrapper
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def wrapper(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def wrapper(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return wrapper
479
479
class sortdict(dict):
    '''a simple sorted dictionary

    Keeps insertion order in ``self._list``; re-setting an existing key
    moves it to the end, matching the original behavior.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # an existing key is moved to the end of the ordering
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the removed value (or the supplied default), as the
        # dict contract requires; the old code dropped it and returned None
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned above
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place ``key`` at ``index`` in the ordering instead of appending
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
524
524
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # neighbors are wired up later by the owning cache
        self.next = self.prev = None
        # _notset marks a slot that carries no live dictionary entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
543
543
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # ``max``: maximum number of entries kept before eviction
        self._cache = {}

        # start with a single self-linked node; nodes are added lazily
        # (up to the capacity) by _addcapacity()
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the cached value for ``k``, or ``default`` if absent.

        Fix: go through __getitem__ so a hit returns the stored value
        (the old code leaked the internal _lrucachenode object) and the
        entry's recency is refreshed like any other access.
        """
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def clear(self):
        # empty every node in place and drop the backing dict; the linked
        # list (and thus the grown capacity) is reused
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
702
702
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    results = {}
    recency = collections.deque()  # oldest key at the left end
    if func.__code__.co_argcount == 1:
        def cached(arg):
            if arg in results:
                # refresh: pull the key out so it can be re-appended as newest
                recency.remove(arg)
            else:
                if len(results) > 20:
                    del results[recency.popleft()]
                results[arg] = func(arg)
            recency.append(arg)
            return results[arg]
    else:
        def cached(*args):
            if args in results:
                recency.remove(args)
            else:
                if len(results) > 20:
                    del results[recency.popleft()]
                results[args] = func(*args)
            recency.append(args)
            return results[args]

    return cached
729
729
class propertycache(object):
    """Descriptor computing an attribute lazily, once per instance.

    On first access the wrapped function runs and its result is stored
    in the instance __dict__ under the same name via cachevalue(), so
    subsequent lookups find the plain attribute.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        computed = self.func(obj)
        self.cachevalue(obj, computed)
        return computed

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
742
742
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
749
749
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    infile = outfile = None
    try:
        # write S into a temp input file and pre-create the output file
        infd, infile = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outfile = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', infile)
        cmd = cmd.replace('OUTFILE', outfile)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outfile)
    finally:
        # best-effort removal of both temp files
        for path in (infile, outfile):
            try:
                if path:
                    os.unlink(path)
            except OSError:
                pass
783
783
# map filter-command prefixes to the implementation that runs them
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
788
788
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            # strip the prefix and any whitespace before the real command
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
795
795
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
799
799
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def floorlog2(x):
        # floor(log2(x)) for x >= 1; 0 for x == 0 (kept for parity)
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    size = 0
    for chunk in source:
        pending.append(chunk)
        size += len(chunk)
        if size < min:
            continue
        if min < max:
            # double the threshold, or jump to the largest power of two
            # not exceeding what we just emitted, capped at max
            min <<= 1
            grown = 1 << floorlog2(size)
            if grown > min:
                min = grown
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        size = 0
    if pending:
        yield ''.join(pending)
830
830
# re-export error.Abort: many callers historically import it from here
Abort = error.Abort
832
832
def always(fn):
    """predicate that accepts any *fn* (always returns True)"""
    return True
835
835
def never(fn):
    """predicate that rejects any *fn* (always returns False)"""
    return False
838
838
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had it on; never turn GC on
            # behind the back of an outer nogc-wrapped frame
            if wasenabled:
                gc.enable()
    return wrapper
860
860
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, fall back to
            # an absolute one under root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    src = splitpath(n1)
    dst = n2.split('/')
    src.reverse()
    dst.reverse()
    # strip the common prefix (compared from the reversed tails)
    while src and dst and src[-1] == dst[-1]:
        src.pop()
        dst.pop()
    dst.reverse()
    # climb out of what remains of src, then descend into dst
    return os.sep.join(['..'] * len(src) + dst) or '.'
886
886
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):     # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")   # tools/freeze
896
896
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None
907
907
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        envhg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if envhg:
            _sethgexecutable(envhg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running from an 'hg' script: use that file directly
            _sethgexecutable(mainmod.__file__)
        else:
            _sethgexecutable(findexe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
930
930
def _sethgexecutable(path):
    """set location of the 'hg' executable

    Single writer of the module-level _hgexecutable cache read by
    hgexecutable().
    """
    global _hgexecutable
    _hgexecutable = path
935
935
def _isstdout(f):
    """check whether *f* refers to the process's real stdout descriptor

    Returns a falsy value (None) when f has no fileno attribute.
    """
    getfileno = getattr(f, 'fileno', None)
    return getfileno and getfileno() == sys.__stdout__.fileno()
939
939
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        sys.stdout.flush()
    except Exception:
        pass

    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)

    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if cwd is not None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            # relay child output line by line so callers see progress
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
998
998
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback depth of 1 means the TypeError came from the
            # call itself (bad argument list), not from inside func's body
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1010
1010
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the old stat so an ambiguous mtime on the new copy
            # can be detected (and nudged forward) after copying
            # (was: `checkambig and filestat(dest)` -- the extra guard was
            # always true inside this branch, so it is dropped)
            oldstat = filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1048
1055
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # default to hardlinking when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    topic = _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by files already processed here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink,
                                    progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # hardlinking failed: fall back to plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)

    progress(topic, None)

    return hardlink, num
1085
1092
# base names and characters that are not valid in Windows filenames
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for component in path.replace('\\', '/').split('/'):
        if not component:
            continue
        for c in component:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = component.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        last = component[-1]
        # the substring test `component not in '..'` deliberately excludes
        # both '.' and '..' (the only non-empty substrings of '..')
        if last in '. ' and component not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1136
1143
# on Windows validate with our own checker; elsewhere defer to the
# platform module's implementation
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1141
1148
def makelock(info, pathname):
    """Create a lock at *pathname* recording *info*.

    Prefer a symlink (created atomically, readable without opening a file);
    fall back to an exclusively-created regular file when symlinks are
    unavailable or fail for reasons other than the lock already existing.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            # lock already exists: let the caller handle contention
            raise
        # other symlink failures: fall through to the regular-file variant
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
1154
1161
def readlock(pathname):
    """Return the contents of the lock at *pathname*.

    Reads the symlink target when the lock is a symlink, otherwise falls
    back to reading the regular file written by makelock().
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported -> read file
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1167
1174
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        # fast path: real file objects expose a descriptor
        return os.fstat(fp.fileno())
    except AttributeError:
        # no fileno(): fall back to stat'ing the object by name
        return os.stat(fp.name)
1174
1181
1175 # File system features
1182 # File system features
1176
1183
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    folded = b.upper()
    if folded == b:
        folded = b.lower()
    if folded == b:
        # name has no case to fold: no evidence against case sensitivity
        return True
    p2 = os.path.join(d, folded)
    try:
        s2 = os.lstat(p2)
    except OSError:
        # the case-folded variant doesn't resolve: case-sensitive
        return True
    # same inode both ways means the filesystem folded the case
    return s2 != s1
1199
1206
# probe for the optional re2 module; _re2 stays None until _checkre2()
# confirms it actually works, and is False when it is unusable
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1205
1212
class _re(object):
    """Facade selecting between the optional re2 engine and stdlib re."""

    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes inline flags instead of a flags argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        return remod.escape
1248
1255
# module-level regex facade instance; callers use util.re.compile/escape
re = _re()

# cache of directory listings used by fspath(), keyed by directory path
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def listdirbycase(dir):
        # map normcased name -> filesystem-cased name for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): the return value of this replace is discarded, so it
    # has no effect -- presumably meant to escape backslashes for the
    # regex below; verify against the upstream history before changing.
    seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = listdirbycase(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = listdirbycase(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1293
1300
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        return False

    link = testfile + ".hgtmp2"
    fobj = None
    try:
        oslink(probe, link)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fobj = posixfile(link)
        return nlinks(link) > 1
    except OSError:
        return False
    finally:
        if fobj is not None:
            fobj.close()
        for leftover in (probe, link):
            try:
                os.unlink(leftover)
            except OSError:
                pass
1325
1332
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep is None on platforms without an alternate separator
    return os.altsep and path.endswith(os.altsep)
1329
1336
def splitpath(path):
    '''Split path by os.sep.

    os.altsep is deliberately ignored: this is meant as a plain
    replacement for "xxx.split(os.sep)".  Run the path through
    os.path.normpath() first if normalization is needed.'''
    return path.split(os.sep)
1337
1344
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # elsewhere: Windows always has a GUI; X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1352
1359
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want.  If the original file already exists, just copy its
    # mode.  Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            src = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original to copy: hand back the empty temp file
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        dst = posixfile(temp, "wb")
        for chunk in filechunkiter(src):
            dst.write(chunk)
        src.close()
        dst.close()
    except: # re-raises
        # don't leave a stale temp file behind on any failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1391
1398
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        # a missing file leaves stat as None; other stat failures
        # propagate to the caller
        self.stat = None
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # once timestamp ambiguity is avoided (see isambig), the
            # (size, ctime, mtime) triple is enough to exactly detect
            # change of a file regardless of platform
            return ((self.stat.st_size,
                     self.stat.st_ctime,
                     self.stat.st_mtime) ==
                    (old.stat.st_size,
                     old.stat.st_ctime,
                     old.stat.st_mtime))
        except AttributeError:
            # at least one side has stat == None: never equal
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) means the file changed twice or more within the same
        second of S[n-1].ctime, so timestamp comparison alone cannot
        tell the versions apart.  The fix is "advance mtime 1 sec, if
        timestamp is ambiguous".

        Advancing mtime only in case (*2) is not enough, because a
        naturally advanced S[n].mtime in case (*1) might collide with a
        manually advanced S[n-1 or earlier].mtime.  Therefore, every
        "S[n-1].ctime == S[n].ctime" case is treated as ambiguous
        regardless of mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
1457
1464
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes go to a temporary copy of the original file.  Calling
    close() renames the temporary copy over the original name, making
    the changes visible in a single step.  If the object is destroyed
    without being closed, all writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if self._fp.closed:
            return
        self._fp.close()
        target = localpath(self.__name)
        oldstat = self._checkambig and filestat(target)
        if oldstat and oldstat.stat:
            rename(self._tempname, target)
            newstat = filestat(target)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one;
                # nudge mtime forward so the change is detectable
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(target, (advanced, advanced))
        else:
            rename(self._tempname, target)

    def discard(self):
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1506
1517
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create the missing ancestors first, then retry
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # catch EEXIST to handle races with concurrent creators
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1534
1545
def readfile(path):
    """Return the entire binary content of path."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1538
1549
def writefile(path, text):
    """(Over)write path with text as binary content."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1542
1553
def appendfile(path, text):
    """Append text to path as binary content, creating it if missing."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1546
1557
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # break oversized (> 1MB) chunks into 256KB pieces
            for chunk in chunks:
                if len(chunk) > 2 ** 20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read offset into the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        remaining = l
        collected = []
        queue = self._queue
        while remaining > 0:
            if not queue:
                # refill: pull roughly 256KB worth of chunks
                budget = 2 ** 18
                for chunk in self.iter:
                    queue.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not queue:
                    # iterator ran dry; return what we have
                    break

            # Consume from the head chunk in place rather than
            # popleft()/appendleft(): partial reads then cost no deque
            # mutations and no re-sliced remainder string.
            chunk = queue[0]
            chunklen = len(chunk)
            offset = self._chunkoffset

            if offset == 0 and remaining >= chunklen:
                # whole chunk is consumed
                remaining -= chunklen
                queue.popleft()
                collected.append(chunk)
                # self._chunkoffset stays at 0
                continue

            available = chunklen - offset
            if remaining >= available:
                # consume the unread tail of a partially-read chunk
                remaining -= available
                queue.popleft()
                # offset != 0 here (handled above), so this slice never
                # degenerates into a full copy via ``chunk[0:]``
                collected.append(chunk[offset:])
                self._chunkoffset = 0
            else:
                # take only part of the chunk; remember where we stopped
                collected.append(chunk[offset:offset + remaining])
                self._chunkoffset += remaining
                remaining -= available

        return ''.join(collected)
1627
1638
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        if not nbytes:
            # limit exhausted
            break
        s = f.read(nbytes)
        if not s:
            # end of file (or short read from a socket-like object)
            break
        if limit:
            limit -= len(s)
        yield s
1648
1659
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the local UTC offset is the difference between the naive UTC and
    # naive local renderings of the same instant; it is always a whole
    # number of seconds, so the int() conversion is exact
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    tz = int(delta.total_seconds())
    return timestamp, tz
1661
1672
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the %z/%1/%2 extensions into a "+HHMM" style offset
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # clamp to the signed 32-bit range that strftime handles portably
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    stamp = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return stamp.strftime(format)
1697
1708
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # delegate to datestr with a fixed ISO-style format
    return datestr(date, format='%Y-%m-%d')
1701
1712
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Understands numeric offsets like "+0200"/"-0430" and the literal
    names "GMT" and "UTC".  Returns None for anything unrecognized,
    including an empty string.
    """
    # check the length before indexing, so malformed input (notably the
    # empty string) yields None instead of raising IndexError
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = (tz[0] == "+") and 1 or -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # the offset is negated: it is what must be added to local time
        # to obtain UTC
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1712
1723
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    words = string.split()
    offset = parsetimezone(words[-1])
    if offset is None:
        date = string
    else:
        # strip the recognized timezone token from the date text
        date = " ".join(words[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if any(("%" + p) in format for p in part):
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1742
1753
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # resolve the symbolic dates (both English and localized forms)
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a plain "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults, then try each known format in turn
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                b = "00" if part[0] in "HMS" else "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1819
1830
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def earliest(spec):
        # bias every unspecified field towards the start of its period
        bias = {'mb': "1", 'd': "1"}
        return parsedate(spec, extendeddateformats, bias)[0]

    def latest(spec):
        # bias every unspecified field towards the end of its period;
        # probe month lengths from longest to shortest so e.g. "Feb"
        # ends on the right day
        bias = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for dayofmonth in ("31", "30", "29"):
            bias["d"] = dayofmonth
            try:
                return parsedate(spec, extendeddateformats, bias)[0]
            except Abort:
                pass
        bias["d"] = "28"
        return parsedate(spec, extendeddateformats, bias)[0]

    date = date.strip()
    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))

    prefix, rest = date[:1], date[1:]
    if prefix == "<":
        if not rest:
            raise Abort(_("invalid day spec, use '<DATE'"))
        cutoff = latest(rest)
        return lambda x: x <= cutoff
    if prefix == ">":
        if not rest:
            raise Abort(_("invalid day spec, use '>DATE'"))
        cutoff = earliest(rest)
        return lambda x: x >= cutoff
    if prefix == "-":
        # "-N": everything within the last N days
        try:
            days = int(rest)
        except ValueError:
            raise Abort(_("invalid day spec: %s") % rest)
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % rest)
        cutoff = makedate()[0] - days * 3600 * 24
        return lambda x: x >= cutoff
    if " to " in date:
        first, second = date.split(" to ")
        start, stop = earliest(first), latest(second)
        return lambda x: x >= start and x <= stop
    # bare date: match anywhere inside the period it denotes
    start, stop = earliest(date), latest(date)
    return lambda x: x >= start and x <= stop
1895
1906
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexsrc = pattern[3:]
        try:
            compiled = remod.compile(regexsrc)
        except remod.error as inst:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % inst)
        return 're', regexsrc, compiled.search
    # strip an explicit 'literal:' prefix; anything else falls through
    # unchanged and is matched literally
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1934
1945
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip the mail domain, if any
    user = user.partition('@')[0]
    # strip a leading real name ("Joe Bloggs <joe" -> "joe")
    before, sep, after = user.partition('<')
    if sep:
        user = after
    # keep only the first word and the first dotted component
    user = user.partition(' ')[0]
    user = user.partition('.')[0]
    return user
1950
1961
def emailuser(user):
    """Return the user portion of an email address."""
    # drop the domain part, then any "Real Name <" prefix
    user = user.partition('@')[0]
    before, sep, after = user.partition('<')
    if sep:
        user = after
    return user
1960
1971
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; with neither present this degrades to
    # returning the whole string (find('<') + 1 == 0, end is None)
    close = author.find('>')
    end = None if close == -1 else close
    return author[author.find('<') + 1:end]
1967
1978
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    trimmed = encoding.trim(text, maxlength, ellipsis='...')
    return trimmed
1971
1982
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # walk the table from the largest unit down and use the first
        # (multiplier, divisor, format) entry whose threshold is reached
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: fall back to the smallest unit
        return unittable[-1][2] % count

    return render
1982
1993
# Render a byte count with a human-readable unit suffix. Entries are
# ordered largest-unit-first; unitcountfn() picks the first entry whose
# threshold (multiplier * divisor) the value reaches, so precision falls
# as magnitude grows (two decimals just above a unit boundary, none near
# the next one up).
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1995
2006
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed.

    repr() escapes every backslash, which makes Windows paths unreadable
    in user-facing output; undo that doubling.
    """
    escaped = repr(s)
    return escaped.replace('\\\\', '\\')
1999
2010
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Return a column-width-aware textwrap.TextWrapper instance.

    On first call this defines the wrapper subclass and then replaces
    MBTextWrapper itself (via ``global``) with that class, so subsequent
    calls construct instances directly without re-executing the class
    body.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr into (head, rest) where head occupies at most
            # space_left display columns per encoding.ucolwidth().
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            # Place as much of an over-wide chunk as fits; always leave at
            # least one column so forward progress is guaranteed.
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                # can't break words: put the whole chunk on its own line
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    # width in display columns, not characters
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Memoize: replace this factory with the class itself so the class
    # body above only runs once per process.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2103
2114
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte string to 'width' display columns and return bytes.

    initindent prefixes the first output line, hangindent every
    subsequent one. Input is decoded with the local encoding, wrapped
    width-aware, and re-encoded.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)

    def decode(s):
        return s.decode(encoding.encoding, encoding.encodingmode)

    wrapper = MBTextWrapper(width=width,
                            initial_indent=decode(initindent),
                            subsequent_indent=decode(hangindent))
    return wrapper.fill(decode(line)).encode(encoding.encoding)
2116
2127
def iterlines(iterator):
    """Yield every text line of every chunk produced by ``iterator``."""
    for block in iterator:
        for textline in block.splitlines():
            yield textline
2121
2132
def expandpath(path):
    """Expand $VAR and ~user constructs in ``path``."""
    path = os.path.expandvars(path)
    return os.path.expanduser(path)
2124
2135
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2139
2150
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its (pid, status)
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)  # missing on Windows
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after observing the child's death to
            # avoid a race where the condition became true just before
            # the process exited
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2174
2185
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's mapping is never modified.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # drop the leading regex-escape backslash
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # fix: work on a copy so the doubled-prefix entry does not leak
        # into the caller's mapping (the original mutated it in place)
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # x.group() includes the prefix character; strip it for the lookup
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2199
2210
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a symbolic service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2216
2227
# Accepted boolean spellings and their values; parsebool() lowercases
# its input before looking it up here.
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2220
2231
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # .get() already defaults to None for unknown spellings
    return _booleans.get(s.lower())
2227
2238
_hexdig = '0123456789ABCDEFabcdef'
# Map every two-character hex string (in any case combination) to the
# character with that ordinal, e.g. '41' -> 'A'; used by _urlunquote().
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
2231
2242
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        # each item after the first began with a '%' in the input; its
        # first two characters should be the hex escape
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            # not a valid two-digit hex escape: keep the '%' literally
            s += '%' + item
        except UnicodeDecodeError:
            # Python 2: concatenating a non-ASCII byte onto a unicode
            # string failed; decode the escape as a code point instead
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
2251
2262
2252 class url(object):
2263 class url(object):
2253 r"""Reliable URL parser.
2264 r"""Reliable URL parser.
2254
2265
2255 This parses URLs and provides attributes for the following
2266 This parses URLs and provides attributes for the following
2256 components:
2267 components:
2257
2268
2258 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2269 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2259
2270
2260 Missing components are set to None. The only exception is
2271 Missing components are set to None. The only exception is
2261 fragment, which is set to '' if present but empty.
2272 fragment, which is set to '' if present but empty.
2262
2273
2263 If parsefragment is False, fragment is included in query. If
2274 If parsefragment is False, fragment is included in query. If
2264 parsequery is False, query is included in path. If both are
2275 parsequery is False, query is included in path. If both are
2265 False, both fragment and query are included in path.
2276 False, both fragment and query are included in path.
2266
2277
2267 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2278 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2268
2279
2269 Note that for backward compatibility reasons, bundle URLs do not
2280 Note that for backward compatibility reasons, bundle URLs do not
2270 take host names. That means 'bundle://../' has a path of '../'.
2281 take host names. That means 'bundle://../' has a path of '../'.
2271
2282
2272 Examples:
2283 Examples:
2273
2284
2274 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2285 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2275 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2286 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2276 >>> url('ssh://[::1]:2200//home/joe/repo')
2287 >>> url('ssh://[::1]:2200//home/joe/repo')
2277 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2288 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2278 >>> url('file:///home/joe/repo')
2289 >>> url('file:///home/joe/repo')
2279 <url scheme: 'file', path: '/home/joe/repo'>
2290 <url scheme: 'file', path: '/home/joe/repo'>
2280 >>> url('file:///c:/temp/foo/')
2291 >>> url('file:///c:/temp/foo/')
2281 <url scheme: 'file', path: 'c:/temp/foo/'>
2292 <url scheme: 'file', path: 'c:/temp/foo/'>
2282 >>> url('bundle:foo')
2293 >>> url('bundle:foo')
2283 <url scheme: 'bundle', path: 'foo'>
2294 <url scheme: 'bundle', path: 'foo'>
2284 >>> url('bundle://../foo')
2295 >>> url('bundle://../foo')
2285 <url scheme: 'bundle', path: '../foo'>
2296 <url scheme: 'bundle', path: '../foo'>
2286 >>> url(r'c:\foo\bar')
2297 >>> url(r'c:\foo\bar')
2287 <url path: 'c:\\foo\\bar'>
2298 <url path: 'c:\\foo\\bar'>
2288 >>> url(r'\\blah\blah\blah')
2299 >>> url(r'\\blah\blah\blah')
2289 <url path: '\\\\blah\\blah\\blah'>
2300 <url path: '\\\\blah\\blah\\blah'>
2290 >>> url(r'\\blah\blah\blah#baz')
2301 >>> url(r'\\blah\blah\blah#baz')
2291 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2302 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2292 >>> url(r'file:///C:\users\me')
2303 >>> url(r'file:///C:\users\me')
2293 <url scheme: 'file', path: 'C:\\users\\me'>
2304 <url scheme: 'file', path: 'C:\\users\\me'>
2294
2305
2295 Authentication credentials:
2306 Authentication credentials:
2296
2307
2297 >>> url('ssh://joe:xyz@x/repo')
2308 >>> url('ssh://joe:xyz@x/repo')
2298 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2309 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2299 >>> url('ssh://joe@x/repo')
2310 >>> url('ssh://joe@x/repo')
2300 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2311 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2301
2312
2302 Query strings and fragments:
2313 Query strings and fragments:
2303
2314
2304 >>> url('http://host/a?b#c')
2315 >>> url('http://host/a?b#c')
2305 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2316 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2306 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2317 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2307 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2318 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2308 """
2319 """
2309
2320
2310 _safechars = "!~*'()+"
2321 _safechars = "!~*'()+"
2311 _safepchars = "/!~*'()+:\\"
2322 _safepchars = "/!~*'()+:\\"
2312 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2323 _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match
2313
2324
2314 def __init__(self, path, parsequery=True, parsefragment=True):
2325 def __init__(self, path, parsequery=True, parsefragment=True):
2315 # We slowly chomp away at path until we have only the path left
2326 # We slowly chomp away at path until we have only the path left
2316 self.scheme = self.user = self.passwd = self.host = None
2327 self.scheme = self.user = self.passwd = self.host = None
2317 self.port = self.path = self.query = self.fragment = None
2328 self.port = self.path = self.query = self.fragment = None
2318 self._localpath = True
2329 self._localpath = True
2319 self._hostport = ''
2330 self._hostport = ''
2320 self._origpath = path
2331 self._origpath = path
2321
2332
2322 if parsefragment and '#' in path:
2333 if parsefragment and '#' in path:
2323 path, self.fragment = path.split('#', 1)
2334 path, self.fragment = path.split('#', 1)
2324 if not path:
2335 if not path:
2325 path = None
2336 path = None
2326
2337
2327 # special case for Windows drive letters and UNC paths
2338 # special case for Windows drive letters and UNC paths
2328 if hasdriveletter(path) or path.startswith(r'\\'):
2339 if hasdriveletter(path) or path.startswith(r'\\'):
2329 self.path = path
2340 self.path = path
2330 return
2341 return
2331
2342
2332 # For compatibility reasons, we can't handle bundle paths as
2343 # For compatibility reasons, we can't handle bundle paths as
2333 # normal URLS
2344 # normal URLS
2334 if path.startswith('bundle:'):
2345 if path.startswith('bundle:'):
2335 self.scheme = 'bundle'
2346 self.scheme = 'bundle'
2336 path = path[7:]
2347 path = path[7:]
2337 if path.startswith('//'):
2348 if path.startswith('//'):
2338 path = path[2:]
2349 path = path[2:]
2339 self.path = path
2350 self.path = path
2340 return
2351 return
2341
2352
2342 if self._matchscheme(path):
2353 if self._matchscheme(path):
2343 parts = path.split(':', 1)
2354 parts = path.split(':', 1)
2344 if parts[0]:
2355 if parts[0]:
2345 self.scheme, path = parts
2356 self.scheme, path = parts
2346 self._localpath = False
2357 self._localpath = False
2347
2358
2348 if not path:
2359 if not path:
2349 path = None
2360 path = None
2350 if self._localpath:
2361 if self._localpath:
2351 self.path = ''
2362 self.path = ''
2352 return
2363 return
2353 else:
2364 else:
2354 if self._localpath:
2365 if self._localpath:
2355 self.path = path
2366 self.path = path
2356 return
2367 return
2357
2368
2358 if parsequery and '?' in path:
2369 if parsequery and '?' in path:
2359 path, self.query = path.split('?', 1)
2370 path, self.query = path.split('?', 1)
2360 if not path:
2371 if not path:
2361 path = None
2372 path = None
2362 if not self.query:
2373 if not self.query:
2363 self.query = None
2374 self.query = None
2364
2375
2365 # // is required to specify a host/authority
2376 # // is required to specify a host/authority
2366 if path and path.startswith('//'):
2377 if path and path.startswith('//'):
2367 parts = path[2:].split('/', 1)
2378 parts = path[2:].split('/', 1)
2368 if len(parts) > 1:
2379 if len(parts) > 1:
2369 self.host, path = parts
2380 self.host, path = parts
2370 else:
2381 else:
2371 self.host = parts[0]
2382 self.host = parts[0]
2372 path = None
2383 path = None
2373 if not self.host:
2384 if not self.host:
2374 self.host = None
2385 self.host = None
2375 # path of file:///d is /d
2386 # path of file:///d is /d
2376 # path of file:///d:/ is d:/, not /d:/
2387 # path of file:///d:/ is d:/, not /d:/
2377 if path and not hasdriveletter(path):
2388 if path and not hasdriveletter(path):
2378 path = '/' + path
2389 path = '/' + path
2379
2390
2380 if self.host and '@' in self.host:
2391 if self.host and '@' in self.host:
2381 self.user, self.host = self.host.rsplit('@', 1)
2392 self.user, self.host = self.host.rsplit('@', 1)
2382 if ':' in self.user:
2393 if ':' in self.user:
2383 self.user, self.passwd = self.user.split(':', 1)
2394 self.user, self.passwd = self.user.split(':', 1)
2384 if not self.host:
2395 if not self.host:
2385 self.host = None
2396 self.host = None
2386
2397
2387 # Don't split on colons in IPv6 addresses without ports
2398 # Don't split on colons in IPv6 addresses without ports
2388 if (self.host and ':' in self.host and
2399 if (self.host and ':' in self.host and
2389 not (self.host.startswith('[') and self.host.endswith(']'))):
2400 not (self.host.startswith('[') and self.host.endswith(']'))):
2390 self._hostport = self.host
2401 self._hostport = self.host
2391 self.host, self.port = self.host.rsplit(':', 1)
2402 self.host, self.port = self.host.rsplit(':', 1)
2392 if not self.host:
2403 if not self.host:
2393 self.host = None
2404 self.host = None
2394
2405
2395 if (self.host and self.scheme == 'file' and
2406 if (self.host and self.scheme == 'file' and
2396 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2407 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2397 raise Abort(_('file:// URLs can only refer to localhost'))
2408 raise Abort(_('file:// URLs can only refer to localhost'))
2398
2409
2399 self.path = path
2410 self.path = path
2400
2411
2401 # leave the query string escaped
2412 # leave the query string escaped
2402 for a in ('user', 'passwd', 'host', 'port',
2413 for a in ('user', 'passwd', 'host', 'port',
2403 'path', 'fragment'):
2414 'path', 'fragment'):
2404 v = getattr(self, a)
2415 v = getattr(self, a)
2405 if v is not None:
2416 if v is not None:
2406 setattr(self, a, _urlunquote(v))
2417 setattr(self, a, _urlunquote(v))
2407
2418
2408 def __repr__(self):
2419 def __repr__(self):
2409 attrs = []
2420 attrs = []
2410 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2421 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2411 'query', 'fragment'):
2422 'query', 'fragment'):
2412 v = getattr(self, a)
2423 v = getattr(self, a)
2413 if v is not None:
2424 if v is not None:
2414 attrs.append('%s: %r' % (a, v))
2425 attrs.append('%s: %r' % (a, v))
2415 return '<url %s>' % ', '.join(attrs)
2426 return '<url %s>' % ', '.join(attrs)
2416
2427
2417 def __str__(self):
2428 def __str__(self):
2418 r"""Join the URL's components back into a URL string.
2429 r"""Join the URL's components back into a URL string.
2419
2430
2420 Examples:
2431 Examples:
2421
2432
2422 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2433 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2423 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2434 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2424 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2435 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2425 'http://user:pw@host:80/?foo=bar&baz=42'
2436 'http://user:pw@host:80/?foo=bar&baz=42'
2426 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2437 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2427 'http://user:pw@host:80/?foo=bar%3dbaz'
2438 'http://user:pw@host:80/?foo=bar%3dbaz'
2428 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2439 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2429 'ssh://user:pw@[::1]:2200//home/joe#'
2440 'ssh://user:pw@[::1]:2200//home/joe#'
2430 >>> str(url('http://localhost:80//'))
2441 >>> str(url('http://localhost:80//'))
2431 'http://localhost:80//'
2442 'http://localhost:80//'
2432 >>> str(url('http://localhost:80/'))
2443 >>> str(url('http://localhost:80/'))
2433 'http://localhost:80/'
2444 'http://localhost:80/'
2434 >>> str(url('http://localhost:80'))
2445 >>> str(url('http://localhost:80'))
2435 'http://localhost:80/'
2446 'http://localhost:80/'
2436 >>> str(url('bundle:foo'))
2447 >>> str(url('bundle:foo'))
2437 'bundle:foo'
2448 'bundle:foo'
2438 >>> str(url('bundle://../foo'))
2449 >>> str(url('bundle://../foo'))
2439 'bundle:../foo'
2450 'bundle:../foo'
2440 >>> str(url('path'))
2451 >>> str(url('path'))
2441 'path'
2452 'path'
2442 >>> str(url('file:///tmp/foo/bar'))
2453 >>> str(url('file:///tmp/foo/bar'))
2443 'file:///tmp/foo/bar'
2454 'file:///tmp/foo/bar'
2444 >>> str(url('file:///c:/tmp/foo/bar'))
2455 >>> str(url('file:///c:/tmp/foo/bar'))
2445 'file:///c:/tmp/foo/bar'
2456 'file:///c:/tmp/foo/bar'
2446 >>> print url(r'bundle:foo\bar')
2457 >>> print url(r'bundle:foo\bar')
2447 bundle:foo\bar
2458 bundle:foo\bar
2448 >>> print url(r'file:///D:\data\hg')
2459 >>> print url(r'file:///D:\data\hg')
2449 file:///D:\data\hg
2460 file:///D:\data\hg
2450 """
2461 """
2451 if self._localpath:
2462 if self._localpath:
2452 s = self.path
2463 s = self.path
2453 if self.scheme == 'bundle':
2464 if self.scheme == 'bundle':
2454 s = 'bundle:' + s
2465 s = 'bundle:' + s
2455 if self.fragment:
2466 if self.fragment:
2456 s += '#' + self.fragment
2467 s += '#' + self.fragment
2457 return s
2468 return s
2458
2469
2459 s = self.scheme + ':'
2470 s = self.scheme + ':'
2460 if self.user or self.passwd or self.host:
2471 if self.user or self.passwd or self.host:
2461 s += '//'
2472 s += '//'
2462 elif self.scheme and (not self.path or self.path.startswith('/')
2473 elif self.scheme and (not self.path or self.path.startswith('/')
2463 or hasdriveletter(self.path)):
2474 or hasdriveletter(self.path)):
2464 s += '//'
2475 s += '//'
2465 if hasdriveletter(self.path):
2476 if hasdriveletter(self.path):
2466 s += '/'
2477 s += '/'
2467 if self.user:
2478 if self.user:
2468 s += urlreq.quote(self.user, safe=self._safechars)
2479 s += urlreq.quote(self.user, safe=self._safechars)
2469 if self.passwd:
2480 if self.passwd:
2470 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2481 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2471 if self.user or self.passwd:
2482 if self.user or self.passwd:
2472 s += '@'
2483 s += '@'
2473 if self.host:
2484 if self.host:
2474 if not (self.host.startswith('[') and self.host.endswith(']')):
2485 if not (self.host.startswith('[') and self.host.endswith(']')):
2475 s += urlreq.quote(self.host)
2486 s += urlreq.quote(self.host)
2476 else:
2487 else:
2477 s += self.host
2488 s += self.host
2478 if self.port:
2489 if self.port:
2479 s += ':' + urlreq.quote(self.port)
2490 s += ':' + urlreq.quote(self.port)
2480 if self.host:
2491 if self.host:
2481 s += '/'
2492 s += '/'
2482 if self.path:
2493 if self.path:
2483 # TODO: similar to the query string, we should not unescape the
2494 # TODO: similar to the query string, we should not unescape the
2484 # path when we store it, the path might contain '%2f' = '/',
2495 # path when we store it, the path might contain '%2f' = '/',
2485 # which we should *not* escape.
2496 # which we should *not* escape.
2486 s += urlreq.quote(self.path, safe=self._safepchars)
2497 s += urlreq.quote(self.path, safe=self._safepchars)
2487 if self.query:
2498 if self.query:
2488 # we store the query in escaped form.
2499 # we store the query in escaped form.
2489 s += '?' + self.query
2500 s += '?' + self.query
2490 if self.fragment is not None:
2501 if self.fragment is not None:
2491 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2502 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2492 return s
2503 return s
2493
2504
2494 def authinfo(self):
2505 def authinfo(self):
2495 user, passwd = self.user, self.passwd
2506 user, passwd = self.user, self.passwd
2496 try:
2507 try:
2497 self.user, self.passwd = None, None
2508 self.user, self.passwd = None, None
2498 s = str(self)
2509 s = str(self)
2499 finally:
2510 finally:
2500 self.user, self.passwd = user, passwd
2511 self.user, self.passwd = user, passwd
2501 if not self.user:
2512 if not self.user:
2502 return (s, None)
2513 return (s, None)
2503 # authinfo[1] is passed to urllib2 password manager, and its
2514 # authinfo[1] is passed to urllib2 password manager, and its
2504 # URIs must not contain credentials. The host is passed in the
2515 # URIs must not contain credentials. The host is passed in the
2505 # URIs list because Python < 2.4.3 uses only that to search for
2516 # URIs list because Python < 2.4.3 uses only that to search for
2506 # a password.
2517 # a password.
2507 return (s, (None, (s, self.host),
2518 return (s, (None, (s, self.host),
2508 self.user, self.passwd or ''))
2519 self.user, self.passwd or ''))
2509
2520
2510 def isabs(self):
2521 def isabs(self):
2511 if self.scheme and self.scheme != 'file':
2522 if self.scheme and self.scheme != 'file':
2512 return True # remote URL
2523 return True # remote URL
2513 if hasdriveletter(self.path):
2524 if hasdriveletter(self.path):
2514 return True # absolute for our purposes - can't be joined()
2525 return True # absolute for our purposes - can't be joined()
2515 if self.path.startswith(r'\\'):
2526 if self.path.startswith(r'\\'):
2516 return True # Windows UNC path
2527 return True # Windows UNC path
2517 if self.path.startswith('/'):
2528 if self.path.startswith('/'):
2518 return True # POSIX-style
2529 return True # POSIX-style
2519 return False
2530 return False
2520
2531
2521 def localpath(self):
2532 def localpath(self):
2522 if self.scheme == 'file' or self.scheme == 'bundle':
2533 if self.scheme == 'file' or self.scheme == 'bundle':
2523 path = self.path or '/'
2534 path = self.path or '/'
2524 # For Windows, we need to promote hosts containing drive
2535 # For Windows, we need to promote hosts containing drive
2525 # letters to paths with drive letters.
2536 # letters to paths with drive letters.
2526 if hasdriveletter(self._hostport):
2537 if hasdriveletter(self._hostport):
2527 path = self._hostport + '/' + self.path
2538 path = self._hostport + '/' + self.path
2528 elif (self.host is not None and self.path
2539 elif (self.host is not None and self.path
2529 and not hasdriveletter(path)):
2540 and not hasdriveletter(path)):
2530 path = '/' + path
2541 path = '/' + path
2531 return path
2542 return path
2532 return self._origpath
2543 return self._origpath
2533
2544
2534 def islocal(self):
2545 def islocal(self):
2535 '''whether localpath will return something that posixfile can open'''
2546 '''whether localpath will return something that posixfile can open'''
2536 return (not self.scheme or self.scheme == 'file'
2547 return (not self.scheme or self.scheme == 'file'
2537 or self.scheme == 'bundle')
2548 or self.scheme == 'bundle')
2538
2549
2539 def hasscheme(path):
2550 def hasscheme(path):
2540 return bool(url(path).scheme)
2551 return bool(url(path).scheme)
2541
2552
2542 def hasdriveletter(path):
2553 def hasdriveletter(path):
2543 return path and path[1:2] == ':' and path[0:1].isalpha()
2554 return path and path[1:2] == ':' and path[0:1].isalpha()
2544
2555
2545 def urllocalpath(path):
2556 def urllocalpath(path):
2546 return url(path, parsequery=False, parsefragment=False).localpath()
2557 return url(path, parsequery=False, parsefragment=False).localpath()
2547
2558
2548 def hidepassword(u):
2559 def hidepassword(u):
2549 '''hide user credential in a url string'''
2560 '''hide user credential in a url string'''
2550 u = url(u)
2561 u = url(u)
2551 if u.passwd:
2562 if u.passwd:
2552 u.passwd = '***'
2563 u.passwd = '***'
2553 return str(u)
2564 return str(u)
2554
2565
2555 def removeauth(u):
2566 def removeauth(u):
2556 '''remove all authentication information from a url string'''
2567 '''remove all authentication information from a url string'''
2557 u = url(u)
2568 u = url(u)
2558 u.user = u.passwd = None
2569 u.user = u.passwd = None
2559 return str(u)
2570 return str(u)
2560
2571
2561 def isatty(fp):
2572 def isatty(fp):
2562 try:
2573 try:
2563 return fp.isatty()
2574 return fp.isatty()
2564 except AttributeError:
2575 except AttributeError:
2565 return False
2576 return False
2566
2577
2567 timecount = unitcountfn(
2578 timecount = unitcountfn(
2568 (1, 1e3, _('%.0f s')),
2579 (1, 1e3, _('%.0f s')),
2569 (100, 1, _('%.1f s')),
2580 (100, 1, _('%.1f s')),
2570 (10, 1, _('%.2f s')),
2581 (10, 1, _('%.2f s')),
2571 (1, 1, _('%.3f s')),
2582 (1, 1, _('%.3f s')),
2572 (100, 0.001, _('%.1f ms')),
2583 (100, 0.001, _('%.1f ms')),
2573 (10, 0.001, _('%.2f ms')),
2584 (10, 0.001, _('%.2f ms')),
2574 (1, 0.001, _('%.3f ms')),
2585 (1, 0.001, _('%.3f ms')),
2575 (100, 0.000001, _('%.1f us')),
2586 (100, 0.000001, _('%.1f us')),
2576 (10, 0.000001, _('%.2f us')),
2587 (10, 0.000001, _('%.2f us')),
2577 (1, 0.000001, _('%.3f us')),
2588 (1, 0.000001, _('%.3f us')),
2578 (100, 0.000000001, _('%.1f ns')),
2589 (100, 0.000000001, _('%.1f ns')),
2579 (10, 0.000000001, _('%.2f ns')),
2590 (10, 0.000000001, _('%.2f ns')),
2580 (1, 0.000000001, _('%.3f ns')),
2591 (1, 0.000000001, _('%.3f ns')),
2581 )
2592 )
2582
2593
2583 _timenesting = [0]
2594 _timenesting = [0]
2584
2595
2585 def timed(func):
2596 def timed(func):
2586 '''Report the execution time of a function call to stderr.
2597 '''Report the execution time of a function call to stderr.
2587
2598
2588 During development, use as a decorator when you need to measure
2599 During development, use as a decorator when you need to measure
2589 the cost of a function, e.g. as follows:
2600 the cost of a function, e.g. as follows:
2590
2601
2591 @util.timed
2602 @util.timed
2592 def foo(a, b, c):
2603 def foo(a, b, c):
2593 pass
2604 pass
2594 '''
2605 '''
2595
2606
2596 def wrapper(*args, **kwargs):
2607 def wrapper(*args, **kwargs):
2597 start = time.time()
2608 start = time.time()
2598 indent = 2
2609 indent = 2
2599 _timenesting[0] += indent
2610 _timenesting[0] += indent
2600 try:
2611 try:
2601 return func(*args, **kwargs)
2612 return func(*args, **kwargs)
2602 finally:
2613 finally:
2603 elapsed = time.time() - start
2614 elapsed = time.time() - start
2604 _timenesting[0] -= indent
2615 _timenesting[0] -= indent
2605 sys.stderr.write('%s%s: %s\n' %
2616 sys.stderr.write('%s%s: %s\n' %
2606 (' ' * _timenesting[0], func.__name__,
2617 (' ' * _timenesting[0], func.__name__,
2607 timecount(elapsed)))
2618 timecount(elapsed)))
2608 return wrapper
2619 return wrapper
2609
2620
2610 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2621 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2611 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2622 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2612
2623
2613 def sizetoint(s):
2624 def sizetoint(s):
2614 '''Convert a space specifier to a byte count.
2625 '''Convert a space specifier to a byte count.
2615
2626
2616 >>> sizetoint('30')
2627 >>> sizetoint('30')
2617 30
2628 30
2618 >>> sizetoint('2.2kb')
2629 >>> sizetoint('2.2kb')
2619 2252
2630 2252
2620 >>> sizetoint('6M')
2631 >>> sizetoint('6M')
2621 6291456
2632 6291456
2622 '''
2633 '''
2623 t = s.strip().lower()
2634 t = s.strip().lower()
2624 try:
2635 try:
2625 for k, u in _sizeunits:
2636 for k, u in _sizeunits:
2626 if t.endswith(k):
2637 if t.endswith(k):
2627 return int(float(t[:-len(k)]) * u)
2638 return int(float(t[:-len(k)]) * u)
2628 return int(t)
2639 return int(t)
2629 except ValueError:
2640 except ValueError:
2630 raise error.ParseError(_("couldn't parse size: %s") % s)
2641 raise error.ParseError(_("couldn't parse size: %s") % s)
2631
2642
2632 class hooks(object):
2643 class hooks(object):
2633 '''A collection of hook functions that can be used to extend a
2644 '''A collection of hook functions that can be used to extend a
2634 function's behavior. Hooks are called in lexicographic order,
2645 function's behavior. Hooks are called in lexicographic order,
2635 based on the names of their sources.'''
2646 based on the names of their sources.'''
2636
2647
2637 def __init__(self):
2648 def __init__(self):
2638 self._hooks = []
2649 self._hooks = []
2639
2650
2640 def add(self, source, hook):
2651 def add(self, source, hook):
2641 self._hooks.append((source, hook))
2652 self._hooks.append((source, hook))
2642
2653
2643 def __call__(self, *args):
2654 def __call__(self, *args):
2644 self._hooks.sort(key=lambda x: x[0])
2655 self._hooks.sort(key=lambda x: x[0])
2645 results = []
2656 results = []
2646 for source, hook in self._hooks:
2657 for source, hook in self._hooks:
2647 results.append(hook(*args))
2658 results.append(hook(*args))
2648 return results
2659 return results
2649
2660
2650 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2661 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
2651 '''Yields lines for a nicely formatted stacktrace.
2662 '''Yields lines for a nicely formatted stacktrace.
2652 Skips the 'skip' last entries.
2663 Skips the 'skip' last entries.
2653 Each file+linenumber is formatted according to fileline.
2664 Each file+linenumber is formatted according to fileline.
2654 Each line is formatted according to line.
2665 Each line is formatted according to line.
2655 If line is None, it yields:
2666 If line is None, it yields:
2656 length of longest filepath+line number,
2667 length of longest filepath+line number,
2657 filepath+linenumber,
2668 filepath+linenumber,
2658 function
2669 function
2659
2670
2660 Not be used in production code but very convenient while developing.
2671 Not be used in production code but very convenient while developing.
2661 '''
2672 '''
2662 entries = [(fileline % (fn, ln), func)
2673 entries = [(fileline % (fn, ln), func)
2663 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2674 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
2664 if entries:
2675 if entries:
2665 fnmax = max(len(entry[0]) for entry in entries)
2676 fnmax = max(len(entry[0]) for entry in entries)
2666 for fnln, func in entries:
2677 for fnln, func in entries:
2667 if line is None:
2678 if line is None:
2668 yield (fnmax, fnln, func)
2679 yield (fnmax, fnln, func)
2669 else:
2680 else:
2670 yield line % (fnmax, fnln, func)
2681 yield line % (fnmax, fnln, func)
2671
2682
2672 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2683 def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
2673 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2684 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2674 Skips the 'skip' last entries. By default it will flush stdout first.
2685 Skips the 'skip' last entries. By default it will flush stdout first.
2675 It can be used everywhere and intentionally does not require an ui object.
2686 It can be used everywhere and intentionally does not require an ui object.
2676 Not be used in production code but very convenient while developing.
2687 Not be used in production code but very convenient while developing.
2677 '''
2688 '''
2678 if otherf:
2689 if otherf:
2679 otherf.flush()
2690 otherf.flush()
2680 f.write('%s at:\n' % msg)
2691 f.write('%s at:\n' % msg)
2681 for line in getstackframes(skip + 1):
2692 for line in getstackframes(skip + 1):
2682 f.write(line)
2693 f.write(line)
2683 f.flush()
2694 f.flush()
2684
2695
2685 class dirs(object):
2696 class dirs(object):
2686 '''a multiset of directory names from a dirstate or manifest'''
2697 '''a multiset of directory names from a dirstate or manifest'''
2687
2698
2688 def __init__(self, map, skip=None):
2699 def __init__(self, map, skip=None):
2689 self._dirs = {}
2700 self._dirs = {}
2690 addpath = self.addpath
2701 addpath = self.addpath
2691 if safehasattr(map, 'iteritems') and skip is not None:
2702 if safehasattr(map, 'iteritems') and skip is not None:
2692 for f, s in map.iteritems():
2703 for f, s in map.iteritems():
2693 if s[0] != skip:
2704 if s[0] != skip:
2694 addpath(f)
2705 addpath(f)
2695 else:
2706 else:
2696 for f in map:
2707 for f in map:
2697 addpath(f)
2708 addpath(f)
2698
2709
2699 def addpath(self, path):
2710 def addpath(self, path):
2700 dirs = self._dirs
2711 dirs = self._dirs
2701 for base in finddirs(path):
2712 for base in finddirs(path):
2702 if base in dirs:
2713 if base in dirs:
2703 dirs[base] += 1
2714 dirs[base] += 1
2704 return
2715 return
2705 dirs[base] = 1
2716 dirs[base] = 1
2706
2717
2707 def delpath(self, path):
2718 def delpath(self, path):
2708 dirs = self._dirs
2719 dirs = self._dirs
2709 for base in finddirs(path):
2720 for base in finddirs(path):
2710 if dirs[base] > 1:
2721 if dirs[base] > 1:
2711 dirs[base] -= 1
2722 dirs[base] -= 1
2712 return
2723 return
2713 del dirs[base]
2724 del dirs[base]
2714
2725
2715 def __iter__(self):
2726 def __iter__(self):
2716 return self._dirs.iterkeys()
2727 return self._dirs.iterkeys()
2717
2728
2718 def __contains__(self, d):
2729 def __contains__(self, d):
2719 return d in self._dirs
2730 return d in self._dirs
2720
2731
2721 if safehasattr(parsers, 'dirs'):
2732 if safehasattr(parsers, 'dirs'):
2722 dirs = parsers.dirs
2733 dirs = parsers.dirs
2723
2734
2724 def finddirs(path):
2735 def finddirs(path):
2725 pos = path.rfind('/')
2736 pos = path.rfind('/')
2726 while pos != -1:
2737 while pos != -1:
2727 yield path[:pos]
2738 yield path[:pos]
2728 pos = path.rfind('/', 0, pos)
2739 pos = path.rfind('/', 0, pos)
2729
2740
2730 # compression utility
2741 # compression utility
2731
2742
2732 class nocompress(object):
2743 class nocompress(object):
2733 def compress(self, x):
2744 def compress(self, x):
2734 return x
2745 return x
2735 def flush(self):
2746 def flush(self):
2736 return ""
2747 return ""
2737
2748
2738 compressors = {
2749 compressors = {
2739 None: nocompress,
2750 None: nocompress,
2740 # lambda to prevent early import
2751 # lambda to prevent early import
2741 'BZ': lambda: bz2.BZ2Compressor(),
2752 'BZ': lambda: bz2.BZ2Compressor(),
2742 'GZ': lambda: zlib.compressobj(),
2753 'GZ': lambda: zlib.compressobj(),
2743 }
2754 }
2744 # also support the old form by courtesies
2755 # also support the old form by courtesies
2745 compressors['UN'] = compressors[None]
2756 compressors['UN'] = compressors[None]
2746
2757
2747 def _makedecompressor(decompcls):
2758 def _makedecompressor(decompcls):
2748 def generator(f):
2759 def generator(f):
2749 d = decompcls()
2760 d = decompcls()
2750 for chunk in filechunkiter(f):
2761 for chunk in filechunkiter(f):
2751 yield d.decompress(chunk)
2762 yield d.decompress(chunk)
2752 def func(fh):
2763 def func(fh):
2753 return chunkbuffer(generator(fh))
2764 return chunkbuffer(generator(fh))
2754 return func
2765 return func
2755
2766
2756 class ctxmanager(object):
2767 class ctxmanager(object):
2757 '''A context manager for use in 'with' blocks to allow multiple
2768 '''A context manager for use in 'with' blocks to allow multiple
2758 contexts to be entered at once. This is both safer and more
2769 contexts to be entered at once. This is both safer and more
2759 flexible than contextlib.nested.
2770 flexible than contextlib.nested.
2760
2771
2761 Once Mercurial supports Python 2.7+, this will become mostly
2772 Once Mercurial supports Python 2.7+, this will become mostly
2762 unnecessary.
2773 unnecessary.
2763 '''
2774 '''
2764
2775
2765 def __init__(self, *args):
2776 def __init__(self, *args):
2766 '''Accepts a list of no-argument functions that return context
2777 '''Accepts a list of no-argument functions that return context
2767 managers. These will be invoked at __call__ time.'''
2778 managers. These will be invoked at __call__ time.'''
2768 self._pending = args
2779 self._pending = args
2769 self._atexit = []
2780 self._atexit = []
2770
2781
2771 def __enter__(self):
2782 def __enter__(self):
2772 return self
2783 return self
2773
2784
2774 def enter(self):
2785 def enter(self):
2775 '''Create and enter context managers in the order in which they were
2786 '''Create and enter context managers in the order in which they were
2776 passed to the constructor.'''
2787 passed to the constructor.'''
2777 values = []
2788 values = []
2778 for func in self._pending:
2789 for func in self._pending:
2779 obj = func()
2790 obj = func()
2780 values.append(obj.__enter__())
2791 values.append(obj.__enter__())
2781 self._atexit.append(obj.__exit__)
2792 self._atexit.append(obj.__exit__)
2782 del self._pending
2793 del self._pending
2783 return values
2794 return values
2784
2795
2785 def atexit(self, func, *args, **kwargs):
2796 def atexit(self, func, *args, **kwargs):
2786 '''Add a function to call when this context manager exits. The
2797 '''Add a function to call when this context manager exits. The
2787 ordering of multiple atexit calls is unspecified, save that
2798 ordering of multiple atexit calls is unspecified, save that
2788 they will happen before any __exit__ functions.'''
2799 they will happen before any __exit__ functions.'''
2789 def wrapper(exc_type, exc_val, exc_tb):
2800 def wrapper(exc_type, exc_val, exc_tb):
2790 func(*args, **kwargs)
2801 func(*args, **kwargs)
2791 self._atexit.append(wrapper)
2802 self._atexit.append(wrapper)
2792 return func
2803 return func
2793
2804
2794 def __exit__(self, exc_type, exc_val, exc_tb):
2805 def __exit__(self, exc_type, exc_val, exc_tb):
2795 '''Context managers are exited in the reverse order from which
2806 '''Context managers are exited in the reverse order from which
2796 they were created.'''
2807 they were created.'''
2797 received = exc_type is not None
2808 received = exc_type is not None
2798 suppressed = False
2809 suppressed = False
2799 pending = None
2810 pending = None
2800 self._atexit.reverse()
2811 self._atexit.reverse()
2801 for exitfunc in self._atexit:
2812 for exitfunc in self._atexit:
2802 try:
2813 try:
2803 if exitfunc(exc_type, exc_val, exc_tb):
2814 if exitfunc(exc_type, exc_val, exc_tb):
2804 suppressed = True
2815 suppressed = True
2805 exc_type = None
2816 exc_type = None
2806 exc_val = None
2817 exc_val = None
2807 exc_tb = None
2818 exc_tb = None
2808 except BaseException:
2819 except BaseException:
2809 pending = sys.exc_info()
2820 pending = sys.exc_info()
2810 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2821 exc_type, exc_val, exc_tb = pending = sys.exc_info()
2811 del self._atexit
2822 del self._atexit
2812 if pending:
2823 if pending:
2813 raise exc_val
2824 raise exc_val
2814 return received and suppressed
2825 return received and suppressed
2815
2826
2816 def _bz2():
2827 def _bz2():
2817 d = bz2.BZ2Decompressor()
2828 d = bz2.BZ2Decompressor()
2818 # Bzip2 stream start with BZ, but we stripped it.
2829 # Bzip2 stream start with BZ, but we stripped it.
2819 # we put it back for good measure.
2830 # we put it back for good measure.
2820 d.decompress('BZ')
2831 d.decompress('BZ')
2821 return d
2832 return d
2822
2833
2823 decompressors = {None: lambda fh: fh,
2834 decompressors = {None: lambda fh: fh,
2824 '_truncatedBZ': _makedecompressor(_bz2),
2835 '_truncatedBZ': _makedecompressor(_bz2),
2825 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2836 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
2826 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2837 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
2827 }
2838 }
2828 # also support the old form by courtesies
2839 # also support the old form by courtesies
2829 decompressors['UN'] = decompressors[None]
2840 decompressors['UN'] = decompressors[None]
2830
2841
2831 # convenient shortcut
2842 # convenient shortcut
2832 dst = debugstacktrace
2843 dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now