##// END OF EJS Templates
vfs: add "notindexed" argument to invoke "ensuredir" with it in write mode...
FUJIWARA Katsunori -
r23370:46265d0f default
parent child Browse files
Show More
@@ -1,1046 +1,1060
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases, parsers
10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob, tempfile
13 import os, errno, re, glob, tempfile
14
14
# Pick the platform-specific backend once at import time so the rest of
# this module can stay platform-agnostic.
if os.name == 'nt':
    import scmwindows as scmplatform
else:
    import scmposix as scmplatform

# Re-exported platform hooks for locating system- and user-level hgrc files.
systemrcpath = scmplatform.systemrcpath
userrcpath = scmplatform.userrcpath
22
22
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self):
        # The repr() protocol never passes extra arguments, so the former
        # *args/**kwargs parameters were dead weight and have been dropped.
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
75
75
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Map every known subpath to a context, letting ctx1 win whenever both
    # contexts know the subpath.  The ctx2 entries matter when the .hgsub
    # file has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = {}
    for ctx in (ctx2, ctx1):
        for subpath in ctx.substate:
            subpaths[subpath] = ctx
    for subpath in sorted(subpaths):
        yield subpath, subpaths[subpath].sub(subpath)
85
85
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        if n not in repo:
            # discovery should not have included the filtered revision,
            # we have to explicitly exclude it until discovery is cleanup.
            continue
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
106
106
def checknewlabel(repo, lbl, kind):
    '''Abort if ``lbl`` is not usable as a new label (bookmark/branch/tag).'''
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ('tip', '.', 'null'):
        raise util.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise util.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # the name parsed as an integer, which would be ambiguous with
        # revision numbers
        raise util.Abort(_("cannot use an integer as a name"))
120
120
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for badchar in ('\r', '\n'):
        if badchar in f:
            raise util.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
125
125
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise util.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
137
137
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    value = ui.config('ui', 'portablefilenames', 'warn')
    lowered = value.lower()
    boolval = util.parsebool(value)
    # on Windows non-portable names cannot be created at all, so always abort
    abort = os.name == 'nt' or lowered == 'abort'
    warn = boolval or lowered == 'warn'
    if boolval is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
150
150
class casecollisionauditor(object):
    '''Detect case-folding collisions between new filenames and files
    already tracked in the dirstate; warn or abort depending on setup.'''

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # lower-case every tracked filename in one pass over a joined blob
        joined = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(joined).split('\0'))
        self._dirstate = dirstate
        # _newfiles lets us skip re-checking (and re-complaining about)
        # a filename this auditor has already been called with.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
174
174
175 class abstractvfs(object):
175 class abstractvfs(object):
176 """Abstract base class; cannot be instantiated"""
176 """Abstract base class; cannot be instantiated"""
177
177
178 def __init__(self, *args, **kwargs):
178 def __init__(self, *args, **kwargs):
179 '''Prevent instantiation; don't call this from subclasses.'''
179 '''Prevent instantiation; don't call this from subclasses.'''
180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
181
181
182 def tryread(self, path):
182 def tryread(self, path):
183 '''gracefully return an empty string for missing files'''
183 '''gracefully return an empty string for missing files'''
184 try:
184 try:
185 return self.read(path)
185 return self.read(path)
186 except IOError, inst:
186 except IOError, inst:
187 if inst.errno != errno.ENOENT:
187 if inst.errno != errno.ENOENT:
188 raise
188 raise
189 return ""
189 return ""
190
190
191 def tryreadlines(self, path, mode='rb'):
191 def tryreadlines(self, path, mode='rb'):
192 '''gracefully return an empty array for missing files'''
192 '''gracefully return an empty array for missing files'''
193 try:
193 try:
194 return self.readlines(path, mode=mode)
194 return self.readlines(path, mode=mode)
195 except IOError, inst:
195 except IOError, inst:
196 if inst.errno != errno.ENOENT:
196 if inst.errno != errno.ENOENT:
197 raise
197 raise
198 return []
198 return []
199
199
200 def open(self, path, mode="r", text=False, atomictemp=False):
200 def open(self, path, mode="r", text=False, atomictemp=False,
201 notindexed=False):
202 '''Open ``path`` file, which is relative to vfs root.
203
204 Newly created directories are marked as "not to be indexed by
205 the content indexing service", if ``notindexed`` is specified
206 for "write" mode access.
207 '''
201 self.open = self.__call__
208 self.open = self.__call__
202 return self.__call__(path, mode, text, atomictemp)
209 return self.__call__(path, mode, text, atomictemp, notindexed)
203
210
204 def read(self, path):
211 def read(self, path):
205 fp = self(path, 'rb')
212 fp = self(path, 'rb')
206 try:
213 try:
207 return fp.read()
214 return fp.read()
208 finally:
215 finally:
209 fp.close()
216 fp.close()
210
217
211 def readlines(self, path, mode='rb'):
218 def readlines(self, path, mode='rb'):
212 fp = self(path, mode=mode)
219 fp = self(path, mode=mode)
213 try:
220 try:
214 return fp.readlines()
221 return fp.readlines()
215 finally:
222 finally:
216 fp.close()
223 fp.close()
217
224
218 def write(self, path, data):
225 def write(self, path, data):
219 fp = self(path, 'wb')
226 fp = self(path, 'wb')
220 try:
227 try:
221 return fp.write(data)
228 return fp.write(data)
222 finally:
229 finally:
223 fp.close()
230 fp.close()
224
231
225 def append(self, path, data):
232 def append(self, path, data):
226 fp = self(path, 'ab')
233 fp = self(path, 'ab')
227 try:
234 try:
228 return fp.write(data)
235 return fp.write(data)
229 finally:
236 finally:
230 fp.close()
237 fp.close()
231
238
232 def chmod(self, path, mode):
239 def chmod(self, path, mode):
233 return os.chmod(self.join(path), mode)
240 return os.chmod(self.join(path), mode)
234
241
235 def exists(self, path=None):
242 def exists(self, path=None):
236 return os.path.exists(self.join(path))
243 return os.path.exists(self.join(path))
237
244
238 def fstat(self, fp):
245 def fstat(self, fp):
239 return util.fstat(fp)
246 return util.fstat(fp)
240
247
241 def isdir(self, path=None):
248 def isdir(self, path=None):
242 return os.path.isdir(self.join(path))
249 return os.path.isdir(self.join(path))
243
250
244 def isfile(self, path=None):
251 def isfile(self, path=None):
245 return os.path.isfile(self.join(path))
252 return os.path.isfile(self.join(path))
246
253
247 def islink(self, path=None):
254 def islink(self, path=None):
248 return os.path.islink(self.join(path))
255 return os.path.islink(self.join(path))
249
256
250 def lexists(self, path=None):
257 def lexists(self, path=None):
251 return os.path.lexists(self.join(path))
258 return os.path.lexists(self.join(path))
252
259
253 def lstat(self, path=None):
260 def lstat(self, path=None):
254 return os.lstat(self.join(path))
261 return os.lstat(self.join(path))
255
262
256 def listdir(self, path=None):
263 def listdir(self, path=None):
257 return os.listdir(self.join(path))
264 return os.listdir(self.join(path))
258
265
259 def makedir(self, path=None, notindexed=True):
266 def makedir(self, path=None, notindexed=True):
260 return util.makedir(self.join(path), notindexed)
267 return util.makedir(self.join(path), notindexed)
261
268
262 def makedirs(self, path=None, mode=None):
269 def makedirs(self, path=None, mode=None):
263 return util.makedirs(self.join(path), mode)
270 return util.makedirs(self.join(path), mode)
264
271
265 def makelock(self, info, path):
272 def makelock(self, info, path):
266 return util.makelock(info, self.join(path))
273 return util.makelock(info, self.join(path))
267
274
268 def mkdir(self, path=None):
275 def mkdir(self, path=None):
269 return os.mkdir(self.join(path))
276 return os.mkdir(self.join(path))
270
277
271 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
278 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
272 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
279 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
273 dir=self.join(dir), text=text)
280 dir=self.join(dir), text=text)
274 dname, fname = util.split(name)
281 dname, fname = util.split(name)
275 if dir:
282 if dir:
276 return fd, os.path.join(dir, fname)
283 return fd, os.path.join(dir, fname)
277 else:
284 else:
278 return fd, fname
285 return fd, fname
279
286
280 def readdir(self, path=None, stat=None, skip=None):
287 def readdir(self, path=None, stat=None, skip=None):
281 return osutil.listdir(self.join(path), stat, skip)
288 return osutil.listdir(self.join(path), stat, skip)
282
289
283 def readlock(self, path):
290 def readlock(self, path):
284 return util.readlock(self.join(path))
291 return util.readlock(self.join(path))
285
292
286 def rename(self, src, dst):
293 def rename(self, src, dst):
287 return util.rename(self.join(src), self.join(dst))
294 return util.rename(self.join(src), self.join(dst))
288
295
289 def readlink(self, path):
296 def readlink(self, path):
290 return os.readlink(self.join(path))
297 return os.readlink(self.join(path))
291
298
292 def setflags(self, path, l, x):
299 def setflags(self, path, l, x):
293 return util.setflags(self.join(path), l, x)
300 return util.setflags(self.join(path), l, x)
294
301
295 def stat(self, path=None):
302 def stat(self, path=None):
296 return os.stat(self.join(path))
303 return os.stat(self.join(path))
297
304
298 def unlink(self, path=None):
305 def unlink(self, path=None):
299 return util.unlink(self.join(path))
306 return util.unlink(self.join(path))
300
307
301 def unlinkpath(self, path=None, ignoremissing=False):
308 def unlinkpath(self, path=None, ignoremissing=False):
302 return util.unlinkpath(self.join(path), ignoremissing)
309 return util.unlinkpath(self.join(path), ignoremissing)
303
310
304 def utime(self, path=None, t=None):
311 def utime(self, path=None, t=None):
305 return os.utime(self.join(path), t)
312 return os.utime(self.join(path), t)
306
313
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        # base: absolute (or expandable) directory all paths are joined to
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        # createmode: permission bits applied to newly created files,
        # or None to leave the OS default alone
        self.createmode = None
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            # auditing disabled: self.audit becomes a no-op predicate
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        # whether the filesystem at base supports symlinks (cached)
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        # whether the filesystem at base honors the exec bit (cached)
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        # apply createmode to a freshly created file, if configured
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink tracks the link count of the target file; it decides below
        # whether a copy-on-write break (mktempcopy) or a chmod is needed.
        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    # target does not exist yet: create parent dirs now
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        # break up hardlinks before writing in place
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        '''Create a symlink at ``dst`` pointing to ``src``; falls back to
        writing ``src`` as the file content where symlinks are unsupported.'''
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path):
        # absolute path for ``path`` under base; base itself when path is
        # empty/None
        if path:
            return os.path.join(self.base, path)
        else:
            return self.base
419
433
# compatibility alias: 'opener' is the historical name for the vfs class
opener = vfs
421
435
class auditvfs(object):
    '''Base for wrapper vfs classes; forwards the mustaudit flag to the
    wrapped vfs.'''

    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)
433
447
class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        # rewrite the path through the filter before delegating
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path):
        if not path:
            return self.vfs.join(path)
        return self.vfs.join(self._filter(path))
449
463
# compatibility alias: 'filteropener' is the historical name for filtervfs
filteropener = filtervfs
451
465
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        # only plain read modes may pass through
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)
462
476
463
477
464 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
478 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
465 '''yield every hg repository under path, always recursively.
479 '''yield every hg repository under path, always recursively.
466 The recurse flag will only control recursion into repo working dirs'''
480 The recurse flag will only control recursion into repo working dirs'''
467 def errhandler(err):
481 def errhandler(err):
468 if err.filename == path:
482 if err.filename == path:
469 raise err
483 raise err
470 samestat = getattr(os.path, 'samestat', None)
484 samestat = getattr(os.path, 'samestat', None)
471 if followsym and samestat is not None:
485 if followsym and samestat is not None:
472 def adddir(dirlst, dirname):
486 def adddir(dirlst, dirname):
473 match = False
487 match = False
474 dirstat = os.stat(dirname)
488 dirstat = os.stat(dirname)
475 for lstdirstat in dirlst:
489 for lstdirstat in dirlst:
476 if samestat(dirstat, lstdirstat):
490 if samestat(dirstat, lstdirstat):
477 match = True
491 match = True
478 break
492 break
479 if not match:
493 if not match:
480 dirlst.append(dirstat)
494 dirlst.append(dirstat)
481 return not match
495 return not match
482 else:
496 else:
483 followsym = False
497 followsym = False
484
498
485 if (seen_dirs is None) and followsym:
499 if (seen_dirs is None) and followsym:
486 seen_dirs = []
500 seen_dirs = []
487 adddir(seen_dirs, path)
501 adddir(seen_dirs, path)
488 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
502 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
489 dirs.sort()
503 dirs.sort()
490 if '.hg' in dirs:
504 if '.hg' in dirs:
491 yield root # found a repository
505 yield root # found a repository
492 qroot = os.path.join(root, '.hg', 'patches')
506 qroot = os.path.join(root, '.hg', 'patches')
493 if os.path.isdir(os.path.join(qroot, '.hg')):
507 if os.path.isdir(os.path.join(qroot, '.hg')):
494 yield qroot # we have a patch queue repo here
508 yield qroot # we have a patch queue repo here
495 if recurse:
509 if recurse:
496 # avoid recursing inside the .hg directory
510 # avoid recursing inside the .hg directory
497 dirs.remove('.hg')
511 dirs.remove('.hg')
498 else:
512 else:
499 dirs[:] = [] # don't descend further
513 dirs[:] = [] # don't descend further
500 elif followsym:
514 elif followsym:
501 newdirs = []
515 newdirs = []
502 for d in dirs:
516 for d in dirs:
503 fname = os.path.join(root, d)
517 fname = os.path.join(root, d)
504 if adddir(seen_dirs, fname):
518 if adddir(seen_dirs, fname):
505 if os.path.islink(fname):
519 if os.path.islink(fname):
506 for hgname in walkrepos(fname, True, seen_dirs):
520 for hgname in walkrepos(fname, True, seen_dirs):
507 yield hgname
521 yield hgname
508 else:
522 else:
509 newdirs.append(d)
523 newdirs.append(d)
510 dirs[:] = newdirs
524 dirs[:] = newdirs
511
525
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    # bundled default config snippets ship in <datapath>/default.d/*.rc
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        path.extend(os.path.join(defaultpath, f)
                    for f, kind in osutil.listdir(defaultpath)
                    if f.endswith('.rc'))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    return [os.path.normpath(f) for f in path]
524
538
# memoized result of rcpath(); None until first computed
_rcpath = None
526
540
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is not None:
        return _rcpath
    if 'HGRCPATH' not in os.environ:
        _rcpath = osrcpath()
        return _rcpath
    _rcpath = []
    for p in os.environ['HGRCPATH'].split(os.pathsep):
        if not p:
            continue
        p = util.expandpath(p)
        if os.path.isdir(p):
            # directories contribute every *.rc file they contain
            for f, kind in osutil.listdir(p):
                if f.endswith('.rc'):
                    _rcpath.append(os.path.join(p, f))
        else:
            _rcpath.append(p)
    return _rcpath
550
564
def revsingle(repo, revspec, default='.'):
    '''Resolve revspec to a single changectx.

    An empty spec (but not the literal revision 0) resolves to default.
    Raises util.Abort when the spec matches no revision.'''
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise util.Abort(_('empty revision set'))
    return repo[matched.last()]
559
573
def revpair(repo, revs):
    '''Resolve revision specs to a (node, node-or-None) pair.

    With no specs, returns (first working-directory parent, None).
    A single non-range spec yields (node, None).'''
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)

    # pick the endpoints, exploiting known ordering when available
    if not resolved:
        first = second = None
    elif resolved.isascending():
        first, second = resolved.min(), resolved.max()
    elif resolved.isdescending():
        first, second = resolved.max(), resolved.min()
    else:
        first, second = resolved.first(), resolved.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    # a lone spec without the range separator means "just this revision"
    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
585
599
# separator used by old-style "start:end" range specifications
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # an empty side of a range (but not revision 0) means defval
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    seen, l = set(), revset.baseset([])
    for spec in revs:
        if l and not seen:
            # sync 'seen' that the fast path below deferred last iteration
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l = l + revset.baseset([spec])
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = revset.baseset(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    # drop revisions already produced by earlier specs
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l = l + revset.baseset(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l = l + revset.baseset([rev])
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        if seen or l:
            dl = [r for r in m(repo, revset.spanset(repo)) if r not in seen]
            l = l + revset.baseset(dl)
            seen.update(dl)
        else:
            l = m(repo, revset.spanset(repo))

    return l
648
662
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # bare (kind-less) pattern: let glob try to expand it
            try:
                matches = glob.glob(pat)
            except re.error:
                matches = [pat]
            if matches:
                expanded.extend(matches)
                continue
        # kinded pattern, or glob matched nothing: keep the spec as-is
        expanded.append(kindpat)
    return expanded
667
681
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.'''
    # a single empty pattern means "no patterns"
    if pats == ("",):
        pats = []
    # expand bare globs on platforms where the shell did not
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    matcher = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                        default)

    # route bad-file reports through the repo ui as warnings
    def badfn(f, msg):
        ctx._repo.ui.warn("%s: %s\n" % (matcher.rel(f), msg))
    matcher.bad = badfn
    return matcher, pats
682
696
def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    matcher, _pats = matchandpats(ctx, pats, opts, globbed, default)
    return matcher
686
700
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # always-matcher needs no pattern compilation, only root/cwd for rel()
    return matchmod.always(repo.root, repo.getcwd())
690
704
def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    # exact matcher does set membership, no pattern matching
    return matchmod.exact(repo.root, repo.getcwd(), files)
694
708
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, recording renames found by
    content similarity.

    Returns 1 if any explicitly-requested pattern was rejected by the
    matcher, 0 otherwise.'''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    m = match(repo[None], pats, opts)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    # report what will happen: unknown/forgotten files get added,
    # deleted files get removed
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            rel = m.rel(abs)
            if abs in unknownset:
                status = _('adding %s\n') % ((pats and rel) or abs)
            else:
                status = _('removing %s\n') % ((pats and rel) or abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # a rejected name counts as failure only if it was requested explicitly
    for f in rejected:
        if f in m.files():
            return 1
    return 0
729
743
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    # unlike addremove(), only narrate the adds/removes in verbose mode
    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    # no dry-run support here: changes are always recorded
    _markchanges(repo, unknown + forgotten, deleted, renames)

    # a rejected name counts as failure only if it was requested explicitly
    for f in rejected:
        if f in m.files():
            return 1
    return 0
759
773
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a 5-tuple of lists: (added, unknown, deleted, removed,
    forgotten), all repo-root-relative paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # walkresults maps path -> stat (or None when the file is gone);
    # dirstate chars: '?' unknown, 'r' removed, 'a' added
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked and within the repo (not e.g. a symlink escape)
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing on disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
788
802
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a dict mapping new name -> old name.'''
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        bothexact = matcher.exact(old) and matcher.exact(new)
        if repo.ui.verbose or not bothexact:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
803
817
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    # all dirstate mutation happens under the working-copy lock
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
    finally:
        wlock.release()
816
830
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow the copy chain back: if src is itself a copy, use its origin
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added in the working copy, so there is no
            # committed revision to record copy metadata against
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
835
849
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings; raises RequirementError when
    the file is corrupt or lists an unsupported feature.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r in supported:
            continue
        # an empty or non-alphanumeric-leading entry means the file itself
        # is damaged rather than merely naming an unknown feature
        if not r or not r[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(r)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements
854
868
class filecachesubentry(object):
    '''stat-based change tracker for a single cached file path'''
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # tri-state: True/False once known, None while undetermined
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # re-stat only when the filesystem can give reliable answers
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        '''report whether the file changed since the last refresh;
        conservatively True when cacheability cannot be established'''
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns a cachestat, or None (implicitly) when path is missing
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
class filecacheentry(object):
    '''aggregate change tracker over several file paths'''
    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
926
940
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        # decorator application: remember the wrapped function and its name
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            # fast path: value already materialized on the instance
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # file changed on disk: rebuild the cached object
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # drop the instance copy; the _filecache entry is kept so a later
        # __get__ can revalidate against the recorded stat info
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1002
1016
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        if util.safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style map: ignore entries whose state char is 'skip'
            for f, s in map.iteritems():
                if s[0] != skip:
                    self.addpath(f)
        else:
            for f in map:
                self.addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                counts[base] += 1
                # all shallower ancestors are already counted
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                # shallower ancestors keep their remaining references
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
1038
1052
# prefer the C implementation of dirs when the parsers extension provides
# one; it replaces the pure-Python class defined above
if util.safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
1041
1055
def finddirs(path):
    '''Yield each ancestor directory of path, deepest first.

    "a/b/c" yields "a/b" then "a"; a path with no slash yields nothing.'''
    idx = path.rfind('/')
    while idx >= 0:
        yield path[:idx]
        idx = path.rfind('/', 0, idx)
@@ -1,2186 +1,2191
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 import i18n
16 import i18n
17 _ = i18n._
17 _ = i18n._
18 import error, osutil, encoding
18 import error, osutil, encoding
19 import errno, shutil, sys, tempfile, traceback
19 import errno, shutil, sys, tempfile, traceback
20 import re as remod
20 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib
22 import imp, socket, urllib
23
23
24 if os.name == 'nt':
24 if os.name == 'nt':
25 import windows as platform
25 import windows as platform
26 else:
26 else:
27 import posix as platform
27 import posix as platform
28
28
29 cachestat = platform.cachestat
29 cachestat = platform.cachestat
30 checkexec = platform.checkexec
30 checkexec = platform.checkexec
31 checklink = platform.checklink
31 checklink = platform.checklink
32 copymode = platform.copymode
32 copymode = platform.copymode
33 executablepath = platform.executablepath
33 executablepath = platform.executablepath
34 expandglobs = platform.expandglobs
34 expandglobs = platform.expandglobs
35 explainexit = platform.explainexit
35 explainexit = platform.explainexit
36 findexe = platform.findexe
36 findexe = platform.findexe
37 gethgcmd = platform.gethgcmd
37 gethgcmd = platform.gethgcmd
38 getuser = platform.getuser
38 getuser = platform.getuser
39 groupmembers = platform.groupmembers
39 groupmembers = platform.groupmembers
40 groupname = platform.groupname
40 groupname = platform.groupname
41 hidewindow = platform.hidewindow
41 hidewindow = platform.hidewindow
42 isexec = platform.isexec
42 isexec = platform.isexec
43 isowner = platform.isowner
43 isowner = platform.isowner
44 localpath = platform.localpath
44 localpath = platform.localpath
45 lookupreg = platform.lookupreg
45 lookupreg = platform.lookupreg
46 makedir = platform.makedir
46 makedir = platform.makedir
47 nlinks = platform.nlinks
47 nlinks = platform.nlinks
48 normpath = platform.normpath
48 normpath = platform.normpath
49 normcase = platform.normcase
49 normcase = platform.normcase
50 openhardlinks = platform.openhardlinks
50 openhardlinks = platform.openhardlinks
51 oslink = platform.oslink
51 oslink = platform.oslink
52 parsepatchoutput = platform.parsepatchoutput
52 parsepatchoutput = platform.parsepatchoutput
53 pconvert = platform.pconvert
53 pconvert = platform.pconvert
54 popen = platform.popen
54 popen = platform.popen
55 posixfile = platform.posixfile
55 posixfile = platform.posixfile
56 quotecommand = platform.quotecommand
56 quotecommand = platform.quotecommand
57 readpipe = platform.readpipe
57 readpipe = platform.readpipe
58 rename = platform.rename
58 rename = platform.rename
59 samedevice = platform.samedevice
59 samedevice = platform.samedevice
60 samefile = platform.samefile
60 samefile = platform.samefile
61 samestat = platform.samestat
61 samestat = platform.samestat
62 setbinary = platform.setbinary
62 setbinary = platform.setbinary
63 setflags = platform.setflags
63 setflags = platform.setflags
64 setsignalhandler = platform.setsignalhandler
64 setsignalhandler = platform.setsignalhandler
65 shellquote = platform.shellquote
65 shellquote = platform.shellquote
66 spawndetached = platform.spawndetached
66 spawndetached = platform.spawndetached
67 split = platform.split
67 split = platform.split
68 sshargs = platform.sshargs
68 sshargs = platform.sshargs
69 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
69 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
70 statisexec = platform.statisexec
70 statisexec = platform.statisexec
71 statislink = platform.statislink
71 statislink = platform.statislink
72 termwidth = platform.termwidth
72 termwidth = platform.termwidth
73 testpid = platform.testpid
73 testpid = platform.testpid
74 umask = platform.umask
74 umask = platform.umask
75 unlink = platform.unlink
75 unlink = platform.unlink
76 unlinkpath = platform.unlinkpath
76 unlinkpath = platform.unlinkpath
77 username = platform.username
77 username = platform.username
78
78
79 # Python compatibility
79 # Python compatibility
80
80
81 _notset = object()
81 _notset = object()
82
82
83 def safehasattr(thing, attr):
83 def safehasattr(thing, attr):
84 return getattr(thing, attr, _notset) is not _notset
84 return getattr(thing, attr, _notset) is not _notset
85
85
86 def sha1(s=''):
86 def sha1(s=''):
87 '''
87 '''
88 Low-overhead wrapper around Python's SHA support
88 Low-overhead wrapper around Python's SHA support
89
89
90 >>> f = _fastsha1
90 >>> f = _fastsha1
91 >>> a = sha1()
91 >>> a = sha1()
92 >>> a = f()
92 >>> a = f()
93 >>> a.hexdigest()
93 >>> a.hexdigest()
94 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
94 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
95 '''
95 '''
96
96
97 return _fastsha1(s)
97 return _fastsha1(s)
98
98
99 def _fastsha1(s=''):
99 def _fastsha1(s=''):
100 # This function will import sha1 from hashlib or sha (whichever is
100 # This function will import sha1 from hashlib or sha (whichever is
101 # available) and overwrite itself with it on the first call.
101 # available) and overwrite itself with it on the first call.
102 # Subsequent calls will go directly to the imported function.
102 # Subsequent calls will go directly to the imported function.
103 if sys.version_info >= (2, 5):
103 if sys.version_info >= (2, 5):
104 from hashlib import sha1 as _sha1
104 from hashlib import sha1 as _sha1
105 else:
105 else:
106 from sha import sha as _sha1
106 from sha import sha as _sha1
107 global _fastsha1, sha1
107 global _fastsha1, sha1
108 _fastsha1 = sha1 = _sha1
108 _fastsha1 = sha1 = _sha1
109 return _sha1(s)
109 return _sha1(s)
110
110
111 def md5(s=''):
111 def md5(s=''):
112 try:
112 try:
113 from hashlib import md5 as _md5
113 from hashlib import md5 as _md5
114 except ImportError:
114 except ImportError:
115 from md5 import md5 as _md5
115 from md5 import md5 as _md5
116 global md5
116 global md5
117 md5 = _md5
117 md5 = _md5
118 return _md5(s)
118 return _md5(s)
119
119
120 DIGESTS = {
120 DIGESTS = {
121 'md5': md5,
121 'md5': md5,
122 'sha1': sha1,
122 'sha1': sha1,
123 }
123 }
124 # List of digest types from strongest to weakest
124 # List of digest types from strongest to weakest
125 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
125 DIGESTS_BY_STRENGTH = ['sha1', 'md5']
126
126
127 try:
127 try:
128 import hashlib
128 import hashlib
129 DIGESTS.update({
129 DIGESTS.update({
130 'sha512': hashlib.sha512,
130 'sha512': hashlib.sha512,
131 })
131 })
132 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
132 DIGESTS_BY_STRENGTH.insert(0, 'sha512')
133 except ImportError:
133 except ImportError:
134 pass
134 pass
135
135
136 for k in DIGESTS_BY_STRENGTH:
136 for k in DIGESTS_BY_STRENGTH:
137 assert k in DIGESTS
137 assert k in DIGESTS
138
138
139 class digester(object):
139 class digester(object):
140 """helper to compute digests.
140 """helper to compute digests.
141
141
142 This helper can be used to compute one or more digests given their name.
142 This helper can be used to compute one or more digests given their name.
143
143
144 >>> d = digester(['md5', 'sha1'])
144 >>> d = digester(['md5', 'sha1'])
145 >>> d.update('foo')
145 >>> d.update('foo')
146 >>> [k for k in sorted(d)]
146 >>> [k for k in sorted(d)]
147 ['md5', 'sha1']
147 ['md5', 'sha1']
148 >>> d['md5']
148 >>> d['md5']
149 'acbd18db4cc2f85cedef654fccc4a4d8'
149 'acbd18db4cc2f85cedef654fccc4a4d8'
150 >>> d['sha1']
150 >>> d['sha1']
151 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
151 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
152 >>> digester.preferred(['md5', 'sha1'])
152 >>> digester.preferred(['md5', 'sha1'])
153 'sha1'
153 'sha1'
154 """
154 """
155
155
156 def __init__(self, digests, s=''):
156 def __init__(self, digests, s=''):
157 self._hashes = {}
157 self._hashes = {}
158 for k in digests:
158 for k in digests:
159 if k not in DIGESTS:
159 if k not in DIGESTS:
160 raise Abort(_('unknown digest type: %s') % k)
160 raise Abort(_('unknown digest type: %s') % k)
161 self._hashes[k] = DIGESTS[k]()
161 self._hashes[k] = DIGESTS[k]()
162 if s:
162 if s:
163 self.update(s)
163 self.update(s)
164
164
165 def update(self, data):
165 def update(self, data):
166 for h in self._hashes.values():
166 for h in self._hashes.values():
167 h.update(data)
167 h.update(data)
168
168
169 def __getitem__(self, key):
169 def __getitem__(self, key):
170 if key not in DIGESTS:
170 if key not in DIGESTS:
171 raise Abort(_('unknown digest type: %s') % k)
171 raise Abort(_('unknown digest type: %s') % k)
172 return self._hashes[key].hexdigest()
172 return self._hashes[key].hexdigest()
173
173
174 def __iter__(self):
174 def __iter__(self):
175 return iter(self._hashes)
175 return iter(self._hashes)
176
176
177 @staticmethod
177 @staticmethod
178 def preferred(supported):
178 def preferred(supported):
179 """returns the strongest digest type in both supported and DIGESTS."""
179 """returns the strongest digest type in both supported and DIGESTS."""
180
180
181 for k in DIGESTS_BY_STRENGTH:
181 for k in DIGESTS_BY_STRENGTH:
182 if k in supported:
182 if k in supported:
183 return k
183 return k
184 return None
184 return None
185
185
186 class digestchecker(object):
186 class digestchecker(object):
187 """file handle wrapper that additionally checks content against a given
187 """file handle wrapper that additionally checks content against a given
188 size and digests.
188 size and digests.
189
189
190 d = digestchecker(fh, size, {'md5': '...'})
190 d = digestchecker(fh, size, {'md5': '...'})
191
191
192 When multiple digests are given, all of them are validated.
192 When multiple digests are given, all of them are validated.
193 """
193 """
194
194
195 def __init__(self, fh, size, digests):
195 def __init__(self, fh, size, digests):
196 self._fh = fh
196 self._fh = fh
197 self._size = size
197 self._size = size
198 self._got = 0
198 self._got = 0
199 self._digests = dict(digests)
199 self._digests = dict(digests)
200 self._digester = digester(self._digests.keys())
200 self._digester = digester(self._digests.keys())
201
201
202 def read(self, length=-1):
202 def read(self, length=-1):
203 content = self._fh.read(length)
203 content = self._fh.read(length)
204 self._digester.update(content)
204 self._digester.update(content)
205 self._got += len(content)
205 self._got += len(content)
206 return content
206 return content
207
207
208 def validate(self):
208 def validate(self):
209 if self._size != self._got:
209 if self._size != self._got:
210 raise Abort(_('size mismatch: expected %d, got %d') %
210 raise Abort(_('size mismatch: expected %d, got %d') %
211 (self._size, self._got))
211 (self._size, self._got))
212 for k, v in self._digests.items():
212 for k, v in self._digests.items():
213 if v != self._digester[k]:
213 if v != self._digester[k]:
214 # i18n: first parameter is a digest name
214 # i18n: first parameter is a digest name
215 raise Abort(_('%s mismatch: expected %s, got %s') %
215 raise Abort(_('%s mismatch: expected %s, got %s') %
216 (k, v, self._digester[k]))
216 (k, v, self._digester[k]))
217
217
218 try:
218 try:
219 buffer = buffer
219 buffer = buffer
220 except NameError:
220 except NameError:
221 if sys.version_info[0] < 3:
221 if sys.version_info[0] < 3:
222 def buffer(sliceable, offset=0):
222 def buffer(sliceable, offset=0):
223 return sliceable[offset:]
223 return sliceable[offset:]
224 else:
224 else:
225 def buffer(sliceable, offset=0):
225 def buffer(sliceable, offset=0):
226 return memoryview(sliceable)[offset:]
226 return memoryview(sliceable)[offset:]
227
227
228 import subprocess
228 import subprocess
229 closefds = os.name == 'posix'
229 closefds = os.name == 'posix'
230
230
231 def popen2(cmd, env=None, newlines=False):
231 def popen2(cmd, env=None, newlines=False):
232 # Setting bufsize to -1 lets the system decide the buffer size.
232 # Setting bufsize to -1 lets the system decide the buffer size.
233 # The default for bufsize is 0, meaning unbuffered. This leads to
233 # The default for bufsize is 0, meaning unbuffered. This leads to
234 # poor performance on Mac OS X: http://bugs.python.org/issue4194
234 # poor performance on Mac OS X: http://bugs.python.org/issue4194
235 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
235 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
236 close_fds=closefds,
236 close_fds=closefds,
237 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
237 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
238 universal_newlines=newlines,
238 universal_newlines=newlines,
239 env=env)
239 env=env)
240 return p.stdin, p.stdout
240 return p.stdin, p.stdout
241
241
242 def popen3(cmd, env=None, newlines=False):
242 def popen3(cmd, env=None, newlines=False):
243 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
243 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
244 return stdin, stdout, stderr
244 return stdin, stdout, stderr
245
245
246 def popen4(cmd, env=None, newlines=False):
246 def popen4(cmd, env=None, newlines=False):
247 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
247 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
248 close_fds=closefds,
248 close_fds=closefds,
249 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
249 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
250 stderr=subprocess.PIPE,
250 stderr=subprocess.PIPE,
251 universal_newlines=newlines,
251 universal_newlines=newlines,
252 env=env)
252 env=env)
253 return p.stdin, p.stdout, p.stderr, p
253 return p.stdin, p.stdout, p.stderr, p
254
254
255 def version():
255 def version():
256 """Return version information if available."""
256 """Return version information if available."""
257 try:
257 try:
258 import __version__
258 import __version__
259 return __version__.version
259 return __version__.version
260 except ImportError:
260 except ImportError:
261 return 'unknown'
261 return 'unknown'
262
262
263 # used by parsedate
263 # used by parsedate
264 defaultdateformats = (
264 defaultdateformats = (
265 '%Y-%m-%d %H:%M:%S',
265 '%Y-%m-%d %H:%M:%S',
266 '%Y-%m-%d %I:%M:%S%p',
266 '%Y-%m-%d %I:%M:%S%p',
267 '%Y-%m-%d %H:%M',
267 '%Y-%m-%d %H:%M',
268 '%Y-%m-%d %I:%M%p',
268 '%Y-%m-%d %I:%M%p',
269 '%Y-%m-%d',
269 '%Y-%m-%d',
270 '%m-%d',
270 '%m-%d',
271 '%m/%d',
271 '%m/%d',
272 '%m/%d/%y',
272 '%m/%d/%y',
273 '%m/%d/%Y',
273 '%m/%d/%Y',
274 '%a %b %d %H:%M:%S %Y',
274 '%a %b %d %H:%M:%S %Y',
275 '%a %b %d %I:%M:%S%p %Y',
275 '%a %b %d %I:%M:%S%p %Y',
276 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
276 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
277 '%b %d %H:%M:%S %Y',
277 '%b %d %H:%M:%S %Y',
278 '%b %d %I:%M:%S%p %Y',
278 '%b %d %I:%M:%S%p %Y',
279 '%b %d %H:%M:%S',
279 '%b %d %H:%M:%S',
280 '%b %d %I:%M:%S%p',
280 '%b %d %I:%M:%S%p',
281 '%b %d %H:%M',
281 '%b %d %H:%M',
282 '%b %d %I:%M%p',
282 '%b %d %I:%M%p',
283 '%b %d %Y',
283 '%b %d %Y',
284 '%b %d',
284 '%b %d',
285 '%H:%M:%S',
285 '%H:%M:%S',
286 '%I:%M:%S%p',
286 '%I:%M:%S%p',
287 '%H:%M',
287 '%H:%M',
288 '%I:%M%p',
288 '%I:%M%p',
289 )
289 )
290
290
291 extendeddateformats = defaultdateformats + (
291 extendeddateformats = defaultdateformats + (
292 "%Y",
292 "%Y",
293 "%Y-%m",
293 "%Y-%m",
294 "%b",
294 "%b",
295 "%b %Y",
295 "%b %Y",
296 )
296 )
297
297
298 def cachefunc(func):
298 def cachefunc(func):
299 '''cache the result of function calls'''
299 '''cache the result of function calls'''
300 # XXX doesn't handle keywords args
300 # XXX doesn't handle keywords args
301 if func.func_code.co_argcount == 0:
301 if func.func_code.co_argcount == 0:
302 cache = []
302 cache = []
303 def f():
303 def f():
304 if len(cache) == 0:
304 if len(cache) == 0:
305 cache.append(func())
305 cache.append(func())
306 return cache[0]
306 return cache[0]
307 return f
307 return f
308 cache = {}
308 cache = {}
309 if func.func_code.co_argcount == 1:
309 if func.func_code.co_argcount == 1:
310 # we gain a small amount of time because
310 # we gain a small amount of time because
311 # we don't need to pack/unpack the list
311 # we don't need to pack/unpack the list
312 def f(arg):
312 def f(arg):
313 if arg not in cache:
313 if arg not in cache:
314 cache[arg] = func(arg)
314 cache[arg] = func(arg)
315 return cache[arg]
315 return cache[arg]
316 else:
316 else:
317 def f(*args):
317 def f(*args):
318 if args not in cache:
318 if args not in cache:
319 cache[args] = func(*args)
319 cache[args] = func(*args)
320 return cache[args]
320 return cache[args]
321
321
322 return f
322 return f
323
323
324 try:
324 try:
325 collections.deque.remove
325 collections.deque.remove
326 deque = collections.deque
326 deque = collections.deque
327 except AttributeError:
327 except AttributeError:
328 # python 2.4 lacks deque.remove
328 # python 2.4 lacks deque.remove
329 class deque(collections.deque):
329 class deque(collections.deque):
330 def remove(self, val):
330 def remove(self, val):
331 for i, v in enumerate(self):
331 for i, v in enumerate(self):
332 if v == val:
332 if v == val:
333 del self[i]
333 del self[i]
334 break
334 break
335
335
336 class sortdict(dict):
336 class sortdict(dict):
337 '''a simple sorted dictionary'''
337 '''a simple sorted dictionary'''
338 def __init__(self, data=None):
338 def __init__(self, data=None):
339 self._list = []
339 self._list = []
340 if data:
340 if data:
341 self.update(data)
341 self.update(data)
342 def copy(self):
342 def copy(self):
343 return sortdict(self)
343 return sortdict(self)
344 def __setitem__(self, key, val):
344 def __setitem__(self, key, val):
345 if key in self:
345 if key in self:
346 self._list.remove(key)
346 self._list.remove(key)
347 self._list.append(key)
347 self._list.append(key)
348 dict.__setitem__(self, key, val)
348 dict.__setitem__(self, key, val)
349 def __iter__(self):
349 def __iter__(self):
350 return self._list.__iter__()
350 return self._list.__iter__()
351 def update(self, src):
351 def update(self, src):
352 for k in src:
352 for k in src:
353 self[k] = src[k]
353 self[k] = src[k]
354 def clear(self):
354 def clear(self):
355 dict.clear(self)
355 dict.clear(self)
356 self._list = []
356 self._list = []
357 def items(self):
357 def items(self):
358 return [(k, self[k]) for k in self._list]
358 return [(k, self[k]) for k in self._list]
359 def __delitem__(self, key):
359 def __delitem__(self, key):
360 dict.__delitem__(self, key)
360 dict.__delitem__(self, key)
361 self._list.remove(key)
361 self._list.remove(key)
362 def pop(self, key, *args, **kwargs):
362 def pop(self, key, *args, **kwargs):
363 dict.pop(self, key, *args, **kwargs)
363 dict.pop(self, key, *args, **kwargs)
364 try:
364 try:
365 self._list.remove(key)
365 self._list.remove(key)
366 except ValueError:
366 except ValueError:
367 pass
367 pass
368 def keys(self):
368 def keys(self):
369 return self._list
369 return self._list
370 def iterkeys(self):
370 def iterkeys(self):
371 return self._list.__iter__()
371 return self._list.__iter__()
372 def iteritems(self):
372 def iteritems(self):
373 for k in self._list:
373 for k in self._list:
374 yield k, self[k]
374 yield k, self[k]
375 def insert(self, index, key, val):
375 def insert(self, index, key, val):
376 self._list.insert(index, key)
376 self._list.insert(index, key)
377 dict.__setitem__(self, key, val)
377 dict.__setitem__(self, key, val)
378
378
379 class lrucachedict(object):
379 class lrucachedict(object):
380 '''cache most recent gets from or sets to this dictionary'''
380 '''cache most recent gets from or sets to this dictionary'''
381 def __init__(self, maxsize):
381 def __init__(self, maxsize):
382 self._cache = {}
382 self._cache = {}
383 self._maxsize = maxsize
383 self._maxsize = maxsize
384 self._order = deque()
384 self._order = deque()
385
385
386 def __getitem__(self, key):
386 def __getitem__(self, key):
387 value = self._cache[key]
387 value = self._cache[key]
388 self._order.remove(key)
388 self._order.remove(key)
389 self._order.append(key)
389 self._order.append(key)
390 return value
390 return value
391
391
392 def __setitem__(self, key, value):
392 def __setitem__(self, key, value):
393 if key not in self._cache:
393 if key not in self._cache:
394 if len(self._cache) >= self._maxsize:
394 if len(self._cache) >= self._maxsize:
395 del self._cache[self._order.popleft()]
395 del self._cache[self._order.popleft()]
396 else:
396 else:
397 self._order.remove(key)
397 self._order.remove(key)
398 self._cache[key] = value
398 self._cache[key] = value
399 self._order.append(key)
399 self._order.append(key)
400
400
401 def __contains__(self, key):
401 def __contains__(self, key):
402 return key in self._cache
402 return key in self._cache
403
403
404 def clear(self):
404 def clear(self):
405 self._cache.clear()
405 self._cache.clear()
406 self._order = deque()
406 self._order = deque()
407
407
408 def lrucachefunc(func):
408 def lrucachefunc(func):
409 '''cache most recent results of function calls'''
409 '''cache most recent results of function calls'''
410 cache = {}
410 cache = {}
411 order = deque()
411 order = deque()
412 if func.func_code.co_argcount == 1:
412 if func.func_code.co_argcount == 1:
413 def f(arg):
413 def f(arg):
414 if arg not in cache:
414 if arg not in cache:
415 if len(cache) > 20:
415 if len(cache) > 20:
416 del cache[order.popleft()]
416 del cache[order.popleft()]
417 cache[arg] = func(arg)
417 cache[arg] = func(arg)
418 else:
418 else:
419 order.remove(arg)
419 order.remove(arg)
420 order.append(arg)
420 order.append(arg)
421 return cache[arg]
421 return cache[arg]
422 else:
422 else:
423 def f(*args):
423 def f(*args):
424 if args not in cache:
424 if args not in cache:
425 if len(cache) > 20:
425 if len(cache) > 20:
426 del cache[order.popleft()]
426 del cache[order.popleft()]
427 cache[args] = func(*args)
427 cache[args] = func(*args)
428 else:
428 else:
429 order.remove(args)
429 order.remove(args)
430 order.append(args)
430 order.append(args)
431 return cache[args]
431 return cache[args]
432
432
433 return f
433 return f
434
434
435 class propertycache(object):
435 class propertycache(object):
436 def __init__(self, func):
436 def __init__(self, func):
437 self.func = func
437 self.func = func
438 self.name = func.__name__
438 self.name = func.__name__
439 def __get__(self, obj, type=None):
439 def __get__(self, obj, type=None):
440 result = self.func(obj)
440 result = self.func(obj)
441 self.cachevalue(obj, result)
441 self.cachevalue(obj, result)
442 return result
442 return result
443
443
444 def cachevalue(self, obj, value):
444 def cachevalue(self, obj, value):
445 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
445 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
446 obj.__dict__[self.name] = value
446 obj.__dict__[self.name] = value
447
447
448 def pipefilter(s, cmd):
448 def pipefilter(s, cmd):
449 '''filter string S through command CMD, returning its output'''
449 '''filter string S through command CMD, returning its output'''
450 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
450 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
451 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
451 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
452 pout, perr = p.communicate(s)
452 pout, perr = p.communicate(s)
453 return pout
453 return pout
454
454
455 def tempfilter(s, cmd):
455 def tempfilter(s, cmd):
456 '''filter string S through a pair of temporary files with CMD.
456 '''filter string S through a pair of temporary files with CMD.
457 CMD is used as a template to create the real command to be run,
457 CMD is used as a template to create the real command to be run,
458 with the strings INFILE and OUTFILE replaced by the real names of
458 with the strings INFILE and OUTFILE replaced by the real names of
459 the temporary files generated.'''
459 the temporary files generated.'''
460 inname, outname = None, None
460 inname, outname = None, None
461 try:
461 try:
462 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
462 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
463 fp = os.fdopen(infd, 'wb')
463 fp = os.fdopen(infd, 'wb')
464 fp.write(s)
464 fp.write(s)
465 fp.close()
465 fp.close()
466 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
466 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
467 os.close(outfd)
467 os.close(outfd)
468 cmd = cmd.replace('INFILE', inname)
468 cmd = cmd.replace('INFILE', inname)
469 cmd = cmd.replace('OUTFILE', outname)
469 cmd = cmd.replace('OUTFILE', outname)
470 code = os.system(cmd)
470 code = os.system(cmd)
471 if sys.platform == 'OpenVMS' and code & 1:
471 if sys.platform == 'OpenVMS' and code & 1:
472 code = 0
472 code = 0
473 if code:
473 if code:
474 raise Abort(_("command '%s' failed: %s") %
474 raise Abort(_("command '%s' failed: %s") %
475 (cmd, explainexit(code)))
475 (cmd, explainexit(code)))
476 fp = open(outname, 'rb')
476 fp = open(outname, 'rb')
477 r = fp.read()
477 r = fp.read()
478 fp.close()
478 fp.close()
479 return r
479 return r
480 finally:
480 finally:
481 try:
481 try:
482 if inname:
482 if inname:
483 os.unlink(inname)
483 os.unlink(inname)
484 except OSError:
484 except OSError:
485 pass
485 pass
486 try:
486 try:
487 if outname:
487 if outname:
488 os.unlink(outname)
488 os.unlink(outname)
489 except OSError:
489 except OSError:
490 pass
490 pass
491
491
492 filtertable = {
492 filtertable = {
493 'tempfile:': tempfilter,
493 'tempfile:': tempfilter,
494 'pipe:': pipefilter,
494 'pipe:': pipefilter,
495 }
495 }
496
496
497 def filter(s, cmd):
497 def filter(s, cmd):
498 "filter a string through a command that transforms its input to its output"
498 "filter a string through a command that transforms its input to its output"
499 for name, fn in filtertable.iteritems():
499 for name, fn in filtertable.iteritems():
500 if cmd.startswith(name):
500 if cmd.startswith(name):
501 return fn(s, cmd[len(name):].lstrip())
501 return fn(s, cmd[len(name):].lstrip())
502 return pipefilter(s, cmd)
502 return pipefilter(s, cmd)
503
503
504 def binary(s):
504 def binary(s):
505 """return true if a string is binary data"""
505 """return true if a string is binary data"""
506 return bool(s and '\0' in s)
506 return bool(s and '\0' in s)
507
507
508 def increasingchunks(source, min=1024, max=65536):
508 def increasingchunks(source, min=1024, max=65536):
509 '''return no less than min bytes per chunk while data remains,
509 '''return no less than min bytes per chunk while data remains,
510 doubling min after each chunk until it reaches max'''
510 doubling min after each chunk until it reaches max'''
511 def log2(x):
511 def log2(x):
512 if not x:
512 if not x:
513 return 0
513 return 0
514 i = 0
514 i = 0
515 while x:
515 while x:
516 x >>= 1
516 x >>= 1
517 i += 1
517 i += 1
518 return i - 1
518 return i - 1
519
519
520 buf = []
520 buf = []
521 blen = 0
521 blen = 0
522 for chunk in source:
522 for chunk in source:
523 buf.append(chunk)
523 buf.append(chunk)
524 blen += len(chunk)
524 blen += len(chunk)
525 if blen >= min:
525 if blen >= min:
526 if min < max:
526 if min < max:
527 min = min << 1
527 min = min << 1
528 nmin = 1 << log2(blen)
528 nmin = 1 << log2(blen)
529 if nmin > min:
529 if nmin > min:
530 min = nmin
530 min = nmin
531 if min > max:
531 if min > max:
532 min = max
532 min = max
533 yield ''.join(buf)
533 yield ''.join(buf)
534 blen = 0
534 blen = 0
535 buf = []
535 buf = []
536 if buf:
536 if buf:
537 yield ''.join(buf)
537 yield ''.join(buf)
538
538
539 Abort = error.Abort
539 Abort = error.Abort
540
540
541 def always(fn):
541 def always(fn):
542 return True
542 return True
543
543
544 def never(fn):
544 def never(fn):
545 return False
545 return False
546
546
547 def pathto(root, n1, n2):
547 def pathto(root, n1, n2):
548 '''return the relative path from one place to another.
548 '''return the relative path from one place to another.
549 root should use os.sep to separate directories
549 root should use os.sep to separate directories
550 n1 should use os.sep to separate directories
550 n1 should use os.sep to separate directories
551 n2 should use "/" to separate directories
551 n2 should use "/" to separate directories
552 returns an os.sep-separated path.
552 returns an os.sep-separated path.
553
553
554 If n1 is a relative path, it's assumed it's
554 If n1 is a relative path, it's assumed it's
555 relative to root.
555 relative to root.
556 n2 should always be relative to root.
556 n2 should always be relative to root.
557 '''
557 '''
558 if not n1:
558 if not n1:
559 return localpath(n2)
559 return localpath(n2)
560 if os.path.isabs(n1):
560 if os.path.isabs(n1):
561 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
561 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
562 return os.path.join(root, localpath(n2))
562 return os.path.join(root, localpath(n2))
563 n2 = '/'.join((pconvert(root), n2))
563 n2 = '/'.join((pconvert(root), n2))
564 a, b = splitpath(n1), n2.split('/')
564 a, b = splitpath(n1), n2.split('/')
565 a.reverse()
565 a.reverse()
566 b.reverse()
566 b.reverse()
567 while a and b and a[-1] == b[-1]:
567 while a and b and a[-1] == b[-1]:
568 a.pop()
568 a.pop()
569 b.pop()
569 b.pop()
570 b.reverse()
570 b.reverse()
571 return os.sep.join((['..'] * len(a)) + b) or '.'
571 return os.sep.join((['..'] * len(a)) + b) or '.'
572
572
573 def mainfrozen():
573 def mainfrozen():
574 """return True if we are a frozen executable.
574 """return True if we are a frozen executable.
575
575
576 The code supports py2exe (most common, Windows only) and tools/freeze
576 The code supports py2exe (most common, Windows only) and tools/freeze
577 (portable, not much used).
577 (portable, not much used).
578 """
578 """
579 return (safehasattr(sys, "frozen") or # new py2exe
579 return (safehasattr(sys, "frozen") or # new py2exe
580 safehasattr(sys, "importers") or # old py2exe
580 safehasattr(sys, "importers") or # old py2exe
581 imp.is_frozen("__main__")) # tools/freeze
581 imp.is_frozen("__main__")) # tools/freeze
582
582
583 # the location of data files matching the source code
583 # the location of data files matching the source code
584 if mainfrozen():
584 if mainfrozen():
585 # executable version (py2exe) doesn't support __file__
585 # executable version (py2exe) doesn't support __file__
586 datapath = os.path.dirname(sys.executable)
586 datapath = os.path.dirname(sys.executable)
587 else:
587 else:
588 datapath = os.path.dirname(__file__)
588 datapath = os.path.dirname(__file__)
589
589
590 i18n.setdatapath(datapath)
590 i18n.setdatapath(datapath)
591
591
592 _hgexecutable = None
592 _hgexecutable = None
593
593
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is not None:
        return _hgexecutable
    envhg = os.environ.get('HG')
    if envhg:
        # explicit override via the environment wins
        _sethgexecutable(envhg)
    elif mainfrozen():
        # frozen binaries *are* hg
        _sethgexecutable(sys.executable)
    else:
        mainscript = getattr(sys.modules['__main__'], '__file__', '')
        if os.path.basename(mainscript) == 'hg':
            # we were started as the 'hg' script itself
            _sethgexecutable(mainscript)
        else:
            # last resort: search PATH, then fall back to argv[0]
            _sethgexecutable(findexe('hg') or
                             os.path.basename(sys.argv[0]))
    return _hgexecutable
612
612
def _sethgexecutable(path):
    """set location of the 'hg' executable

    Stores the value in the module-level ``_hgexecutable`` cache that
    hgexecutable() returns.
    """
    global _hgexecutable
    _hgexecutable = path
617
617
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    # avoid a shared mutable default argument; None means "no extra env"
    if environ is None:
        environ = {}
    try:
        # flush our own stdout so the child's output is ordered after ours
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or out == sys.__stdout__:
            # child inherits our stdout/stderr directly
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # forward the child's combined stdout+stderr to ``out``
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # OpenVMS encodes success in the low bit
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
674
674
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth 1 means the call itself failed
            # (bad signature); deeper TypeErrors come from inside func
            # and must propagate untouched
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) == 1:
                raise error.SignatureError
            raise

    return check
686
686
def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    # NOTE(review): only the mode bits are copied below (shutil.copymode);
    # atime/mtime are not preserved despite the docstring -- confirm intent
    if os.path.lexists(dest):
        # replace any existing destination, including dangling symlinks
        unlink(dest)
    if os.path.islink(src):
        # recreate the symlink itself rather than copying its target
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            # e.g. src and dest are the same file
            raise Abort(str(inst))
699
699
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible

    Returns a (hardlink, num) pair: whether hardlinking is still
    believed to work, and how many files were copied.
    """

    if hardlink is None:
        # default: attempt hardlinks only when src and dst's parent
        # are on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # thread the (possibly downgraded) hardlink flag through
            # the recursion so one failure disables linking everywhere
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # hardlinking failed (e.g. cross-device); fall back to
                # plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
727
727
# base filenames reserved by Windows regardless of extension or directory
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may never appear in a Windows filename component
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component; both separators are accepted
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for ch in part:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        stem = part.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        last = part[-1]
        # substring test: excludes exactly '.' and '..', which are fine
        if last in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
778
778
# checkosfilename validates a path for the *current* OS: on Windows we
# apply the Windows naming rules, elsewhere the platform module decides
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
783
783
def makelock(info, pathname):
    """Create a lock file at ``pathname`` whose content is ``info``."""
    # prefer a symlink whose target carries ``info``
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            # lock already exists: the lock is held elsewhere
            raise
        # any other symlink failure: fall through to the regular file
    except AttributeError: # no symlink in os
        pass

    # fallback: exclusively create a regular file holding ``info``;
    # O_EXCL makes an existing lock raise EEXIST here too
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
796
796
def readlock(pathname):
    """Return the info stored in the lock file at ``pathname``.

    Handles both representations written by makelock(): a symlink
    (info is the link target) or a regular file (info is the content).
    """
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    # regular-file fallback
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
809
809
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no descriptor available: fall back to stat'ing by name
        return os.stat(fp.name)
    return os.fstat(fd)
816
816
817 # File system features
817 # File system features
818
818
def checkcase(path):
    """Return True if the given path is on a case-sensitive filesystem.

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    orig = os.stat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # name doesn't case-fold at all: no evidence against sensitivity
        return True
    try:
        other = os.stat(os.path.join(dirname, folded))
    except OSError:
        # folded variant doesn't exist: filesystem distinguishes case
        return True
    # identical stat result means both spellings are the same file
    return other != orig
841
841
# optional google-re2 support; _re2 is a tri-state flag:
#   None  -> re2 imported but not yet validated (probed lazily)
#   False -> re2 unavailable
#   True  -> re2 importable and working (set by _re._checkre2 below)
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
847
847
class _re(object):
    """Dispatch regex operations to google-re2 when usable, else stdlib re."""

    def _checkre2(self):
        # probe re2 once and cache the verdict in the module-level _re2
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline in the pattern, not as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
890
890
# module-level singleton: "util.re.compile" transparently prefers re2
re = _re()

# per-directory cache used by fspath():
# {directory: {normcased entry name: on-disk entry name}}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map each entry's normcased form to its on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # (str.replace returns a new string; the previous code discarded the
    # result, so '\' was never actually escaped in the class below)
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # runs of separators pass through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
935
935
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Returns True when a hardlink created next to ``testfile`` reports
    a link count > 1, False otherwise (including when hardlinks cannot
    be created at all).
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            oslink(f1, f2)
        except OSError:
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        if fd is not None:
            fd.close()
        # best-effort cleanup of both probe files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
969
969
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # os.altsep is None on platforms with a single separator
    return os.altsep and path.endswith(os.altsep)
973
973
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)
981
981
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
996
996
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file in the same directory as ``name``
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # original doesn't exist: the (empty) temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a stray temp file behind on failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1035
1035
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # 'w' mode means the caller will rewrite everything, so the
        # temp copy can skip duplicating the original data
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish: rename the temp copy over the permanent name
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abandon all writes: remove the temp copy without renaming
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1073
1073
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    ``notindexed`` is forwarded to makedir() for every directory
    created on the way.
    """
    try:
        makedir(name, notindexed)
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # the parent is missing: create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        # each recursion level chmods its own directory
        os.chmod(name, mode)
1090
1090
1091 def ensuredirs(name, mode=None):
1091 def ensuredirs(name, mode=None, notindexed=False):
1092 """race-safe recursive directory creation"""
1092 """race-safe recursive directory creation
1093
1094 Newly created directories are marked as "not to be indexed by
1095 the content indexing service", if ``notindexed`` is specified
1096 for "write" mode access.
1097 """
1093 if os.path.isdir(name):
1098 if os.path.isdir(name):
1094 return
1099 return
1095 parent = os.path.dirname(os.path.abspath(name))
1100 parent = os.path.dirname(os.path.abspath(name))
1096 if parent != name:
1101 if parent != name:
1097 ensuredirs(parent, mode)
1102 ensuredirs(parent, mode, notindexed)
1098 try:
1103 try:
1099 os.mkdir(name)
1104 makedir(name, notindexed)
1100 except OSError, err:
1105 except OSError, err:
1101 if err.errno == errno.EEXIST and os.path.isdir(name):
1106 if err.errno == errno.EEXIST and os.path.isdir(name):
1102 # someone else seems to have won a directory creation race
1107 # someone else seems to have won a directory creation race
1103 return
1108 return
1104 raise
1109 raise
1105 if mode is not None:
1110 if mode is not None:
1106 os.chmod(name, mode)
1111 os.chmod(name, mode)
1107
1112
def readfile(path):
    """Return the entire contents of the file at ``path`` (binary mode)."""
    with open(path, 'rb') as fp:
        return fp.read()
1114
1119
def writefile(path, text):
    """Replace the contents of ``path`` with ``text`` (binary mode)."""
    with open(path, 'wb') as fp:
        fp.write(text)
1121
1126
def appendfile(path, text):
    """Append ``text`` to ``path`` (binary mode), creating it if absent."""
    with open(path, 'ab') as fp:
        fp.write(text)
1128
1133
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-yield chunks larger than 1MB in 256kB slices so no
            # single queue entry gets excessively large
            for chunk in chunks:
                if len(chunk) <= 2 ** 20:
                    yield chunk
                else:
                    offset = 0
                    while offset < len(chunk):
                        yield chunk[offset:offset + 2 ** 18]
                        offset += 2 ** 18
        self.iter = splitbig(in_iter)
        self._queue = deque()

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        remaining = l
        parts = []
        queue = self._queue
        while remaining is None or remaining > 0:
            if not queue:
                # refill: pull roughly 256kB ahead from the iterator
                want = 2 ** 18
                for chunk in self.iter:
                    queue.append(chunk)
                    want -= len(chunk)
                    if want <= 0:
                        break
                if not queue:
                    # iterator exhausted
                    break

            chunk = queue.popleft()
            if remaining is not None:
                remaining -= len(chunk)
                if remaining < 0:
                    # took too much: push the surplus tail back
                    queue.appendleft(chunk[remaining:])
                    chunk = chunk[:remaining]
            parts.append(chunk)

        return ''.join(parts)
1179
1184
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits without touching the file
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1200
1205
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        raise Abort(_("negative timestamp: %d") % timestamp,
                    hint=_("check your clock"))
    # derive the local UTC offset by comparing naive UTC and local
    # renderings of the same instant
    utcview = datetime.datetime.utcfromtimestamp(timestamp)
    localview = datetime.datetime.fromtimestamp(timestamp)
    diff = utcview - localview
    return timestamp, diff.days * 86400 + diff.seconds
1213
1218
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0   # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the Mercurial-specific zone placeholders: %1 is the
        # signed hours part, %2 the minutes part
        sign = "+" if tz <= 0 else "-"
        minutes = abs(tz) // 60
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1236
1241
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD).

    If date is None, the current time is used (via datestr/makedate)."""
    return datestr(date, format='%Y-%m-%d')
1240
1245
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def _tzoffset(text):
        # Return the UTC offset in seconds encoded by the last token of
        # text ("+HHMM"/"-HHMM"/"GMT"/"UTC"), or None if there is none.
        token = text.split()[-1]
        if len(token) == 5 and token[0] in "+-" and token[1:].isdigit():
            direction = 1 if token[0] == "+" else -1
            return -direction * (int(token[1:3]) * 60 + int(token[3:5])) * 60
        if token in ("GMT", "UTC"):
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset = _tzoffset(string)
    date = string
    if offset is not None:
        # strip the timezone token before handing off to strptime
        date = " ".join(string.split()[:-1])

    # fill in elements missing from the format using the supplied defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if any(("%" + p) in format for p in part):
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone: infer the local offset via mktime
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1281
1286
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if not date:
        # empty/None input maps to the epoch
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # handle the symbolic (localized) keywords before format matching
    if date == _('now'):
        return makedate()
    if date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format in turn until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1358
1363
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unspecified fields down (month/day default to 1)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round unspecified fields up (Dec 31, 23:59:59); try the longest
        # plausible month lengths first, falling back on parse failure
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": anything in the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit closed range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches the whole span it covers
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1434
1439
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop everything from the '@' onward
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    # keep only what follows a '<' (the address part of "Name <addr>")
    bracket = user.find('<')
    if bracket >= 0:
        user = user[bracket + 1:]
    # then truncate at the first space or dot
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
1450
1455
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at != -1:
        user = user[:at]           # drop the domain
    bracket = user.find('<')
    if bracket != -1:
        user = user[bracket + 1:]  # drop the display name
    return user
1460
1465
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; with no '<', find returns -1 so the
    # slice starts at 0; with no '>', take everything to the end
    start = author.find('<') + 1
    stop = author.find('>')
    return author[start:stop] if stop != -1 else author[start:]
1467
1472
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegates to encoding.trim, which appends '...' when it truncates;
    # presumably multibyte-width-aware -- see the encoding module
    return encoding.trim(text, maxlength, ellipsis='...')
1471
1476
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # use the first (largest) unit whose threshold the count reaches
        for threshold, divisor, fmt in unittable:
            if count >= threshold * divisor:
                return fmt % (count / float(divisor))
        # below every threshold: fall back to the last (smallest) unit
        return unittable[-1][2] % count

    return render
1482
1487
# bytecount(n) renders a byte count with binary units (GB/MB/KB/bytes),
# picking the largest unit n reaches and a precision that keeps about
# three significant digits (e.g. '1.23 MB', '456 bytes')
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1495
1500
def uirepr(s):
    """repr() variant for user display.

    Collapses the doubled backslashes repr() produces so Windows paths
    stay readable."""
    rendered = repr(s)
    return rendered.replace('\\\\', '\\')
1499
1504
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory returning a width-aware TextWrapper instance; on first call
    # it rebinds the module-level name MBTextWrapper to the class itself
    # so the class body is only built once.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

            # for compatibility between 2.4 and 2.6
            if getattr(self, 'drop_whitespace', None) is None:
                self.drop_whitespace = kwargs.get('drop_whitespace', True)

        def _cutdown(self, ucstr, space_left):
            # Split ucstr at the largest prefix that fits within
            # space_left display columns (not characters).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1610
1615
def wrap(line, width, initindent='', hangindent=''):
    """Wrap line to at most width display columns.

    initindent prefixes the first output line, hangindent the following
    ones.  Text is decoded/encoded through the local encoding so
    multibyte characters are measured in display columns, not bytes.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1623
1628
def iterlines(iterator):
    """Flatten an iterator of text chunks into an iterator of lines."""
    for piece in iterator:
        # splitlines() drops the line terminators
        for line in piece.splitlines():
            yield line
1628
1633
def expandpath(path):
    """Expand environment variables and the ~user shortcut in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1631
1636
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen (standalone) build: the interpreter *is* the hg binary
        return [sys.executable]
    return gethgcmd()
1642
1647
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows is the easy case: the child either starts and validates
    # the condition or exits on failure, so polling its PID suffices.
    # On Unix a child that fails to start lingers as a zombie until the
    # parent waits on it, which we cannot do since we expect a
    # long-running process on success. Instead, a SIGCHLD handler tells
    # us when our child terminated.
    reaped = set()
    def sigchldhandler(signum, frame):
        reaped.add(os.wait())
    previous = None
    sigchld = getattr(signal, 'SIGCHLD', None)
    if sigchld is not None:
        previous = signal.signal(sigchld, sigchldhandler)
    try:
        pid = spawndetached(args)
        while not condfn():
            childgone = pid in reaped or not testpid(pid)
            # re-check condfn(): the child may have delivered its result
            # just before exiting
            if childgone and not condfn():
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if previous is not None:
            signal.signal(signal.SIGCHLD, previous)
1677
1682
# Python < 2.5 lacks the any()/all() builtins; provide pure-Python
# fallbacks when they are missing, otherwise keep the builtins.
try:
    any, all = any, all
except NameError:
    def any(iterable):
        """Return True if at least one element of iterable is truthy."""
        for element in iterable:
            if element:
                return True
        return False

    def all(iterable):
        """Return True if every element of iterable is truthy."""
        for element in iterable:
            if not element:
                return False
        return True
1692
1697
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if not fn:
        fn = lambda s: s
    # NOTE: keys are joined into the pattern unescaped; callers must not
    # pass keys containing regex metacharacters they do not intend.
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy so the escape entry added for the doubled prefix
        # does not leak into the caller's mapping (the original mutated
        # the argument in place)
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1717
1722
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1734
1739
# recognized spellings of boolean configuration values
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    lowered = s.lower()
    return _booleans.get(lowered)
1745
1750
1746 _hexdig = '0123456789ABCDEFabcdef'
1751 _hexdig = '0123456789ABCDEFabcdef'
1747 _hextochr = dict((a + b, chr(int(a + b, 16)))
1752 _hextochr = dict((a + b, chr(int(a + b, 16)))
1748 for a in _hexdig for b in _hexdig)
1753 for a in _hexdig for b in _hexdig)
1749
1754
1750 def _urlunquote(s):
1755 def _urlunquote(s):
1751 """Decode HTTP/HTML % encoding.
1756 """Decode HTTP/HTML % encoding.
1752
1757
1753 >>> _urlunquote('abc%20def')
1758 >>> _urlunquote('abc%20def')
1754 'abc def'
1759 'abc def'
1755 """
1760 """
1756 res = s.split('%')
1761 res = s.split('%')
1757 # fastpath
1762 # fastpath
1758 if len(res) == 1:
1763 if len(res) == 1:
1759 return s
1764 return s
1760 s = res[0]
1765 s = res[0]
1761 for item in res[1:]:
1766 for item in res[1:]:
1762 try:
1767 try:
1763 s += _hextochr[item[:2]] + item[2:]
1768 s += _hextochr[item[:2]] + item[2:]
1764 except KeyError:
1769 except KeyError:
1765 s += '%' + item
1770 s += '%' + item
1766 except UnicodeDecodeError:
1771 except UnicodeDecodeError:
1767 s += unichr(int(item[:2], 16)) + item[2:]
1772 s += unichr(int(item[:2], 16)) + item[2:]
1768 return s
1773 return s
1769
1774
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped by __str__ in user/passwd and path parts
    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # show only the components that are actually set
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-without-credentials, urllib2-style auth tuple)."""
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Return True if this URL cannot be joined onto another path."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return the URL as a local filesystem path string."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2056
2061
def hasscheme(path):
    """Report whether *path* parses with an explicit URL scheme."""
    u = url(path)
    return bool(u.scheme)
2059
2064
def hasdriveletter(path):
    """Check for a leading Windows drive letter such as 'c:...'.

    A falsy path (None, '') is returned unchanged, matching the original
    short-circuit behaviour.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2062
2067
def urllocalpath(path):
    """Return the local filesystem path for *path*, parsed as a raw URL
    (query string and fragment are kept as part of the path)."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2065
2070
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2072
2077
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2078
2083
def isatty(fd):
    """Return fd.isatty(); objects lacking an isatty() method (plain
    ints, None, ...) are reported as non-interactive."""
    try:
        interactive = fd.isatty()
    except AttributeError:
        return False
    return interactive
2084
2089
# Human-readable elapsed-time formatter built on unitcountfn.
# NOTE(review): unitcountfn is defined elsewhere in this file; the
# entries appear ordered coarsest unit (seconds) to finest (ns) with
# (threshold, divisor, format) triples -- confirm the exact selection
# semantics against unitcountfn's definition.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2100
2105
# mutable cell holding the current stderr indent for nested @timed calls
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = time.time()
        step = 2
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - begin
            _timenesting[0] -= step
            indent = ' ' * _timenesting[0]
            sys.stderr.write('%s%s: %s\n'
                             % (indent, func.__name__, timecount(elapsed)))
    return wrapper
2127
2132
# ordering matters: single-letter suffixes are tried before the
# two-letter forms, and bare 'b' last
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                value = float(spec[:-len(suffix)])
                return int(value * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2149
2154
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behaviour. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, callable) pairs; kept unsorted until called
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort lazily so registration order never matters
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _source, fn in self._hooks]
2167
2172
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [('%s:%s' % (filename, lineno), funcname)
               for filename, lineno, funcname, _text in frames]
    if entries:
        # align the "in <func>" column on the widest location string
        width = max(len(location) for location, _func in entries)
        for location, funcname in entries:
            f.write(' %-*s in %s\n' % (width, location, funcname))
    f.flush()

# convenient shortcut
dst = debugstacktrace
2191 dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now