##// END OF EJS Templates
clone: print number of linked/copied files on --debug
Adrian Buehlmann -
r11251:c61442f6 default
parent child Browse files
Show More
@@ -1,396 +1,402 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from i18n import _
9 from i18n import _
10 from lock import release
10 from lock import release
11 import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo
11 import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo
12 import lock, util, extensions, error, encoding, node
12 import lock, util, extensions, error, encoding, node
13 import merge as mergemod
13 import merge as mergemod
14 import verify as verifymod
14 import verify as verifymod
15 import errno, os, shutil
15 import errno, os, shutil
16
16
def _local(path):
    """Return the module that handles a local path: bundlerepo when the
    path names a bundle file, localrepo otherwise."""
    path = util.expandpath(util.drop_scheme('file', path))
    if os.path.isfile(path):
        return bundlerepo
    return localrepo
20
20
def addbranchrevs(lrepo, repo, branches, revs):
    """Resolve branch names into revision identifiers and merge them
    into the requested revision list.

    lrepo: local repository used to resolve '.' (the dirstate branch)
    repo: repository (possibly remote) whose branchmap is consulted
    branches: branch names, typically from URL '#branch' fragments
    revs: explicitly requested revisions, may be None

    Returns a (revisions-or-None, checkout-rev-or-None) pair; the
    checkout rev is the first entry of the resulting revision list.
    """
    if not branches:
        # nothing to resolve; checkout defaults to the first explicit rev
        return revs or None, revs and revs[0] or None
    revs = revs and list(revs) or []
    if not repo.capable('branchmap'):
        # old server without branchmap support: pass the branch names
        # through untranslated and hope the far side can look them up
        revs.extend(branches)
        return revs, revs[0]
    branchmap = repo.branchmap()
    for branch in branches:
        if branch == '.':
            # '.' means the branch of the local working directory,
            # which only exists for a local repository
            if not lrepo or not lrepo.local():
                raise util.Abort(_("dirstate branch not accessible"))
            revs.append(lrepo.dirstate.branch())
        else:
            # branchmap keys are UTF-8; convert from the local encoding
            butf8 = encoding.fromlocal(branch)
            if butf8 in branchmap:
                # newest head first so revs[0] is a sensible checkout
                revs.extend(node.hex(r) for r in reversed(branchmap[butf8]))
            else:
                # unknown branch name: treat it as a plain revision
                revs.append(branch)
    return revs, revs[0]
41
41
def parseurl(url, branches=None):
    '''parse url#branch, returning url, branches+[branch]'''

    base = branches or []
    if '#' not in url:
        return url, base
    # only the first '#' separates URL from branch fragment
    url, fragment = url.split('#', 1)
    return url, base + [fragment]
49
49
# map of URL scheme -> repository module (or factory callable).
# _lookup() consults this table and falls back to 'file' when the
# scheme is missing or unknown.
schemes = {
    'bundle': bundlerepo,
    'file': _local,
    'http': httprepo,
    'https': httprepo,
    'ssh': sshrepo,
    'static-http': statichttprepo,
}
58
58
def _lookup(path):
    """Map a repository path/URL onto the module that handles its
    scheme, defaulting to 'file' for plain paths."""
    scheme = 'file'
    if path:
        sep = path.find(':')
        if sep > 0:
            scheme = path[:sep]
    handler = schemes.get(scheme) or schemes['file']
    try:
        # 'file' maps to a callable that chooses bundlerepo vs localrepo
        return handler(path)
    except TypeError:
        # plain module, not callable: return it as-is
        return handler
70
70
def islocal(repo):
    '''return true if repo or path is local'''
    if not isinstance(repo, str):
        # already a repository object: ask it directly
        return repo.local()
    try:
        return _lookup(repo).islocal(repo)
    except AttributeError:
        # handler module has no islocal() -> not a local scheme
        return False
79
79
def repository(ui, path='', create=False):
    """return a repository object for the specified path"""
    repo = _lookup(path).instance(ui, path, create)
    # prefer the repo's own ui (it may carry repo-local configuration)
    ui = getattr(repo, "ui", ui)
    # give every loaded extension a chance to wrap or augment the repo
    for name, module in extensions.extensions():
        reposetup = getattr(module, 'reposetup', None)
        if reposetup:
            reposetup(ui, repo)
    return repo
89
89
def defaultdest(source):
    '''return default destination of clone if none is given'''
    # normalize first so trailing separators do not yield an empty name
    normalized = os.path.normpath(source)
    return os.path.basename(normalized)
93
93
def localpath(path):
    """Strip a leading file: scheme from path, if present.

    'file://localhost/...' keeps its leading slash so the result stays
    an absolute path; longer prefixes are tried first.
    """
    for prefix, keep_from in (('file://localhost/', 16),
                              ('file://', 7),
                              ('file:', 5)):
        if path.startswith(prefix):
            return path[keep_from:]
    return path
102
102
def share(ui, source, dest=None, update=True):
    '''create a shared repository

    Creates a new repository at *dest* whose store is shared with
    *source* (via a .hg/sharedpath pointer) instead of being copied.

    source: local repository object or URL/path (must be local)
    dest: destination directory (defaults to basename of source)
    update: update the new working directory after sharing; a value
            other than True is treated as the revision to check out
    '''

    if not islocal(source):
        raise util.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        # a '#branch' fragment selects the revision to check out
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source
        origsource = source = srcrepo.url()
        checkout = None

    # if our source is already sharing, point at its store instead of
    # creating a chain of shares
    sharedpath = srcrepo.sharedpath

    root = os.path.realpath(dest)
    roothg = os.path.join(root, '.hg')

    if os.path.exists(roothg):
        raise util.Abort(_('destination already exists'))

    if not os.path.isdir(root):
        os.mkdir(root)
    os.mkdir(roothg)

    # propagate the source's requirements, tolerating a repo old enough
    # to have no requires file at all
    requirements = ''
    try:
        requirements = srcrepo.opener('requires').read()
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise

    # mark the new repo as shared and record where the real store lives
    requirements += 'shared\n'
    file(os.path.join(roothg, 'requires'), 'w').write(requirements)
    file(os.path.join(roothg, 'sharedpath'), 'w').write(sharedpath)

    # carry over the source's default pull path, if any
    default = srcrepo.ui.config('paths', 'default')
    if default:
        f = file(os.path.join(roothg, 'hgrc'), 'w')
        f.write('[paths]\ndefault = %s\n' % default)
        f.close()

    r = repository(ui, root)

    if update:
        r.ui.status(_("updating working directory\n"))
        if update is not True:
            checkout = update
        # first resolvable of: requested rev, 'default' branch, tip
        for test in (checkout, 'default', 'tip'):
            if test is None:
                continue
            try:
                uprev = r.lookup(test)
                break
            except error.RepoLookupError:
                continue
        _update(r, uprev)
168
168
def clone(ui, source, dest=None, pull=False, rev=None, update=True,
          stream=False, branch=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository objects, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        # '#branch' fragments select branches to clone
        source, branch = parseurl(origsource, branch)
        src_repo = repository(ui, source)
    else:
        src_repo = source
        branch = None
        origsource = source = src_repo.url()
    rev, checkout = addbranchrevs(src_repo, src_repo, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = localpath(dest)
    source = localpath(source)

    if os.path.exists(dest):
        if not os.path.isdir(dest):
            raise util.Abort(_("destination '%s' already exists") % dest)
        elif os.listdir(dest):
            raise util.Abort(_("destination '%s' is not empty") % dest)

    # removes the partly created destination on failure; close() is
    # called on success to disarm it.  shutil.rmtree is captured at
    # construction time so cleanup works even during interpreter
    # shutdown, when module globals may already be gone.
    class DirCleanup(object):
        def __init__(self, dir_):
            self.rmtree = shutil.rmtree
            self.dir_ = dir_
        def close(self):
            self.dir_ = None
        def cleanup(self):
            if self.dir_:
                self.rmtree(self.dir_, True)

    src_lock = dest_lock = dir_cleanup = None
    try:
        if islocal(dest):
            dir_cleanup = DirCleanup(dest)

        abspath = origsource
        copy = False
        # a raw store copy (hardlink clone) is only possible when both
        # ends are local and no revision subset was requested
        if src_repo.cancopy() and islocal(dest):
            abspath = os.path.abspath(util.drop_scheme('file', origsource))
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                src_lock = src_repo.lock(wait=False)
            except error.LockError:
                # source is busy: fall back to a regular pull clone
                copy = False

        if copy:
            src_repo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                dir_cleanup.dir_ = hgdir
            try:
                dest_path = hgdir
                os.mkdir(dest_path)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dir_cleanup.close()
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            # copyfiles() threads the hardlink flag through the loop:
            # None means "try hardlinking", and it flips to False the
            # first time a link fails, so we don't retry per file
            hardlink = None
            num = 0
            for f in src_repo.store.copylist():
                src = os.path.join(src_repo.sharedpath, f)
                dst = os.path.join(dest_path, f)
                dstbase = os.path.dirname(dst)
                if dstbase and not os.path.exists(dstbase):
                    os.mkdir(dstbase)
                if os.path.exists(src):
                    if dst.endswith('data'):
                        # lock to avoid premature writing to the target
                        dest_lock = lock.lock(os.path.join(dstbase, "lock"))
                    hardlink, n = util.copyfiles(src, dst, hardlink)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)

            # we need to re-init the repo after manually copying the data
            # into it
            dest_repo = repository(ui, dest)
            src_repo.hook('outgoing', source='clone', node='0'*40)
        else:
            try:
                dest_repo = repository(ui, dest, create=True)
            except OSError, inst:
                if inst.errno == errno.EEXIST:
                    dir_cleanup.close()
                    raise util.Abort(_("destination '%s' already exists")
                                     % dest)
                raise

            revs = None
            if rev:
                if 'lookup' not in src_repo.capabilities:
                    raise util.Abort(_("src repository does not support "
                                       "revision lookup and so doesn't "
                                       "support clone by revision"))
                revs = [src_repo.lookup(r) for r in rev]
                checkout = revs[0]
            if dest_repo.local():
                # normal case: pull into the new local repository
                dest_repo.clone(src_repo, heads=revs, stream=stream)
            elif src_repo.local():
                # local -> remote clone is expressed as a push
                src_repo.push(dest_repo, revs=revs)
            else:
                raise util.Abort(_("clone from remote to remote not supported"))

        # clone succeeded: disarm the destination cleanup
        if dir_cleanup:
            dir_cleanup.close()

        if dest_repo.local():
            # record the source as the default path for pull/push
            fp = dest_repo.opener("hgrc", "w", text=True)
            fp.write("[paths]\n")
            fp.write("default = %s\n" % abspath)
            fp.close()

            dest_repo.ui.setconfig('paths', 'default', abspath)

            if update:
                if update is not True:
                    checkout = update
                    if src_repo.local():
                        checkout = src_repo.lookup(update)
                # first resolvable of: requested rev, 'default', tip
                for test in (checkout, 'default', 'tip'):
                    if test is None:
                        continue
                    try:
                        uprev = dest_repo.lookup(test)
                        break
                    except error.RepoLookupError:
                        continue
                bn = dest_repo[uprev].branch()
                dest_repo.ui.status(_("updating to branch %s\n")
                                    % encoding.tolocal(bn))
                _update(dest_repo, uprev)

        return src_repo, dest_repo
    finally:
        release(src_lock, dest_lock)
        if dir_cleanup is not None:
            dir_cleanup.cleanup()
356
362
def _showstats(repo, stats):
    """Print a one-line summary of merge/update statistics."""
    updated, merged, removed, unresolved = stats
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n")
                   % (updated, merged, removed, unresolved))
360
366
def update(repo, node):
    """update the working directory to node, merging linear changes

    Returns True when unresolved files remain."""
    stats = mergemod.update(repo, node, False, False, None)
    _showstats(repo, stats)
    unresolved = stats[3]
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return unresolved > 0
368
374
# 'update' is shadowed inside clone() by its identically-named keyword
# argument, so keep a private alias for internal callers
_update = update
371
377
def clean(repo, node, show_stats=True):
    """forcibly switch the working directory to node, clobbering changes

    Returns True when unresolved files remain."""
    # overwrite=True discards local modifications instead of merging
    merge_stats = mergemod.update(repo, node, False, True, None)
    if show_stats:
        _showstats(repo, merge_stats)
    # element 3 of the stats tuple counts unresolved files
    return merge_stats[3] > 0
378
384
def merge(repo, node, force=None, remind=True):
    """branch merge with node, resolving changes

    Returns True when unresolved files remain."""
    stats = mergemod.update(repo, node, True, force, False)
    _showstats(repo, stats)
    unresolved = stats[3] > 0
    if unresolved:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolved
389
395
def revert(repo, node, choose):
    """revert changes to revision in node without updating dirstate

    Returns True when unresolved files remain."""
    stats = mergemod.update(repo, node, False, True, choose)
    return stats[3] > 0
393
399
def verify(repo):
    """verify the consistency of a repository"""
    # thin wrapper so callers need not import the verify module directly
    return verifymod.verify(repo)
@@ -1,1347 +1,1350 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import cStringIO, errno, re, shutil, sys, tempfile, traceback
18 import cStringIO, errno, re, shutil, sys, tempfile, traceback
19 import os, stat, time, calendar, textwrap, signal
19 import os, stat, time, calendar, textwrap, signal
20 import imp
20 import imp
21
21
22 # Python compatibility
22 # Python compatibility
23
23
def sha1(s):
    """Return a SHA-1 hash object for s.

    Delegates to _fastsha1, which replaces both names with the real
    hash constructor on first use.
    """
    return _fastsha1(s)

def _fastsha1(s):
    # This function will import sha1 from hashlib or sha (whichever is
    # available) and overwrite itself with it on the first call.
    # Subsequent calls will go directly to the imported function.
    try:
        from hashlib import sha1 as _sha1
    except ImportError:
        # pre-2.5 Pythons lack hashlib; fall back to the old sha module
        from sha import sha as _sha1
    # rebind both module-level names so the import/try overhead is paid
    # exactly once per process
    global _fastsha1, sha1
    _fastsha1 = sha1 = _sha1
    return _sha1(s)
38
38
39 import __builtin__
39 import __builtin__
40
40
def fakebuffer(sliceable, offset=0):
    # minimal stand-in for the builtin buffer(): returns a copy rather
    # than a zero-copy view, which is acceptable for our callers
    return sliceable[offset:]
if not hasattr(__builtin__, 'buffer'):
    # some Python implementations lack buffer(); install the fallback
    __builtin__.buffer = fakebuffer
45
45
46 import subprocess
46 import subprocess
47 closefds = os.name == 'posix'
47 closefds = os.name == 'posix'
48
48
def popen2(cmd, env=None, newlines=False):
    """Run cmd through a shell; return its (stdin, stdout) pipe pair."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
59
59
def popen3(cmd, env=None, newlines=False):
    """Run cmd through a shell; return its (stdin, stdout, stderr)
    pipe triple.  bufsize=-1 for the same reason as popen2."""
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr
68
68
def version():
    """Return version information if available."""
    try:
        # generated at build time; absent when running from source
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
76
76
# used by parsedate: formats tried in order when no explicit format is
# given; covers ISO-ish, US slash, asctime-style and time-only dates
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )
104
104
# coarser formats additionally accepted when parsing date ranges,
# where a bare year or month is meaningful
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
111
111
def cachefunc(func):
    '''cache the result of function calls

    Returns a wrapper around func that memoizes results by positional
    arguments.  The cache is unbounded.
    '''
    # XXX doesn't handle keywords args
    cache = {}
    # use __code__ (an alias available since Python 2.6, and the only
    # spelling that survives on Python 3) instead of the legacy
    # func_code attribute
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
130
130
def lrucachefunc(func):
    '''cache most recent results of function calls

    Like cachefunc, but the cache is bounded: once it holds more than
    20 entries, the least recently used one is evicted.
    '''
    cache = {}
    order = []  # keys, least recently used first
    # __code__ is the modern, Python-3-safe alias of the legacy
    # func_code attribute
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
157
157
class propertycache(object):
    '''descriptor computing an attribute on first access, then storing
    the result on the instance so that it shadows the descriptor and
    later lookups skip the computation entirely'''
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        # run the wrapped function once, cache its value on the instance
        value = self.func(obj)
        setattr(obj, self.name, value)
        return value
166
166
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # feed s to the command's stdin and collect everything it writes to
    # stdout; stderr stays attached to the caller's stderr
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, err = proc.communicate(s)
    return out
173
173
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write s into a temp file for the command to read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # pre-create the output file so the template name always exists
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # OpenVMS encodes success in the low bit
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explain_exit(code)))
        # close the output file explicitly instead of leaking the handle
        fp = open(outname, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()
    finally:
        # best-effort cleanup of both temp files; only swallow OS errors
        # (a bare except here would also hide KeyboardInterrupt)
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
207
207
# maps a command-spec prefix to the filtering strategy implementing it;
# consulted by filter() below, which falls back to pipefilter
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
212
212
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a recognized prefix; anything else runs through a pipe
    for prefix, filterfn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return filterfn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
219
219
def binary(s):
    """return true if a string is binary data"""
    # empty or None input is never considered binary
    if not s:
        return False
    return '\0' in s
223
223
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); by convention 0 maps to 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            # not enough buffered yet; keep accumulating
            continue
        if min < max:
            # grow the threshold: double it, or jump straight to the
            # largest power of two at or below what we just buffered
            min = min << 1
            rounded = 1 << log2(pendinglen)
            if rounded > min:
                min = rounded
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        # flush whatever is left, even if below the threshold
        yield ''.join(pending)
254
254
# convenience re-export so users of this module can raise/catch util.Abort
Abort = error.Abort
256
256
def always(fn):
    '''match predicate accepting any input'''
    return True
259
259
def never(fn):
    '''match predicate rejecting any input'''
    return False
262
262
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        # nothing to be relative to: just localize n2
        return localpath(n2)
    if os.path.isabs(n1):
        # on Windows, a path on a different drive cannot be expressed
        # relatively; anchor n2 at root instead
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        # make n2 absolute (slash-separated) so both sides are comparable
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    # strip the common leading components of both paths, then climb out
    # of what remains of n1 ('..' per component) and descend into n2
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'
288
288
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root"""
    if endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        # interpret relative names with respect to root/cwd
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    audit_path = path_auditor(root)
    if name != rootsep and name.startswith(rootsep):
        # fast path: name lexically lives under root
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                # ancestor does not exist; name cannot be under root
                break
            if samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without meeting `root'
                break
            name = dirname

        raise Abort('%s not under root' % myname)
335
335
# cached location of the 'hg' executable; filled in lazily by
# hgexecutable() and set explicitly via set_hgexecutable()
_hgexecutable = None
337
337
def main_is_frozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if hasattr(sys, "frozen"):        # new py2exe
        return True
    if hasattr(sys, "importers"):     # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
347
347
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    # compute and cache the location on first use only
    if _hgexecutable is not None:
        return _hgexecutable
    envhg = os.environ.get('HG')
    if envhg:
        set_hgexecutable(envhg)
    elif main_is_frozen():
        # frozen binaries: the interpreter *is* hg
        set_hgexecutable(sys.executable)
    else:
        set_hgexecutable(find_exe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
363
363
def set_hgexecutable(path):
    """set location of the 'hg' executable"""
    # stored in the module-level cache read back by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
368
368
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.'''
    # NOTE: environ is only read, never mutated, so the shared mutable
    # default dict is harmless here
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    # keep the unquoted command for error reporting below
    origcmd = cmd
    if os.name == 'nt':
        # quote the whole command line for cmd.exe
        cmd = '"%s"' % cmd
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    # let hooks and child commands re-invoke the same hg
    env['HG'] = hgexecutable()
    rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                         env=env, cwd=cwd)
    if sys.platform == 'OpenVMS' and rc & 1:
        # OpenVMS encodes success in the low bit
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explain_exit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            # duck-typing: a ui-like object gets the message printed...
            onerr.warn(errmsg + '\n')
        except AttributeError:
            # ...anything else is treated as an exception class to raise
            raise onerr(errmsg)
    return rc
403
403
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback depth of exactly 1 means the TypeError came from
            # the call itself (bad argument list), not from inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return wrapper
415
415
# os.path.lexists is not available on python2.3
def lexists(filename):
    "test whether a file with this name exists. does not follow symlinks"
    try:
        os.lstat(filename)
    except OSError:
        # missing or inaccessible path: report absent; a bare except here
        # would also swallow KeyboardInterrupt/SystemExit
        return False
    return True
424
424
def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # prune any parent directories left empty by the removal; failure
    # (parent non-empty, or no parent component) is expected and harmless
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass
433
433
def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        # recreate the symlink itself rather than copying its target
        # NOTE(review): the bare except below also swallows
        # KeyboardInterrupt — consider narrowing to OSError
        try:
            os.unlink(dest)
        except:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            # copystat carries over mode bits and atime/mtime
            shutil.copystat(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))
448
448
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        # default to hardlinking when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for entry, kind in osutil.listdir(src):
            # recurse; a failed hardlink anywhere disables it for the rest
            hardlink, copied = copyfiles(os.path.join(src, entry),
                                         os.path.join(dst, entry), hardlink)
            num += copied
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # linking failed (cross-device, unsupported fs, ...):
                # fall back to plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
473
476
class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository'''

    def __init__(self, root):
        # paths and directory prefixes that already passed the audit;
        # consulted to avoid re-stat'ing on repeated calls
        self.audited = set()
        self.auditeddir = set()
        self.root = root

    def __call__(self, path):
        # raises Abort if `path' (relative to self.root) is unsafe
        if path in self.audited:
            return
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        # purely lexical checks first: drive letters, top-level .hg, '..'
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            # reject any non-leading component spelling a repo directory
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside repo %r') % (path, base))
        def check(prefix):
            # stat one ancestor directory of path; reject symlinks and
            # directories containing a nested repository
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    raise Abort(_('path %r is inside repo %r') %
                                (path, prefix))
        # walk ancestors from the deepest up, stopping at already-audited ones
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
536
539
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    # lstat so a symlink reports its own link count, not its target's
    st = os.lstat(pathname)
    return st.st_nlink
540
543
if not hasattr(os, 'link'):
    # platform without hardlink support: fail loudly when one is requested
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))
else:
    os_link = os.link
546
549
def lookup_reg(key, name=None, scope=None):
    '''registry lookup stub for platforms without a Windows registry;
    always returns None'''
    return None
549
552
def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
557
560
558 if os.name == 'nt':
561 if os.name == 'nt':
559 from windows import *
562 from windows import *
560 else:
563 else:
561 from posix import *
564 from posix import *
562
565
def makelock(info, pathname):
    # preferred strategy: a symlink is created atomically and carries the
    # lock payload in its target, so no separate write is needed
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            # the lock is already held by someone else
            raise
        # other OS errors: fall through to the plain-file strategy below
    except AttributeError: # no symlink in os
        pass

    # fallback: exclusive file creation, payload written into the file
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
575
578
def readlock(pathname):
    # locks are normally symlinks whose target carries the payload
    # (see makelock); try that representation first
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
        # EINVAL: not a symlink; ENOSYS: readlink unsupported —
        # fall through to the plain-file format
    except AttributeError: # no symlink in os
        pass
    return posixfile(pathname).read()
585
588
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        # file-like object without a real descriptor: stat it by name
        return os.stat(fp.name)
592
595
593 # File system features
596 # File system features
594
597
595 def checkcase(path):
598 def checkcase(path):
596 """
599 """
597 Check whether the given path is on a case-sensitive filesystem
600 Check whether the given path is on a case-sensitive filesystem
598
601
599 Requires a path (like /foo/.hg) ending with a foldable final
602 Requires a path (like /foo/.hg) ending with a foldable final
600 directory component.
603 directory component.
601 """
604 """
602 s1 = os.stat(path)
605 s1 = os.stat(path)
603 d, b = os.path.split(path)
606 d, b = os.path.split(path)
604 p2 = os.path.join(d, b.upper())
607 p2 = os.path.join(d, b.upper())
605 if path == p2:
608 if path == p2:
606 p2 = os.path.join(d, b.lower())
609 p2 = os.path.join(d, b.lower())
607 try:
610 try:
608 s2 = os.stat(p2)
611 s2 = os.stat(p2)
609 if s2 == s1:
612 if s2 == s1:
610 return False
613 return False
611 return True
614 return True
612 except:
615 except:
613 return True
616 return True
614
617
# cache of directory listings used by fspath(), keyed by normalized
# directory path
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        # NOTE(review): if name equals root exactly (no trailing
        # separator), name[l] raises IndexError — presumably callers never
        # pass that; verify against call sites
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.exists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): str.replace returns a new string and its result is
    # discarded here, so seps is not actually modified — confirm intent
    seps.replace('\\','\\\\')
    # alternating tokens: runs of non-separator chars and separator chars
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # find the directory entry matching part case-insensitively and
        # emit it in its on-disk spelling
        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)
662
665
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode & 0777
            # exec support exists only if a fresh file starts without exec
            # bits AND flipping them actually sticks
            new_file_has_exec = m & EXECFLAGS
            os.chmod(fn, m ^ EXECFLAGS)
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)
689
692
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # file already exists
    target = tempfile.mktemp(dir=path, prefix='hg-checklink-')
    try:
        os.symlink(".", target)
        os.unlink(target)
        return True
    except (OSError, AttributeError):
        # no os.symlink at all, or the filesystem refused to create one
        return False
701
704
def needbinarypatch():
    """return True if patches should be applied in binary mode by default."""
    # presumably to avoid CRLF line-ending translation on Windows —
    # TODO confirm
    return os.name == 'nt'
705
708
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # preserve the original's return value shape: on platforms without an
    # altsep this yields os.altsep itself (None) rather than False
    return os.altsep and path.endswith(os.altsep)
709
712
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)
717
720
def gui():
    '''Are we running in a GUI?'''
    if os.name == "nt" or os.name == "mac":
        return True
    # elsewhere, a GUI is assumed when an X display is configured; note
    # this returns the DISPLAY value (or None), matching the original
    return os.environ.get("DISPLAY")
721
724
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file next to the target so a later rename stays
    # on the same filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        # original missing: use requested mode, falling back to umask,
        # and never grant execute bits
        st_mode = createmode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(temp, st_mode)
    if emptyok:
        # caller will truncate/rewrite the file anyway; skip the copy
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy; hand back the empty temp file
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # best-effort removal of the temp file before re-raising
        try: os.unlink(temp)
        except: pass
        raise
    return temp
769
772
class atomictempfile(object):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file. When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode='w+b', createmode=None):
        # name-mangled to avoid clashes with attributes proxied from _fp
        self.__name = name
        self._fp = None
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        self._fp = posixfile(self.temp, mode)

    def __getattr__(self, name):
        # delegate every other file operation to the underlying file
        return getattr(self._fp, name)

    def rename(self):
        # publish the changes: move the temp copy over the real name
        if not self._fp.closed:
            self._fp.close()
        rename(self.temp, localpath(self.__name))

    def __del__(self):
        if not self._fp:
            # __init__ failed before the file was opened
            return
        if not self._fp.closed:
            # rename() was never called: discard the temp file
            try:
                os.unlink(self.temp)
            except: pass
            self._fp.close()
800
803
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError, err:
        if err.errno == errno.EEXIST:
            # directory already exists - done
            return
        if err.errno != errno.ENOENT:
            raise
    # parent is missing: create it first, then retry this directory
    parent = os.path.abspath(os.path.dirname(name))
    makedirs(parent, mode)
    makedirs(name, mode)
816
819
class opener(object):
    """Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    def __init__(self, base, audit=True):
        self.base = base
        if audit:
            # reject paths escaping the base directory
            self.audit_path = path_auditor(base)
        else:
            self.audit_path = always
        # mode bits applied to newly created files, or None for default
        self.createmode = None

    @propertycache
    def _can_symlink(self):
        # computed once per instance: does base support symlinks?
        return checklink(self.base)

    def _fixfilemode(self, name):
        # apply createmode, without execute bits, to a fresh file
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        self.audit_path(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        # nlink: -1 unknown, 0 newly created, >1 hardlinked elsewhere
        nlink = -1
        if mode not in ("r", "rb"):
            try:
                nlink = nlinks(f)
            except OSError:
                nlink = 0
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    makedirs(d, self.createmode)
            if atomictemp:
                return atomictempfile(f, mode, self.createmode)
            if nlink > 1:
                # break up the hardlink before writing (copy on write)
                rename(mktempcopy(f), f)
        fp = posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit_path(dst)
        linkname = os.path.join(self.base, dst)
        try:
            # replace any existing link or file at the destination
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the link target as file content
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)
888
891
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.iter = iter(in_iter)
        self.buf = ''
        # refill granularity: pull at least 64 KB from the source
        self.targetsize = 2**16

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and self.iter:
            # Clamp to a multiple of self.targetsize
            targetsize = max(l, self.targetsize)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                # the source ran dry; disable further refill attempts
                self.iter = False
            self.buf = collector.getvalue()
        if len(self.buf) == l:
            s, self.buf = str(self.buf), ''
        else:
            # buffer() keeps a zero-copy view of the remainder
            s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s
922
925
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never request more than the remaining limit allows
        nbytes = size
        if limit is not None:
            nbytes = min(limit, size)
        # nbytes == 0 short-circuits to 0 (falsy) without reading
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
943
946
def makedate():
    '''return the current local time as a (unixtime, tzoffset) pair'''
    lt = time.localtime()
    # index 8 is tm_isdst: use the DST-aware offset when applicable
    if lt[8] == 1 and time.daylight:
        return time.mktime(lt), time.altzone
    return time.mktime(lt), time.timezone
951
954
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if "%1" in format or "%2" in format:
        # %1 expands to the signed hour part of the utc offset,
        # %2 to the minute part (offset is seconds west of UTC,
        # hence the inverted sign)
        if tz > 0:
            sign = "-"
        else:
            sign = "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    return time.strftime(format, time.gmtime(float(t) - tz))
965
968
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # thin wrapper over datestr with a fixed ISO date format
    return datestr(date, format='%Y-%m-%d')
969
972
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps strptime part groups (e.g. "d", "mb", "HI") to the
    string value substituted when the part is absent from format.
    """
    def timezone(string):
        # recognize a trailing "+HHMM"/"-HHMM" or "GMT"/"UTC" zone and
        # return its offset in seconds west of UTC, else None
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    if defaults is None:
        # avoid the shared-mutable-default-argument pitfall
        defaults = {}

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        # strip the recognized zone before handing to strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    for part in defaults:
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # no explicit zone: interpret in the local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1005
1008
def parsedate(date, formats=None, defaults=None):
    """parse a localized date/time string and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: already a "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                if part[0] in "HMS":
                    # missing time-of-day parts default to zero
                    defaults[part] = "00"
                else:
                    # missing date parts default to today's values
                    defaults[part] = datestr(now, "%" + part[0])

        # try each format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r ') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1051
1054
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    """

    def lower(date):
        # earliest timestamp a (possibly partial) date can refer to
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp: fill missing parts with maxima, probing
        # month lengths from 31 down to the always-valid 28
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()
    if date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N" means within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # plain date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1101
1104
def shortuser(user):
    """Return a short representation of a user name or email address."""
    pos = user.find('@')
    if pos >= 0:
        user = user[:pos]       # drop the mail domain
    pos = user.find('<')
    if pos >= 0:
        user = user[pos + 1:]   # drop a leading 'Real Name <'
    pos = user.find(' ')
    if pos >= 0:
        user = user[:pos]       # keep only the first word
    pos = user.find('.')
    if pos >= 0:
        user = user[:pos]       # keep only the part before the dot
    return user
1117
1120
def email(author):
    '''get email of author.'''
    # take everything between '<' and '>'; when the brackets are
    # absent, find() returns -1 and the slice spans the whole string
    end = author.find('>')
    if end == -1:
        end = None
    return author[author.find('<') + 1:end]
1124
1127
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) > maxlength:
        # reserve three characters for the trailing dots
        return "%s..." % text[:maxlength - 3]
    return text
1131
1134
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # errors on the root path itself are fatal; others are ignored
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        def _add_dir_if_not_there(dirlst, dirname):
            # record dirname by stat identity; return True only the
            # first time it is seen, so symlink cycles terminate
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # platform cannot compare stat results; disable following
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk the symlink target as a separate tree
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            # os.walk only descends into what remains in dirs
            dirs[:] = newdirs
1178
1181
# cached result of rcpath(); None until computed on first call
_rcpath = None
1180
1183
def os_rcpath():
    '''return default os-specific hgrc search path'''
    # system-wide entries first, then the user's own configuration
    path = system_rcpath()
    path.extend(user_rcpath())
    normalized = [os.path.normpath(f) for f in path]
    return normalized
1187
1190
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # computed once per process and cached in the module global
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = expandpath(p)
                if os.path.isdir(p):
                    # a directory entry contributes all its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath
1211
1214
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # (multiplier, divisor, format): scanned from the largest unit
    # down, so the first entry whose threshold is met chooses both the
    # unit and the number of decimal places
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for multiplier, divisor, format in units:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    # nbytes is 0 or negative: fall back to plain bytes
    return units[-1][2] % nbytes
1232
1235
def drop_scheme(scheme, path):
    '''remove a leading "scheme:" (and "scheme://" root) from path'''
    prefix = scheme + ':'
    if not path.startswith(prefix):
        return path
    path = path[len(prefix):]
    if not path.startswith('//'):
        return path
    if scheme != 'file':
        # generic scheme: just drop the '//' marker
        return path[2:]
    i = path.find('/', 2)
    if i == -1:
        # 'file://host' with no path component
        return ''
    # On Windows, absolute paths are rooted at the current drive
    # root. On POSIX they are rooted at the file system root.
    if os.name == 'nt':
        droot = os.path.splitdrive(os.getcwd())[0] + '/'
        return os.path.join(droot, path[i + 1:])
    return path[i:]
1252
1255
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')
1256
1259
def wrap(line, hangindent, width=None):
    """wrap line to width, indenting continuation lines by hangindent.

    width defaults to the terminal width minus two columns.
    """
    if width is None:
        width = termwidth() - 2
    if width <= hangindent:
        # adjust for weird terminal size
        width = max(78, hangindent + 1)
    padding = '\n' + ' ' * hangindent
    # To avoid corrupting multi-byte characters in line, we must wrap
    # a Unicode string instead of a bytestring.
    try:
        u = line.decode(encoding.encoding)
        w = padding.join(textwrap.wrap(u, width=width - hangindent))
        return w.encode(encoding.encoding)
    except UnicodeDecodeError:
        # undecodable input: fall back to byte-oriented wrapping
        return padding.join(textwrap.wrap(line, width=width - hangindent))
1272
1275
def iterlines(iterator):
    """Yield individual lines from an iterator of text chunks."""
    for piece in iterator:
        lines = piece.splitlines()
        for single in lines:
            yield single
1277
1280
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1280
1283
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # frozen builds run the bundled interpreter directly
    return [sys.executable] if main_is_frozen() else gethgcmd()
1291
1294
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the dead child; record its (pid, status) so the polling
        # loop below can notice the termination
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD only exists on Unix; on Windows we rely on testpid() alone
    if hasattr(signal, 'SIGCHLD'):
        prevhandler = signal.signal(signal.SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-test condfn() after detecting termination: the child
            # may have satisfied the condition just before exiting
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the caller's SIGCHLD handler if we replaced it
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1325
1328
# Compatibility shim: any() and all() are builtins from Python 2.5 on;
# provide pure-Python fallbacks for older interpreters.
try:
    any, all = any, all
except NameError:
    def any(iterable):
        """Return True if any element of iterable is true."""
        for element in iterable:
            if element:
                return True
        return False

    def all(iterable):
        """Return True if every element of iterable is true."""
        for element in iterable:
            if not element:
                return False
        return True
1340
1343
def termwidth():
    """Return the terminal width in columns.

    A valid integer COLUMNS environment variable takes precedence over
    the platform-specific detection in termwidth_().
    """
    columns = os.environ.get('COLUMNS')
    if columns is not None:
        try:
            return int(columns)
        except ValueError:
            # ignore a malformed COLUMNS value and fall through
            pass
    return termwidth_()
General Comments 0
You need to be logged in to leave comments. Login now