error: move UnexpectedOutput (now ResponseError)
Matt Mackall
r7641:d2f75383 default
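This changeset moves the UnexpectedOutput exception out of util, renaming it ResponseError and defining it in error.py, so that dispatch.py and the wire-protocol code can catch it without importing util. Code that previously caught util.UnexpectedOutput now catches error.ResponseError; a minimal sketch of the new spelling from a caller's point of view (the safe_heads helper is hypothetical, not part of this change):

    from mercurial import error, hg

    def safe_heads(ui, path):
        # Hypothetical helper: fetch remote heads and report malformed
        # server output roughly the way dispatch._runcatch() does.
        repo = hg.repository(ui, path)
        try:
            return repo.heads()
        except error.ResponseError, inst:
            # before this change: except util.UnexpectedOutput, inst:
            ui.warn("abort: %s %r\n" % (inst.args[0], inst.args[1]))
            return []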
@@ -1,412 +1,412
1 1 # dispatch.py - command dispatching for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from i18n import _
9 9 import os, sys, atexit, signal, pdb, socket, errno, shlex, time
10 10 import util, commands, hg, lock, fancyopts, extensions, hook, error
11 11 import cmdutil
12 12 import ui as _ui
13 13
14 14 def run():
15 15 "run the command in sys.argv"
16 16 sys.exit(dispatch(sys.argv[1:]))
17 17
18 18 def dispatch(args):
19 19 "run the command specified in args"
20 20 try:
21 21 u = _ui.ui(traceback='--traceback' in args)
22 22 except util.Abort, inst:
23 23 sys.stderr.write(_("abort: %s\n") % inst)
24 24 return -1
25 25 return _runcatch(u, args)
26 26
27 27 def _runcatch(ui, args):
28 28 def catchterm(*args):
29 29 raise util.SignalInterrupt
30 30
31 31 for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
32 32 num = getattr(signal, name, None)
33 33 if num: signal.signal(num, catchterm)
34 34
35 35 try:
36 36 try:
37 37 # enter the debugger before command execution
38 38 if '--debugger' in args:
39 39 pdb.set_trace()
40 40 try:
41 41 return _dispatch(ui, args)
42 42 finally:
43 43 ui.flush()
44 44 except:
45 45 # enter the debugger when we hit an exception
46 46 if '--debugger' in args:
47 47 pdb.post_mortem(sys.exc_info()[2])
48 48 ui.print_exc()
49 49 raise
50 50
51 51 except error.ParseError, inst:
52 52 if inst.args[0]:
53 53 ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
54 54 commands.help_(ui, inst.args[0])
55 55 else:
56 56 ui.warn(_("hg: %s\n") % inst.args[1])
57 57 commands.help_(ui, 'shortlist')
58 58 except cmdutil.AmbiguousCommand, inst:
59 59 ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
60 60 (inst.args[0], " ".join(inst.args[1])))
61 61 except cmdutil.UnknownCommand, inst:
62 62 ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
63 63 commands.help_(ui, 'shortlist')
64 64 except error.RepoError, inst:
65 65 ui.warn(_("abort: %s!\n") % inst)
66 66 except error.LockHeld, inst:
67 67 if inst.errno == errno.ETIMEDOUT:
68 68 reason = _('timed out waiting for lock held by %s') % inst.locker
69 69 else:
70 70 reason = _('lock held by %s') % inst.locker
71 71 ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
72 72 except error.LockUnavailable, inst:
73 73 ui.warn(_("abort: could not lock %s: %s\n") %
74 74 (inst.desc or inst.filename, inst.strerror))
75 75 except error.RevlogError, inst:
76 76 ui.warn(_("abort: %s!\n") % inst)
77 77 except util.SignalInterrupt:
78 78 ui.warn(_("killed!\n"))
79 79 except KeyboardInterrupt:
80 80 try:
81 81 ui.warn(_("interrupted!\n"))
82 82 except IOError, inst:
83 83 if inst.errno == errno.EPIPE:
84 84 if ui.debugflag:
85 85 ui.warn(_("\nbroken pipe\n"))
86 86 else:
87 87 raise
88 88 except socket.error, inst:
89 89 ui.warn(_("abort: %s\n") % inst.args[-1])
90 90 except IOError, inst:
91 91 if hasattr(inst, "code"):
92 92 ui.warn(_("abort: %s\n") % inst)
93 93 elif hasattr(inst, "reason"):
94 94 try: # usually it is in the form (errno, strerror)
95 95 reason = inst.reason.args[1]
96 96 except: # it might be anything, for example a string
97 97 reason = inst.reason
98 98 ui.warn(_("abort: error: %s\n") % reason)
99 99 elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE:
100 100 if ui.debugflag:
101 101 ui.warn(_("broken pipe\n"))
102 102 elif getattr(inst, "strerror", None):
103 103 if getattr(inst, "filename", None):
104 104 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
105 105 else:
106 106 ui.warn(_("abort: %s\n") % inst.strerror)
107 107 else:
108 108 raise
109 109 except OSError, inst:
110 110 if getattr(inst, "filename", None):
111 111 ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
112 112 else:
113 113 ui.warn(_("abort: %s\n") % inst.strerror)
114 except util.UnexpectedOutput, inst:
114 except error.ResponseError, inst:
115 115 ui.warn(_("abort: %s") % inst.args[0])
116 116 if not isinstance(inst.args[1], basestring):
117 117 ui.warn(" %r\n" % (inst.args[1],))
118 118 elif not inst.args[1]:
119 119 ui.warn(_(" empty string\n"))
120 120 else:
121 121 ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
122 122 except ImportError, inst:
123 123 m = str(inst).split()[-1]
124 124 ui.warn(_("abort: could not import module %s!\n") % m)
125 125 if m in "mpatch bdiff".split():
126 126 ui.warn(_("(did you forget to compile extensions?)\n"))
127 127 elif m in "zlib".split():
128 128 ui.warn(_("(is your Python install correct?)\n"))
129 129
130 130 except util.Abort, inst:
131 131 ui.warn(_("abort: %s\n") % inst)
132 132 except MemoryError:
133 133 ui.warn(_("abort: out of memory\n"))
134 134 except SystemExit, inst:
135 135 # Commands shouldn't sys.exit directly, but give a return code.
136 136 # Just in case catch this and pass exit code to caller.
137 137 return inst.code
138 138 except:
139 139 ui.warn(_("** unknown exception encountered, details follow\n"))
140 140 ui.warn(_("** report bug details to "
141 141 "http://www.selenic.com/mercurial/bts\n"))
142 142 ui.warn(_("** or mercurial@selenic.com\n"))
143 143 ui.warn(_("** Mercurial Distributed SCM (version %s)\n")
144 144 % util.version())
145 145 ui.warn(_("** Extensions loaded: %s\n")
146 146 % ", ".join([x[0] for x in extensions.extensions()]))
147 147 raise
148 148
149 149 return -1
150 150
151 151 def _findrepo(p):
152 152 while not os.path.isdir(os.path.join(p, ".hg")):
153 153 oldp, p = p, os.path.dirname(p)
154 154 if p == oldp:
155 155 return None
156 156
157 157 return p
158 158
159 159 def _parse(ui, args):
160 160 options = {}
161 161 cmdoptions = {}
162 162
163 163 try:
164 164 args = fancyopts.fancyopts(args, commands.globalopts, options)
165 165 except fancyopts.getopt.GetoptError, inst:
166 166 raise error.ParseError(None, inst)
167 167
168 168 if args:
169 169 cmd, args = args[0], args[1:]
170 170 aliases, i = cmdutil.findcmd(cmd, commands.table,
171 171 ui.config("ui", "strict"))
172 172 cmd = aliases[0]
173 173 defaults = ui.config("defaults", cmd)
174 174 if defaults:
175 175 args = shlex.split(defaults) + args
176 176 c = list(i[1])
177 177 else:
178 178 cmd = None
179 179 c = []
180 180
181 181 # combine global options into local
182 182 for o in commands.globalopts:
183 183 c.append((o[0], o[1], options[o[1]], o[3]))
184 184
185 185 try:
186 186 args = fancyopts.fancyopts(args, c, cmdoptions)
187 187 except fancyopts.getopt.GetoptError, inst:
188 188 raise error.ParseError(cmd, inst)
189 189
190 190 # separate global options back out
191 191 for o in commands.globalopts:
192 192 n = o[1]
193 193 options[n] = cmdoptions[n]
194 194 del cmdoptions[n]
195 195
196 196 return (cmd, cmd and i[0] or None, args, options, cmdoptions)
197 197
198 198 def _parseconfig(config):
199 199 """parse the --config options from the command line"""
200 200 parsed = []
201 201 for cfg in config:
202 202 try:
203 203 name, value = cfg.split('=', 1)
204 204 section, name = name.split('.', 1)
205 205 if not section or not name:
206 206 raise IndexError
207 207 parsed.append((section, name, value))
208 208 except (IndexError, ValueError):
209 209 raise util.Abort(_('malformed --config option: %s') % cfg)
210 210 return parsed
211 211
212 212 def _earlygetopt(aliases, args):
213 213 """Return list of values for an option (or aliases).
214 214
215 215 The values are listed in the order they appear in args.
216 216 The options and values are removed from args.
217 217 """
218 218 try:
219 219 argcount = args.index("--")
220 220 except ValueError:
221 221 argcount = len(args)
222 222 shortopts = [opt for opt in aliases if len(opt) == 2]
223 223 values = []
224 224 pos = 0
225 225 while pos < argcount:
226 226 if args[pos] in aliases:
227 227 if pos + 1 >= argcount:
228 228 # ignore and let getopt report an error if there is no value
229 229 break
230 230 del args[pos]
231 231 values.append(args.pop(pos))
232 232 argcount -= 2
233 233 elif args[pos][:2] in shortopts:
234 234 # short option can have no following space, e.g. hg log -Rfoo
235 235 values.append(args.pop(pos)[2:])
236 236 argcount -= 1
237 237 else:
238 238 pos += 1
239 239 return values
240 240
241 241 _loaded = {}
242 242 def _dispatch(ui, args):
243 243 # read --config before doing anything else
244 244 # (e.g. to change trust settings for reading .hg/hgrc)
245 245 config = _earlygetopt(['--config'], args)
246 246 if config:
247 247 ui.updateopts(config=_parseconfig(config))
248 248
249 249 # check for cwd
250 250 cwd = _earlygetopt(['--cwd'], args)
251 251 if cwd:
252 252 os.chdir(cwd[-1])
253 253
254 254 # read the local repository .hgrc into a local ui object
255 255 path = _findrepo(os.getcwd()) or ""
256 256 if not path:
257 257 lui = ui
258 258 if path:
259 259 try:
260 260 lui = _ui.ui(parentui=ui)
261 261 lui.readconfig(os.path.join(path, ".hg", "hgrc"))
262 262 except IOError:
263 263 pass
264 264
265 265 # now we can expand paths, even ones in .hg/hgrc
266 266 rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
267 267 if rpath:
268 268 path = lui.expandpath(rpath[-1])
269 269 lui = _ui.ui(parentui=ui)
270 270 lui.readconfig(os.path.join(path, ".hg", "hgrc"))
271 271
272 272 extensions.loadall(lui)
273 273 for name, module in extensions.extensions():
274 274 if name in _loaded:
275 275 continue
276 276
277 277 # setup extensions
278 278 # TODO this should be generalized to scheme, where extensions can
279 279 # redepend on other extensions. then we should toposort them, and
280 280 # do initialization in correct order
281 281 extsetup = getattr(module, 'extsetup', None)
282 282 if extsetup:
283 283 extsetup()
284 284
285 285 cmdtable = getattr(module, 'cmdtable', {})
286 286 overrides = [cmd for cmd in cmdtable if cmd in commands.table]
287 287 if overrides:
288 288 ui.warn(_("extension '%s' overrides commands: %s\n")
289 289 % (name, " ".join(overrides)))
290 290 commands.table.update(cmdtable)
291 291 _loaded[name] = 1
292 292 # check for fallback encoding
293 293 fallback = lui.config('ui', 'fallbackencoding')
294 294 if fallback:
295 295 util._fallbackencoding = fallback
296 296
297 297 fullargs = args
298 298 cmd, func, args, options, cmdoptions = _parse(lui, args)
299 299
300 300 if options["config"]:
301 301 raise util.Abort(_("Option --config may not be abbreviated!"))
302 302 if options["cwd"]:
303 303 raise util.Abort(_("Option --cwd may not be abbreviated!"))
304 304 if options["repository"]:
305 305 raise util.Abort(_(
306 306 "Option -R has to be separated from other options (i.e. not -qR) "
307 307 "and --repository may only be abbreviated as --repo!"))
308 308
309 309 if options["encoding"]:
310 310 util._encoding = options["encoding"]
311 311 if options["encodingmode"]:
312 312 util._encodingmode = options["encodingmode"]
313 313 if options["time"]:
314 314 def get_times():
315 315 t = os.times()
316 316 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
317 317 t = (t[0], t[1], t[2], t[3], time.clock())
318 318 return t
319 319 s = get_times()
320 320 def print_time():
321 321 t = get_times()
322 322 ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
323 323 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
324 324 atexit.register(print_time)
325 325
326 326 ui.updateopts(options["verbose"], options["debug"], options["quiet"],
327 327 not options["noninteractive"], options["traceback"])
328 328
329 329 if options['help']:
330 330 return commands.help_(ui, cmd, options['version'])
331 331 elif options['version']:
332 332 return commands.version_(ui)
333 333 elif not cmd:
334 334 return commands.help_(ui, 'shortlist')
335 335
336 336 repo = None
337 337 if cmd not in commands.norepo.split():
338 338 try:
339 339 repo = hg.repository(ui, path=path)
340 340 ui = repo.ui
341 341 if not repo.local():
342 342 raise util.Abort(_("repository '%s' is not local") % path)
343 343 ui.setconfig("bundle", "mainreporoot", repo.root)
344 344 except error.RepoError:
345 345 if cmd not in commands.optionalrepo.split():
346 346 if args and not path: # try to infer -R from command args
347 347 repos = map(_findrepo, args)
348 348 guess = repos[0]
349 349 if guess and repos.count(guess) == len(repos):
350 350 return _dispatch(ui, ['--repository', guess] + fullargs)
351 351 if not path:
352 352 raise error.RepoError(_("There is no Mercurial repository"
353 353 " here (.hg not found)"))
354 354 raise
355 355 args.insert(0, repo)
356 356
357 357 d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
358 358
359 359 # run pre-hook, and abort if it fails
360 360 ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs))
361 361 if ret:
362 362 return ret
363 363 ret = _runcommand(ui, options, cmd, d)
364 364 # run post-hook, passing command result
365 365 hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
366 366 result = ret)
367 367 return ret
368 368
369 369 def _runcommand(ui, options, cmd, cmdfunc):
370 370 def checkargs():
371 371 try:
372 372 return cmdfunc()
373 373 except util.SignatureError:
374 374 raise error.ParseError(cmd, _("invalid arguments"))
375 375
376 376 if options['profile']:
377 377 import hotshot, hotshot.stats
378 378 prof = hotshot.Profile("hg.prof")
379 379 try:
380 380 try:
381 381 return prof.runcall(checkargs)
382 382 except:
383 383 try:
384 384 ui.warn(_('exception raised - generating '
385 385 'profile anyway\n'))
386 386 except:
387 387 pass
388 388 raise
389 389 finally:
390 390 prof.close()
391 391 stats = hotshot.stats.load("hg.prof")
392 392 stats.strip_dirs()
393 393 stats.sort_stats('time', 'calls')
394 394 stats.print_stats(40)
395 395 elif options['lsprof']:
396 396 try:
397 397 from mercurial import lsprof
398 398 except ImportError:
399 399 raise util.Abort(_(
400 400 'lsprof not available - install from '
401 401 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
402 402 p = lsprof.Profiler()
403 403 p.enable(subcalls=True)
404 404 try:
405 405 return checkargs()
406 406 finally:
407 407 p.disable()
408 408 stats = lsprof.Stats(p.getstats())
409 409 stats.sort()
410 410 stats.pprint(top=10, file=sys.stderr, climit=5)
411 411 else:
412 412 return checkargs()
@@ -1,48 +1,52
1 1 """
2 2 error.py - Mercurial exceptions
3 3
4 4 This allows us to catch exceptions at higher levels without forcing imports
5 5
6 6 Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10 """
11 11
12 12 # Do not import anything here, please
13 13
14 14 class RevlogError(Exception):
15 15 pass
16 16
17 17 class LookupError(RevlogError, KeyError):
18 18 def __init__(self, name, index, message):
19 19 self.name = name
20 20 if isinstance(name, str) and len(name) == 20:
21 21 from node import short
22 22 name = short(name)
23 23 RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
24 24
25 25 def __str__(self):
26 26 return RevlogError.__str__(self)
27 27
28 28 class ParseError(Exception):
29 29 """Exception raised on errors in parsing the command line."""
30 30
31 31 class RepoError(Exception):
32 32 pass
33 33
34 34 class CapabilityError(RepoError):
35 35 pass
36 36
37 37 class LockError(IOError):
38 38 def __init__(self, errno, strerror, filename, desc):
39 39 IOError.__init__(self, errno, strerror, filename)
40 40 self.desc = desc
41 41
42 42 class LockHeld(LockError):
43 43 def __init__(self, errno, filename, desc, locker):
44 44 LockError.__init__(self, errno, 'Lock held', filename, desc)
45 45 self.locker = locker
46 46
47 47 class LockUnavailable(LockError):
48 48 pass
49
50 class ResponseError(Exception):
51 """Raised to print an error with part of output and exit."""
52
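ResponseError carries two arguments: a translated message and the raw data received from the peer; _runcatch() above prints the message followed by the payload (or its repr when it is not a plain string). A minimal sketch of raising it from a protocol parser, mirroring the httprepo.py hunks below (parse_heads is a hypothetical stand-in):

    from node import bin
    from i18n import _
    import error

    def parse_heads(d):
        # Hypothetical parser: turn a "hex hex ...\n" reply into node ids,
        # raising ResponseError if the server sent something unexpected.
        try:
            return [bin(x) for x in d[:-1].split(" ")]
        except Exception:
            raise error.ResponseError(_("unexpected response:"), d)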
@@ -1,237 +1,237
1 1 # httprepo.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from node import bin, hex, nullid
10 10 from i18n import _
11 11 import repo, os, urllib, urllib2, urlparse, zlib, util, httplib
12 12 import errno, socket, changegroup, statichttprepo, error, url
13 13
14 14 def zgenerator(f):
15 15 zd = zlib.decompressobj()
16 16 try:
17 17 for chunk in util.filechunkiter(f):
18 18 yield zd.decompress(chunk)
19 19 except httplib.HTTPException:
20 20 raise IOError(None, _('connection ended unexpectedly'))
21 21 yield zd.flush()
22 22
23 23 class httprepository(repo.repository):
24 24 def __init__(self, ui, path):
25 25 self.path = path
26 26 self.caps = None
27 27 self.handler = None
28 28 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
29 29 if query or frag:
30 30 raise util.Abort(_('unsupported URL component: "%s"') %
31 31 (query or frag))
32 32
33 33 # urllib cannot handle URLs with embedded user or passwd
34 34 self._url, authinfo = url.getauthinfo(path)
35 35
36 36 self.ui = ui
37 37 self.ui.debug(_('using %s\n') % self._url)
38 38
39 39 self.urlopener = url.opener(ui, authinfo)
40 40
41 41 def url(self):
42 42 return self.path
43 43
44 44 # look up capabilities only when needed
45 45
46 46 def get_caps(self):
47 47 if self.caps is None:
48 48 try:
49 49 self.caps = util.set(self.do_read('capabilities').split())
50 50 except error.RepoError:
51 51 self.caps = util.set()
52 52 self.ui.debug(_('capabilities: %s\n') %
53 53 (' '.join(self.caps or ['none'])))
54 54 return self.caps
55 55
56 56 capabilities = property(get_caps)
57 57
58 58 def lock(self):
59 59 raise util.Abort(_('operation not supported over http'))
60 60
61 61 def do_cmd(self, cmd, **args):
62 62 data = args.pop('data', None)
63 63 headers = args.pop('headers', {})
64 64 self.ui.debug(_("sending %s command\n") % cmd)
65 65 q = {"cmd": cmd}
66 66 q.update(args)
67 67 qs = '?%s' % urllib.urlencode(q)
68 68 cu = "%s%s" % (self._url, qs)
69 69 try:
70 70 if data:
71 71 self.ui.debug(_("sending %s bytes\n") % len(data))
72 72 resp = self.urlopener.open(urllib2.Request(cu, data, headers))
73 73 except urllib2.HTTPError, inst:
74 74 if inst.code == 401:
75 75 raise util.Abort(_('authorization failed'))
76 76 raise
77 77 except httplib.HTTPException, inst:
78 78 self.ui.debug(_('http error while sending %s command\n') % cmd)
79 79 self.ui.print_exc()
80 80 raise IOError(None, inst)
81 81 except IndexError:
82 82 # this only happens with Python 2.3, later versions raise URLError
83 83 raise util.Abort(_('http error, possibly caused by proxy setting'))
84 84 # record the url we got redirected to
85 85 resp_url = resp.geturl()
86 86 if resp_url.endswith(qs):
87 87 resp_url = resp_url[:-len(qs)]
88 88 if self._url != resp_url:
89 89 self.ui.status(_('real URL is %s\n') % resp_url)
90 90 self._url = resp_url
91 91 try:
92 92 proto = resp.getheader('content-type')
93 93 except AttributeError:
94 94 proto = resp.headers['content-type']
95 95
96 96 # accept old "text/plain" and "application/hg-changegroup" for now
97 97 if not (proto.startswith('application/mercurial-') or
98 98 proto.startswith('text/plain') or
99 99 proto.startswith('application/hg-changegroup')):
100 100 self.ui.debug(_("Requested URL: '%s'\n") % cu)
101 101 raise error.RepoError(_("'%s' does not appear to be an hg repository")
102 102 % self._url)
103 103
104 104 if proto.startswith('application/mercurial-'):
105 105 try:
106 106 version = proto.split('-', 1)[1]
107 107 version_info = tuple([int(n) for n in version.split('.')])
108 108 except ValueError:
109 109 raise error.RepoError(_("'%s' sent a broken Content-Type "
110 110 "header (%s)") % (self._url, proto))
111 111 if version_info > (0, 1):
112 112 raise error.RepoError(_("'%s' uses newer protocol %s") %
113 113 (self._url, version))
114 114
115 115 return resp
116 116
117 117 def do_read(self, cmd, **args):
118 118 fp = self.do_cmd(cmd, **args)
119 119 try:
120 120 return fp.read()
121 121 finally:
122 122 # if using keepalive, allow connection to be reused
123 123 fp.close()
124 124
125 125 def lookup(self, key):
126 126 self.requirecap('lookup', _('look up remote revision'))
127 127 d = self.do_cmd("lookup", key = key).read()
128 128 success, data = d[:-1].split(' ', 1)
129 129 if int(success):
130 130 return bin(data)
131 131 raise error.RepoError(data)
132 132
133 133 def heads(self):
134 134 d = self.do_read("heads")
135 135 try:
136 136 return map(bin, d[:-1].split(" "))
137 137 except:
138 raise util.UnexpectedOutput(_("unexpected response:"), d)
138 raise error.ResponseError(_("unexpected response:"), d)
139 139
140 140 def branches(self, nodes):
141 141 n = " ".join(map(hex, nodes))
142 142 d = self.do_read("branches", nodes=n)
143 143 try:
144 144 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
145 145 return br
146 146 except:
147 raise util.UnexpectedOutput(_("unexpected response:"), d)
147 raise error.ResponseError(_("unexpected response:"), d)
148 148
149 149 def between(self, pairs):
150 150 batch = 8 # avoid giant requests
151 151 r = []
152 152 for i in xrange(0, len(pairs), batch):
153 153 n = " ".join(["-".join(map(hex, p)) for p in pairs[i:i + batch]])
154 154 d = self.do_read("between", pairs=n)
155 155 try:
156 156 r += [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
157 157 except:
158 raise util.UnexpectedOutput(_("unexpected response:"), d)
158 raise error.ResponseError(_("unexpected response:"), d)
159 159 return r
160 160
161 161 def changegroup(self, nodes, kind):
162 162 n = " ".join(map(hex, nodes))
163 163 f = self.do_cmd("changegroup", roots=n)
164 164 return util.chunkbuffer(zgenerator(f))
165 165
166 166 def changegroupsubset(self, bases, heads, source):
167 167 self.requirecap('changegroupsubset', _('look up remote changes'))
168 168 baselst = " ".join([hex(n) for n in bases])
169 169 headlst = " ".join([hex(n) for n in heads])
170 170 f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst)
171 171 return util.chunkbuffer(zgenerator(f))
172 172
173 173 def unbundle(self, cg, heads, source):
174 174 # have to stream bundle to a temp file because we do not have
175 175 # http 1.1 chunked transfer.
176 176
177 177 type = ""
178 178 types = self.capable('unbundle')
179 179 # servers older than d1b16a746db6 will send 'unbundle' as a
180 180 # boolean capability
181 181 try:
182 182 types = types.split(',')
183 183 except AttributeError:
184 184 types = [""]
185 185 if types:
186 186 for x in types:
187 187 if x in changegroup.bundletypes:
188 188 type = x
189 189 break
190 190
191 191 tempname = changegroup.writebundle(cg, None, type)
192 192 fp = url.httpsendfile(tempname, "rb")
193 193 try:
194 194 try:
195 195 resp = self.do_read(
196 196 'unbundle', data=fp,
197 197 headers={'Content-Type': 'application/octet-stream'},
198 198 heads=' '.join(map(hex, heads)))
199 199 resp_code, output = resp.split('\n', 1)
200 200 try:
201 201 ret = int(resp_code)
202 202 except ValueError, err:
203 raise util.UnexpectedOutput(
203 raise error.ResponseError(
204 204 _('push failed (unexpected response):'), resp)
205 205 self.ui.write(output)
206 206 return ret
207 207 except socket.error, err:
208 208 if err[0] in (errno.ECONNRESET, errno.EPIPE):
209 209 raise util.Abort(_('push failed: %s') % err[1])
210 210 raise util.Abort(err[1])
211 211 finally:
212 212 fp.close()
213 213 os.unlink(tempname)
214 214
215 215 def stream_out(self):
216 216 return self.do_cmd('stream_out')
217 217
218 218 class httpsrepository(httprepository):
219 219 def __init__(self, ui, path):
220 220 if not url.has_https:
221 221 raise util.Abort(_('Python support for SSL and HTTPS '
222 222 'is not installed'))
223 223 httprepository.__init__(self, ui, path)
224 224
225 225 def instance(ui, path, create):
226 226 if create:
227 227 raise util.Abort(_('cannot create new http repository'))
228 228 try:
229 229 if path.startswith('https:'):
230 230 inst = httpsrepository(ui, path)
231 231 else:
232 232 inst = httprepository(ui, path)
233 233 inst.between([(nullid, nullid)])
234 234 return inst
235 235 except error.RepoError:
236 236 ui.note('(falling back to static-http)\n')
237 237 return statichttprepo.instance(ui, "static-" + path, create)
@@ -1,2151 +1,2151
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 13 import os, time, util, extensions, hook, inspect, error
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store', 'fncache')
20 20
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.root = os.path.realpath(path)
24 24 self.path = os.path.join(self.root, ".hg")
25 25 self.origroot = path
26 26 self.opener = util.opener(self.path)
27 27 self.wopener = util.opener(self.root)
28 28
29 29 if not os.path.isdir(self.path):
30 30 if create:
31 31 if not os.path.exists(path):
32 32 os.mkdir(path)
33 33 os.mkdir(self.path)
34 34 requirements = ["revlogv1"]
35 35 if parentui.configbool('format', 'usestore', True):
36 36 os.mkdir(os.path.join(self.path, "store"))
37 37 requirements.append("store")
38 38 if parentui.configbool('format', 'usefncache', True):
39 39 requirements.append("fncache")
40 40 # create an invalid changelog
41 41 self.opener("00changelog.i", "a").write(
42 42 '\0\0\0\2' # represents revlogv2
43 43 ' dummy changelog to prevent using the old repo layout'
44 44 )
45 45 reqfile = self.opener("requires", "w")
46 46 for r in requirements:
47 47 reqfile.write("%s\n" % r)
48 48 reqfile.close()
49 49 else:
50 50 raise error.RepoError(_("repository %s not found") % path)
51 51 elif create:
52 52 raise error.RepoError(_("repository %s already exists") % path)
53 53 else:
54 54 # find requirements
55 55 requirements = []
56 56 try:
57 57 requirements = self.opener("requires").read().splitlines()
58 58 for r in requirements:
59 59 if r not in self.supported:
60 60 raise error.RepoError(_("requirement '%s' not supported") % r)
61 61 except IOError, inst:
62 62 if inst.errno != errno.ENOENT:
63 63 raise
64 64
65 65 self.store = store.store(requirements, self.path, util.opener)
66 66 self.spath = self.store.path
67 67 self.sopener = self.store.opener
68 68 self.sjoin = self.store.join
69 69 self.opener.createmode = self.store.createmode
70 70
71 71 self.ui = ui.ui(parentui=parentui)
72 72 try:
73 73 self.ui.readconfig(self.join("hgrc"), self.root)
74 74 extensions.loadall(self.ui)
75 75 except IOError:
76 76 pass
77 77
78 78 self.tagscache = None
79 79 self._tagstypecache = None
80 80 self.branchcache = None
81 81 self._ubranchcache = None # UTF-8 version of branchcache
82 82 self._branchcachetip = None
83 83 self.nodetagscache = None
84 84 self.filterpats = {}
85 85 self._datafilters = {}
86 86 self._transref = self._lockref = self._wlockref = None
87 87
88 88 def __getattr__(self, name):
89 89 if name == 'changelog':
90 90 self.changelog = changelog.changelog(self.sopener)
91 91 self.sopener.defversion = self.changelog.version
92 92 return self.changelog
93 93 if name == 'manifest':
94 94 self.changelog
95 95 self.manifest = manifest.manifest(self.sopener)
96 96 return self.manifest
97 97 if name == 'dirstate':
98 98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
99 99 return self.dirstate
100 100 else:
101 101 raise AttributeError(name)
102 102
103 103 def __getitem__(self, changeid):
104 104 if changeid == None:
105 105 return context.workingctx(self)
106 106 return context.changectx(self, changeid)
107 107
108 108 def __nonzero__(self):
109 109 return True
110 110
111 111 def __len__(self):
112 112 return len(self.changelog)
113 113
114 114 def __iter__(self):
115 115 for i in xrange(len(self)):
116 116 yield i
117 117
118 118 def url(self):
119 119 return 'file:' + self.root
120 120
121 121 def hook(self, name, throw=False, **args):
122 122 return hook.hook(self.ui, self, name, throw, **args)
123 123
124 124 tag_disallowed = ':\r\n'
125 125
126 126 def _tag(self, names, node, message, local, user, date, parent=None,
127 127 extra={}):
128 128 use_dirstate = parent is None
129 129
130 130 if isinstance(names, str):
131 131 allchars = names
132 132 names = (names,)
133 133 else:
134 134 allchars = ''.join(names)
135 135 for c in self.tag_disallowed:
136 136 if c in allchars:
137 137 raise util.Abort(_('%r cannot be used in a tag name') % c)
138 138
139 139 for name in names:
140 140 self.hook('pretag', throw=True, node=hex(node), tag=name,
141 141 local=local)
142 142
143 143 def writetags(fp, names, munge, prevtags):
144 144 fp.seek(0, 2)
145 145 if prevtags and prevtags[-1] != '\n':
146 146 fp.write('\n')
147 147 for name in names:
148 148 m = munge and munge(name) or name
149 149 if self._tagstypecache and name in self._tagstypecache:
150 150 old = self.tagscache.get(name, nullid)
151 151 fp.write('%s %s\n' % (hex(old), m))
152 152 fp.write('%s %s\n' % (hex(node), m))
153 153 fp.close()
154 154
155 155 prevtags = ''
156 156 if local:
157 157 try:
158 158 fp = self.opener('localtags', 'r+')
159 159 except IOError, err:
160 160 fp = self.opener('localtags', 'a')
161 161 else:
162 162 prevtags = fp.read()
163 163
164 164 # local tags are stored in the current charset
165 165 writetags(fp, names, None, prevtags)
166 166 for name in names:
167 167 self.hook('tag', node=hex(node), tag=name, local=local)
168 168 return
169 169
170 170 if use_dirstate:
171 171 try:
172 172 fp = self.wfile('.hgtags', 'rb+')
173 173 except IOError, err:
174 174 fp = self.wfile('.hgtags', 'ab')
175 175 else:
176 176 prevtags = fp.read()
177 177 else:
178 178 try:
179 179 prevtags = self.filectx('.hgtags', parent).data()
180 180 except error.LookupError:
181 181 pass
182 182 fp = self.wfile('.hgtags', 'wb')
183 183 if prevtags:
184 184 fp.write(prevtags)
185 185
186 186 # committed tags are stored in UTF-8
187 187 writetags(fp, names, util.fromlocal, prevtags)
188 188
189 189 if use_dirstate and '.hgtags' not in self.dirstate:
190 190 self.add(['.hgtags'])
191 191
192 192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
193 193 extra=extra)
194 194
195 195 for name in names:
196 196 self.hook('tag', node=hex(node), tag=name, local=local)
197 197
198 198 return tagnode
199 199
200 200 def tag(self, names, node, message, local, user, date):
201 201 '''tag a revision with one or more symbolic names.
202 202
203 203 names is a list of strings or, when adding a single tag, names may be a
204 204 string.
205 205
206 206 if local is True, the tags are stored in a per-repository file.
207 207 otherwise, they are stored in the .hgtags file, and a new
208 208 changeset is committed with the change.
209 209
210 210 keyword arguments:
211 211
212 212 local: whether to store tags in non-version-controlled file
213 213 (default False)
214 214
215 215 message: commit message to use if committing
216 216
217 217 user: name of user to use if committing
218 218
219 219 date: date tuple to use if committing'''
220 220
221 221 for x in self.status()[:5]:
222 222 if '.hgtags' in x:
223 223 raise util.Abort(_('working copy of .hgtags is changed '
224 224 '(please commit .hgtags manually)'))
225 225
226 226 self._tag(names, node, message, local, user, date)
227 227
228 228 def tags(self):
229 229 '''return a mapping of tag to node'''
230 230 if self.tagscache:
231 231 return self.tagscache
232 232
233 233 globaltags = {}
234 234 tagtypes = {}
235 235
236 236 def readtags(lines, fn, tagtype):
237 237 filetags = {}
238 238 count = 0
239 239
240 240 def warn(msg):
241 241 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
242 242
243 243 for l in lines:
244 244 count += 1
245 245 if not l:
246 246 continue
247 247 s = l.split(" ", 1)
248 248 if len(s) != 2:
249 249 warn(_("cannot parse entry"))
250 250 continue
251 251 node, key = s
252 252 key = util.tolocal(key.strip()) # stored in UTF-8
253 253 try:
254 254 bin_n = bin(node)
255 255 except TypeError:
256 256 warn(_("node '%s' is not well formed") % node)
257 257 continue
258 258 if bin_n not in self.changelog.nodemap:
259 259 warn(_("tag '%s' refers to unknown node") % key)
260 260 continue
261 261
262 262 h = []
263 263 if key in filetags:
264 264 n, h = filetags[key]
265 265 h.append(n)
266 266 filetags[key] = (bin_n, h)
267 267
268 268 for k, nh in filetags.iteritems():
269 269 if k not in globaltags:
270 270 globaltags[k] = nh
271 271 tagtypes[k] = tagtype
272 272 continue
273 273
274 274 # we prefer the global tag if:
275 275 # it supersedes us OR
276 276 # mutual supersedes and it has a higher rank
277 277 # otherwise we win because we're tip-most
278 278 an, ah = nh
279 279 bn, bh = globaltags[k]
280 280 if (bn != an and an in bh and
281 281 (bn not in ah or len(bh) > len(ah))):
282 282 an = bn
283 283 ah.extend([n for n in bh if n not in ah])
284 284 globaltags[k] = an, ah
285 285 tagtypes[k] = tagtype
286 286
287 287 # read the tags file from each head, ending with the tip
288 288 f = None
289 289 for rev, node, fnode in self._hgtagsnodes():
290 290 f = (f and f.filectx(fnode) or
291 291 self.filectx('.hgtags', fileid=fnode))
292 292 readtags(f.data().splitlines(), f, "global")
293 293
294 294 try:
295 295 data = util.fromlocal(self.opener("localtags").read())
296 296 # localtags are stored in the local character set
297 297 # while the internal tag table is stored in UTF-8
298 298 readtags(data.splitlines(), "localtags", "local")
299 299 except IOError:
300 300 pass
301 301
302 302 self.tagscache = {}
303 303 self._tagstypecache = {}
304 304 for k, nh in globaltags.iteritems():
305 305 n = nh[0]
306 306 if n != nullid:
307 307 self.tagscache[k] = n
308 308 self._tagstypecache[k] = tagtypes[k]
309 309 self.tagscache['tip'] = self.changelog.tip()
310 310 return self.tagscache
311 311
312 312 def tagtype(self, tagname):
313 313 '''
314 314 return the type of the given tag. result can be:
315 315
316 316 'local' : a local tag
317 317 'global' : a global tag
318 318 None : tag does not exist
319 319 '''
320 320
321 321 self.tags()
322 322
323 323 return self._tagstypecache.get(tagname)
324 324
325 325 def _hgtagsnodes(self):
326 326 heads = self.heads()
327 327 heads.reverse()
328 328 last = {}
329 329 ret = []
330 330 for node in heads:
331 331 c = self[node]
332 332 rev = c.rev()
333 333 try:
334 334 fnode = c.filenode('.hgtags')
335 335 except error.LookupError:
336 336 continue
337 337 ret.append((rev, node, fnode))
338 338 if fnode in last:
339 339 ret[last[fnode]] = None
340 340 last[fnode] = len(ret) - 1
341 341 return [item for item in ret if item]
342 342
343 343 def tagslist(self):
344 344 '''return a list of tags ordered by revision'''
345 345 l = []
346 346 for t, n in self.tags().iteritems():
347 347 try:
348 348 r = self.changelog.rev(n)
349 349 except:
350 350 r = -2 # sort to the beginning of the list if unknown
351 351 l.append((r, t, n))
352 352 return [(t, n) for r, t, n in util.sort(l)]
353 353
354 354 def nodetags(self, node):
355 355 '''return the tags associated with a node'''
356 356 if not self.nodetagscache:
357 357 self.nodetagscache = {}
358 358 for t, n in self.tags().iteritems():
359 359 self.nodetagscache.setdefault(n, []).append(t)
360 360 return self.nodetagscache.get(node, [])
361 361
362 362 def _branchtags(self, partial, lrev):
363 363 tiprev = len(self) - 1
364 364 if lrev != tiprev:
365 365 self._updatebranchcache(partial, lrev+1, tiprev+1)
366 366 self._writebranchcache(partial, self.changelog.tip(), tiprev)
367 367
368 368 return partial
369 369
370 370 def branchtags(self):
371 371 tip = self.changelog.tip()
372 372 if self.branchcache is not None and self._branchcachetip == tip:
373 373 return self.branchcache
374 374
375 375 oldtip = self._branchcachetip
376 376 self._branchcachetip = tip
377 377 if self.branchcache is None:
378 378 self.branchcache = {} # avoid recursion in changectx
379 379 else:
380 380 self.branchcache.clear() # keep using the same dict
381 381 if oldtip is None or oldtip not in self.changelog.nodemap:
382 382 partial, last, lrev = self._readbranchcache()
383 383 else:
384 384 lrev = self.changelog.rev(oldtip)
385 385 partial = self._ubranchcache
386 386
387 387 self._branchtags(partial, lrev)
388 388
389 389 # the branch cache is stored on disk as UTF-8, but in the local
390 390 # charset internally
391 391 for k, v in partial.iteritems():
392 392 self.branchcache[util.tolocal(k)] = v
393 393 self._ubranchcache = partial
394 394 return self.branchcache
395 395
396 396 def _readbranchcache(self):
397 397 partial = {}
398 398 try:
399 399 f = self.opener("branch.cache")
400 400 lines = f.read().split('\n')
401 401 f.close()
402 402 except (IOError, OSError):
403 403 return {}, nullid, nullrev
404 404
405 405 try:
406 406 last, lrev = lines.pop(0).split(" ", 1)
407 407 last, lrev = bin(last), int(lrev)
408 408 if lrev >= len(self) or self[lrev].node() != last:
409 409 # invalidate the cache
410 410 raise ValueError('invalidating branch cache (tip differs)')
411 411 for l in lines:
412 412 if not l: continue
413 413 node, label = l.split(" ", 1)
414 414 partial[label.strip()] = bin(node)
415 415 except (KeyboardInterrupt, util.SignalInterrupt):
416 416 raise
417 417 except Exception, inst:
418 418 if self.ui.debugflag:
419 419 self.ui.warn(str(inst), '\n')
420 420 partial, last, lrev = {}, nullid, nullrev
421 421 return partial, last, lrev
422 422
423 423 def _writebranchcache(self, branches, tip, tiprev):
424 424 try:
425 425 f = self.opener("branch.cache", "w", atomictemp=True)
426 426 f.write("%s %s\n" % (hex(tip), tiprev))
427 427 for label, node in branches.iteritems():
428 428 f.write("%s %s\n" % (hex(node), label))
429 429 f.rename()
430 430 except (IOError, OSError):
431 431 pass
432 432
433 433 def _updatebranchcache(self, partial, start, end):
434 434 for r in xrange(start, end):
435 435 c = self[r]
436 436 b = c.branch()
437 437 partial[b] = c.node()
438 438
439 439 def lookup(self, key):
440 440 if isinstance(key, int):
441 441 return self.changelog.node(key)
442 442 elif key == '.':
443 443 return self.dirstate.parents()[0]
444 444 elif key == 'null':
445 445 return nullid
446 446 elif key == 'tip':
447 447 return self.changelog.tip()
448 448 n = self.changelog._match(key)
449 449 if n:
450 450 return n
451 451 if key in self.tags():
452 452 return self.tags()[key]
453 453 if key in self.branchtags():
454 454 return self.branchtags()[key]
455 455 n = self.changelog._partialmatch(key)
456 456 if n:
457 457 return n
458 458 try:
459 459 if len(key) == 20:
460 460 key = hex(key)
461 461 except:
462 462 pass
463 463 raise error.RepoError(_("unknown revision '%s'") % key)
464 464
465 465 def local(self):
466 466 return True
467 467
468 468 def join(self, f):
469 469 return os.path.join(self.path, f)
470 470
471 471 def wjoin(self, f):
472 472 return os.path.join(self.root, f)
473 473
474 474 def rjoin(self, f):
475 475 return os.path.join(self.root, util.pconvert(f))
476 476
477 477 def file(self, f):
478 478 if f[0] == '/':
479 479 f = f[1:]
480 480 return filelog.filelog(self.sopener, f)
481 481
482 482 def changectx(self, changeid):
483 483 return self[changeid]
484 484
485 485 def parents(self, changeid=None):
486 486 '''get list of changectxs for parents of changeid'''
487 487 return self[changeid].parents()
488 488
489 489 def filectx(self, path, changeid=None, fileid=None):
490 490 """changeid can be a changeset revision, node, or tag.
491 491 fileid can be a file revision or node."""
492 492 return context.filectx(self, path, changeid, fileid)
493 493
494 494 def getcwd(self):
495 495 return self.dirstate.getcwd()
496 496
497 497 def pathto(self, f, cwd=None):
498 498 return self.dirstate.pathto(f, cwd)
499 499
500 500 def wfile(self, f, mode='r'):
501 501 return self.wopener(f, mode)
502 502
503 503 def _link(self, f):
504 504 return os.path.islink(self.wjoin(f))
505 505
506 506 def _filter(self, filter, filename, data):
507 507 if filter not in self.filterpats:
508 508 l = []
509 509 for pat, cmd in self.ui.configitems(filter):
510 510 if cmd == '!':
511 511 continue
512 512 mf = util.matcher(self.root, "", [pat], [], [])[1]
513 513 fn = None
514 514 params = cmd
515 515 for name, filterfn in self._datafilters.iteritems():
516 516 if cmd.startswith(name):
517 517 fn = filterfn
518 518 params = cmd[len(name):].lstrip()
519 519 break
520 520 if not fn:
521 521 fn = lambda s, c, **kwargs: util.filter(s, c)
522 522 # Wrap old filters not supporting keyword arguments
523 523 if not inspect.getargspec(fn)[2]:
524 524 oldfn = fn
525 525 fn = lambda s, c, **kwargs: oldfn(s, c)
526 526 l.append((mf, fn, params))
527 527 self.filterpats[filter] = l
528 528
529 529 for mf, fn, cmd in self.filterpats[filter]:
530 530 if mf(filename):
531 531 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
532 532 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
533 533 break
534 534
535 535 return data
536 536
537 537 def adddatafilter(self, name, filter):
538 538 self._datafilters[name] = filter
539 539
540 540 def wread(self, filename):
541 541 if self._link(filename):
542 542 data = os.readlink(self.wjoin(filename))
543 543 else:
544 544 data = self.wopener(filename, 'r').read()
545 545 return self._filter("encode", filename, data)
546 546
547 547 def wwrite(self, filename, data, flags):
548 548 data = self._filter("decode", filename, data)
549 549 try:
550 550 os.unlink(self.wjoin(filename))
551 551 except OSError:
552 552 pass
553 553 if 'l' in flags:
554 554 self.wopener.symlink(data, filename)
555 555 else:
556 556 self.wopener(filename, 'w').write(data)
557 557 if 'x' in flags:
558 558 util.set_flags(self.wjoin(filename), False, True)
559 559
560 560 def wwritedata(self, filename, data):
561 561 return self._filter("decode", filename, data)
562 562
563 563 def transaction(self):
564 564 if self._transref and self._transref():
565 565 return self._transref().nest()
566 566
567 567 # abort here if the journal already exists
568 568 if os.path.exists(self.sjoin("journal")):
569 569 raise error.RepoError(_("journal already exists - run hg recover"))
570 570
571 571 # save dirstate for rollback
572 572 try:
573 573 ds = self.opener("dirstate").read()
574 574 except IOError:
575 575 ds = ""
576 576 self.opener("journal.dirstate", "w").write(ds)
577 577 self.opener("journal.branch", "w").write(self.dirstate.branch())
578 578
579 579 renames = [(self.sjoin("journal"), self.sjoin("undo")),
580 580 (self.join("journal.dirstate"), self.join("undo.dirstate")),
581 581 (self.join("journal.branch"), self.join("undo.branch"))]
582 582 tr = transaction.transaction(self.ui.warn, self.sopener,
583 583 self.sjoin("journal"),
584 584 aftertrans(renames),
585 585 self.store.createmode)
586 586 self._transref = weakref.ref(tr)
587 587 return tr
588 588
589 589 def recover(self):
590 590 l = self.lock()
591 591 try:
592 592 if os.path.exists(self.sjoin("journal")):
593 593 self.ui.status(_("rolling back interrupted transaction\n"))
594 594 transaction.rollback(self.sopener, self.sjoin("journal"))
595 595 self.invalidate()
596 596 return True
597 597 else:
598 598 self.ui.warn(_("no interrupted transaction available\n"))
599 599 return False
600 600 finally:
601 601 del l
602 602
603 603 def rollback(self):
604 604 wlock = lock = None
605 605 try:
606 606 wlock = self.wlock()
607 607 lock = self.lock()
608 608 if os.path.exists(self.sjoin("undo")):
609 609 self.ui.status(_("rolling back last transaction\n"))
610 610 transaction.rollback(self.sopener, self.sjoin("undo"))
611 611 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
612 612 try:
613 613 branch = self.opener("undo.branch").read()
614 614 self.dirstate.setbranch(branch)
615 615 except IOError:
616 616 self.ui.warn(_("Named branch could not be reset, "
617 617 "current branch still is: %s\n")
618 618 % util.tolocal(self.dirstate.branch()))
619 619 self.invalidate()
620 620 self.dirstate.invalidate()
621 621 else:
622 622 self.ui.warn(_("no rollback information available\n"))
623 623 finally:
624 624 del lock, wlock
625 625
626 626 def invalidate(self):
627 627 for a in "changelog manifest".split():
628 628 if a in self.__dict__:
629 629 delattr(self, a)
630 630 self.tagscache = None
631 631 self._tagstypecache = None
632 632 self.nodetagscache = None
633 633 self.branchcache = None
634 634 self._ubranchcache = None
635 635 self._branchcachetip = None
636 636
637 637 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
638 638 try:
639 639 l = lock.lock(lockname, 0, releasefn, desc=desc)
640 640 except error.LockHeld, inst:
641 641 if not wait:
642 642 raise
643 643 self.ui.warn(_("waiting for lock on %s held by %r\n") %
644 644 (desc, inst.locker))
645 645 # default to 600 seconds timeout
646 646 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
647 647 releasefn, desc=desc)
648 648 if acquirefn:
649 649 acquirefn()
650 650 return l
651 651
652 652 def lock(self, wait=True):
653 653 if self._lockref and self._lockref():
654 654 return self._lockref()
655 655
656 656 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
657 657 _('repository %s') % self.origroot)
658 658 self._lockref = weakref.ref(l)
659 659 return l
660 660
661 661 def wlock(self, wait=True):
662 662 if self._wlockref and self._wlockref():
663 663 return self._wlockref()
664 664
665 665 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
666 666 self.dirstate.invalidate, _('working directory of %s') %
667 667 self.origroot)
668 668 self._wlockref = weakref.ref(l)
669 669 return l
670 670
671 671 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
672 672 """
673 673 commit an individual file as part of a larger transaction
674 674 """
675 675
676 676 fn = fctx.path()
677 677 t = fctx.data()
678 678 fl = self.file(fn)
679 679 fp1 = manifest1.get(fn, nullid)
680 680 fp2 = manifest2.get(fn, nullid)
681 681
682 682 meta = {}
683 683 cp = fctx.renamed()
684 684 if cp and cp[0] != fn:
685 685 # Mark the new revision of this file as a copy of another
686 686 # file. This copy data will effectively act as a parent
687 687 # of this new revision. If this is a merge, the first
688 688 # parent will be the nullid (meaning "look up the copy data")
689 689 # and the second one will be the other parent. For example:
690 690 #
691 691 # 0 --- 1 --- 3 rev1 changes file foo
692 692 # \ / rev2 renames foo to bar and changes it
693 693 # \- 2 -/ rev3 should have bar with all changes and
694 694 # should record that bar descends from
695 695 # bar in rev2 and foo in rev1
696 696 #
697 697 # this allows this merge to succeed:
698 698 #
699 699 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
700 700 # \ / merging rev3 and rev4 should use bar@rev2
701 701 # \- 2 --- 4 as the merge base
702 702 #
703 703
704 704 cf = cp[0]
705 705 cr = manifest1.get(cf)
706 706 nfp = fp2
707 707
708 708 if manifest2: # branch merge
709 709 if fp2 == nullid: # copied on remote side
710 710 if fp1 != nullid or cf in manifest2:
711 711 cr = manifest2[cf]
712 712 nfp = fp1
713 713
714 714 # find source in nearest ancestor if we've lost track
715 715 if not cr:
716 716 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
717 717 (fn, cf))
718 718 for a in self['.'].ancestors():
719 719 if cf in a:
720 720 cr = a[cf].filenode()
721 721 break
722 722
723 723 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
724 724 meta["copy"] = cf
725 725 meta["copyrev"] = hex(cr)
726 726 fp1, fp2 = nullid, nfp
727 727 elif fp2 != nullid:
728 728 # is one parent an ancestor of the other?
729 729 fpa = fl.ancestor(fp1, fp2)
730 730 if fpa == fp1:
731 731 fp1, fp2 = fp2, nullid
732 732 elif fpa == fp2:
733 733 fp2 = nullid
734 734
735 735 # is the file unmodified from the parent? report existing entry
736 736 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
737 737 return fp1
738 738
739 739 changelist.append(fn)
740 740 return fl.add(t, meta, tr, linkrev, fp1, fp2)
741 741
742 742 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
743 743 if p1 is None:
744 744 p1, p2 = self.dirstate.parents()
745 745 return self.commit(files=files, text=text, user=user, date=date,
746 746 p1=p1, p2=p2, extra=extra, empty_ok=True)
747 747
748 748 def commit(self, files=None, text="", user=None, date=None,
749 749 match=None, force=False, force_editor=False,
750 750 p1=None, p2=None, extra={}, empty_ok=False):
751 751 wlock = lock = None
752 752 if files:
753 753 files = util.unique(files)
754 754 try:
755 755 wlock = self.wlock()
756 756 lock = self.lock()
757 757 use_dirstate = (p1 is None) # not rawcommit
758 758
759 759 if use_dirstate:
760 760 p1, p2 = self.dirstate.parents()
761 761 update_dirstate = True
762 762
763 763 if (not force and p2 != nullid and
764 764 (match and (match.files() or match.anypats()))):
765 765 raise util.Abort(_('cannot partially commit a merge '
766 766 '(do not specify files or patterns)'))
767 767
768 768 if files:
769 769 modified, removed = [], []
770 770 for f in files:
771 771 s = self.dirstate[f]
772 772 if s in 'nma':
773 773 modified.append(f)
774 774 elif s == 'r':
775 775 removed.append(f)
776 776 else:
777 777 self.ui.warn(_("%s not tracked!\n") % f)
778 778 changes = [modified, [], removed, [], []]
779 779 else:
780 780 changes = self.status(match=match)
781 781 else:
782 782 p1, p2 = p1, p2 or nullid
783 783 update_dirstate = (self.dirstate.parents()[0] == p1)
784 784 changes = [files, [], [], [], []]
785 785
786 786 ms = merge_.mergestate(self)
787 787 for f in changes[0]:
788 788 if f in ms and ms[f] == 'u':
789 789 raise util.Abort(_("unresolved merge conflicts "
790 790 "(see hg resolve)"))
791 791 wctx = context.workingctx(self, (p1, p2), text, user, date,
792 792 extra, changes)
793 793 return self._commitctx(wctx, force, force_editor, empty_ok,
794 794 use_dirstate, update_dirstate)
795 795 finally:
796 796 del lock, wlock
797 797
798 798 def commitctx(self, ctx):
799 799 """Add a new revision to current repository.
800 800
801 801 Revision information is passed in the context.memctx argument.
802 802 commitctx() does not touch the working directory.
803 803 """
804 804 wlock = lock = None
805 805 try:
806 806 wlock = self.wlock()
807 807 lock = self.lock()
808 808 return self._commitctx(ctx, force=True, force_editor=False,
809 809 empty_ok=True, use_dirstate=False,
810 810 update_dirstate=False)
811 811 finally:
812 812 del lock, wlock
813 813
814 814 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
815 815 use_dirstate=True, update_dirstate=True):
816 816 tr = None
817 817 valid = 0 # don't save the dirstate if this isn't set
818 818 try:
819 819 commit = util.sort(wctx.modified() + wctx.added())
820 820 remove = wctx.removed()
821 821 extra = wctx.extra().copy()
822 822 branchname = extra['branch']
823 823 user = wctx.user()
824 824 text = wctx.description()
825 825
826 826 p1, p2 = [p.node() for p in wctx.parents()]
827 827 c1 = self.changelog.read(p1)
828 828 c2 = self.changelog.read(p2)
829 829 m1 = self.manifest.read(c1[0]).copy()
830 830 m2 = self.manifest.read(c2[0])
831 831
832 832 if use_dirstate:
833 833 oldname = c1[5].get("branch") # stored in UTF-8
834 834 if (not commit and not remove and not force and p2 == nullid
835 835 and branchname == oldname):
836 836 self.ui.status(_("nothing changed\n"))
837 837 return None
838 838
839 839 xp1 = hex(p1)
840 840 if p2 == nullid: xp2 = ''
841 841 else: xp2 = hex(p2)
842 842
843 843 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
844 844
845 845 tr = self.transaction()
846 846 trp = weakref.proxy(tr)
847 847
848 848 # check in files
849 849 new = {}
850 850 changed = []
851 851 linkrev = len(self)
852 852 for f in commit:
853 853 self.ui.note(f + "\n")
854 854 try:
855 855 fctx = wctx.filectx(f)
856 856 newflags = fctx.flags()
857 857 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
858 858 if ((not changed or changed[-1] != f) and
859 859 m2.get(f) != new[f]):
860 860 # mention the file in the changelog if some
861 861 # flag changed, even if there was no content
862 862 # change.
863 863 if m1.flags(f) != newflags:
864 864 changed.append(f)
865 865 m1.set(f, newflags)
866 866 if use_dirstate:
867 867 self.dirstate.normal(f)
868 868
869 869 except (OSError, IOError):
870 870 if use_dirstate:
871 871 self.ui.warn(_("trouble committing %s!\n") % f)
872 872 raise
873 873 else:
874 874 remove.append(f)
875 875
876 876 updated, added = [], []
877 877 for f in util.sort(changed):
878 878 if f in m1 or f in m2:
879 879 updated.append(f)
880 880 else:
881 881 added.append(f)
882 882
883 883 # update manifest
884 884 m1.update(new)
885 885 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
886 886 removed1 = []
887 887
888 888 for f in removed:
889 889 if f in m1:
890 890 del m1[f]
891 891 removed1.append(f)
892 892 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
893 893 (new, removed1))
894 894
895 895 # add changeset
896 896 if (not empty_ok and not text) or force_editor:
897 897 edittext = []
898 898 if text:
899 899 edittext.append(text)
900 900 edittext.append("")
901 901 edittext.append("") # Empty line between message and comments.
902 902 edittext.append(_("HG: Enter commit message."
903 903 " Lines beginning with 'HG:' are removed."))
904 904 edittext.append("HG: --")
905 905 edittext.append("HG: user: %s" % user)
906 906 if p2 != nullid:
907 907 edittext.append("HG: branch merge")
908 908 if branchname:
909 909 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
910 910 edittext.extend(["HG: added %s" % f for f in added])
911 911 edittext.extend(["HG: changed %s" % f for f in updated])
912 912 edittext.extend(["HG: removed %s" % f for f in removed])
913 913 if not added and not updated and not removed:
914 914 edittext.append("HG: no files changed")
915 915 edittext.append("")
916 916 # run editor in the repository root
917 917 olddir = os.getcwd()
918 918 os.chdir(self.root)
919 919 text = self.ui.edit("\n".join(edittext), user)
920 920 os.chdir(olddir)
921 921
922 922 lines = [line.rstrip() for line in text.rstrip().splitlines()]
923 923 while lines and not lines[0]:
924 924 del lines[0]
925 925 if not lines and use_dirstate:
926 926 raise util.Abort(_("empty commit message"))
927 927 text = '\n'.join(lines)
928 928
929 929 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
930 930 user, wctx.date(), extra)
931 931 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
932 932 parent2=xp2)
933 933 tr.close()
934 934
935 935 if self.branchcache:
936 936 self.branchtags()
937 937
938 938 if use_dirstate or update_dirstate:
939 939 self.dirstate.setparents(n)
940 940 if use_dirstate:
941 941 for f in removed:
942 942 self.dirstate.forget(f)
943 943 valid = 1 # our dirstate updates are complete
944 944
945 945 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
946 946 return n
947 947 finally:
948 948 if not valid: # don't save our updated dirstate
949 949 self.dirstate.invalidate()
950 950 del tr
951 951
952 952 def walk(self, match, node=None):
953 953 '''
954 954 walk recursively through the directory tree or a given
955 955 changeset, finding all files matched by the match
956 956 function
957 957 '''
958 958 return self[node].walk(match)
959 959
960 960 def status(self, node1='.', node2=None, match=None,
961 961 ignored=False, clean=False, unknown=False):
962 962 """return status of files between two nodes or node and working directory
963 963
964 964 If node1 is None, use the first dirstate parent instead.
965 965 If node2 is None, compare node1 with working directory.
966 966 """
967 967
968 968 def mfmatches(ctx):
969 969 mf = ctx.manifest().copy()
970 970 for fn in mf.keys():
971 971 if not match(fn):
972 972 del mf[fn]
973 973 return mf
974 974
975 975 if isinstance(node1, context.changectx):
976 976 ctx1 = node1
977 977 else:
978 978 ctx1 = self[node1]
979 979 if isinstance(node2, context.changectx):
980 980 ctx2 = node2
981 981 else:
982 982 ctx2 = self[node2]
983 983
984 984 working = ctx2.rev() is None
985 985 parentworking = working and ctx1 == self['.']
986 986 match = match or match_.always(self.root, self.getcwd())
987 987 listignored, listclean, listunknown = ignored, clean, unknown
988 988
989 989 # load earliest manifest first for caching reasons
990 990 if not working and ctx2.rev() < ctx1.rev():
991 991 ctx2.manifest()
992 992
993 993 if not parentworking:
994 994 def bad(f, msg):
995 995 if f not in ctx1:
996 996 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
997 997 return False
998 998 match.bad = bad
999 999
1000 1000 if working: # we need to scan the working dir
1001 1001 s = self.dirstate.status(match, listignored, listclean, listunknown)
1002 1002 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1003 1003
1004 1004 # check for any possibly clean files
1005 1005 if parentworking and cmp:
1006 1006 fixup = []
1007 1007 # do a full compare of any files that might have changed
1008 1008 for f in cmp:
1009 1009 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1010 1010 or ctx1[f].cmp(ctx2[f].data())):
1011 1011 modified.append(f)
1012 1012 else:
1013 1013 fixup.append(f)
1014 1014
1015 1015 if listclean:
1016 1016 clean += fixup
1017 1017
1018 1018 # update dirstate for files that are actually clean
1019 1019 if fixup:
1020 1020 wlock = None
1021 1021 try:
1022 1022 try:
1023 1023 wlock = self.wlock(False)
1024 1024 for f in fixup:
1025 1025 self.dirstate.normal(f)
1026 1026 except lock.LockError:
1027 1027 pass
1028 1028 finally:
1029 1029 del wlock
1030 1030
1031 1031 if not parentworking:
1032 1032 mf1 = mfmatches(ctx1)
1033 1033 if working:
1034 1034 # we are comparing working dir against non-parent
1035 1035 # generate a pseudo-manifest for the working dir
1036 1036 mf2 = mfmatches(self['.'])
1037 1037 for f in cmp + modified + added:
1038 1038 mf2[f] = None
1039 1039 mf2.set(f, ctx2.flags(f))
1040 1040 for f in removed:
1041 1041 if f in mf2:
1042 1042 del mf2[f]
1043 1043 else:
1044 1044 # we are comparing two revisions
1045 1045 deleted, unknown, ignored = [], [], []
1046 1046 mf2 = mfmatches(ctx2)
1047 1047
1048 1048 modified, added, clean = [], [], []
1049 1049 for fn in mf2:
1050 1050 if fn in mf1:
1051 1051 if (mf1.flags(fn) != mf2.flags(fn) or
1052 1052 (mf1[fn] != mf2[fn] and
1053 1053 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1054 1054 modified.append(fn)
1055 1055 elif listclean:
1056 1056 clean.append(fn)
1057 1057 del mf1[fn]
1058 1058 else:
1059 1059 added.append(fn)
1060 1060 removed = mf1.keys()
1061 1061
1062 1062 r = modified, added, removed, deleted, unknown, ignored, clean
1063 1063 [l.sort() for l in r]
1064 1064 return r
1065 1065
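For files the dirstate can only flag as possibly changed, the loop above falls back to a real content and flags comparison. A toy sketch of that split, using plain dictionaries instead of Mercurial's context objects (all names here are illustrative assumptions):

    def split_possibly_clean(candidates, old, new):
        # toy version of the fixup pass in status(): old/new map a filename to
        # a (data, flags) tuple; files that really differ are modified,
        # identical ones are merely stat-dirty and can be marked clean again.
        modified, fixup = [], []
        for f in candidates:
            if f not in old or old.get(f) != new.get(f):
                modified.append(f)
            else:
                fixup.append(f)
        return modified, fixup

    # split_possibly_clean(['a', 'b'],
    #                      {'a': ('x', ''), 'b': ('y', '')},
    #                      {'a': ('x', ''), 'b': ('z', '')}) == (['b'], ['a'])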
1066 1066 def add(self, list):
1067 1067 wlock = self.wlock()
1068 1068 try:
1069 1069 rejected = []
1070 1070 for f in list:
1071 1071 p = self.wjoin(f)
1072 1072 try:
1073 1073 st = os.lstat(p)
1074 1074 except:
1075 1075 self.ui.warn(_("%s does not exist!\n") % f)
1076 1076 rejected.append(f)
1077 1077 continue
1078 1078 if st.st_size > 10000000:
1079 1079 self.ui.warn(_("%s: files over 10MB may cause memory and"
1080 1080 " performance problems\n"
1081 1081 "(use 'hg revert %s' to unadd the file)\n")
1082 1082 % (f, f))
1083 1083 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1084 1084 self.ui.warn(_("%s not added: only files and symlinks "
1085 1085 "supported currently\n") % f)
1086 1086 rejected.append(p)
1087 1087 elif self.dirstate[f] in 'amn':
1088 1088 self.ui.warn(_("%s already tracked!\n") % f)
1089 1089 elif self.dirstate[f] == 'r':
1090 1090 self.dirstate.normallookup(f)
1091 1091 else:
1092 1092 self.dirstate.add(f)
1093 1093 return rejected
1094 1094 finally:
1095 1095 del wlock
1096 1096
1097 1097 def forget(self, list):
1098 1098 wlock = self.wlock()
1099 1099 try:
1100 1100 for f in list:
1101 1101 if self.dirstate[f] != 'a':
1102 1102 self.ui.warn(_("%s not added!\n") % f)
1103 1103 else:
1104 1104 self.dirstate.forget(f)
1105 1105 finally:
1106 1106 del wlock
1107 1107
1108 1108 def remove(self, list, unlink=False):
1109 1109 wlock = None
1110 1110 try:
1111 1111 if unlink:
1112 1112 for f in list:
1113 1113 try:
1114 1114 util.unlink(self.wjoin(f))
1115 1115 except OSError, inst:
1116 1116 if inst.errno != errno.ENOENT:
1117 1117 raise
1118 1118 wlock = self.wlock()
1119 1119 for f in list:
1120 1120 if unlink and os.path.exists(self.wjoin(f)):
1121 1121 self.ui.warn(_("%s still exists!\n") % f)
1122 1122 elif self.dirstate[f] == 'a':
1123 1123 self.dirstate.forget(f)
1124 1124 elif f not in self.dirstate:
1125 1125 self.ui.warn(_("%s not tracked!\n") % f)
1126 1126 else:
1127 1127 self.dirstate.remove(f)
1128 1128 finally:
1129 1129 del wlock
1130 1130
1131 1131 def undelete(self, list):
1132 1132 wlock = None
1133 1133 try:
1134 1134 manifests = [self.manifest.read(self.changelog.read(p)[0])
1135 1135 for p in self.dirstate.parents() if p != nullid]
1136 1136 wlock = self.wlock()
1137 1137 for f in list:
1138 1138 if self.dirstate[f] != 'r':
1139 1139 self.ui.warn(_("%s not removed!\n") % f)
1140 1140 else:
1141 1141 m = f in manifests[0] and manifests[0] or manifests[1]
1142 1142 t = self.file(f).read(m[f])
1143 1143 self.wwrite(f, t, m.flags(f))
1144 1144 self.dirstate.normal(f)
1145 1145 finally:
1146 1146 del wlock
1147 1147
1148 1148 def copy(self, source, dest):
1149 1149 wlock = None
1150 1150 try:
1151 1151 p = self.wjoin(dest)
1152 1152 if not (os.path.exists(p) or os.path.islink(p)):
1153 1153 self.ui.warn(_("%s does not exist!\n") % dest)
1154 1154 elif not (os.path.isfile(p) or os.path.islink(p)):
1155 1155 self.ui.warn(_("copy failed: %s is not a file or a "
1156 1156 "symbolic link\n") % dest)
1157 1157 else:
1158 1158 wlock = self.wlock()
1159 1159 if self.dirstate[dest] in '?r':
1160 1160 self.dirstate.add(dest)
1161 1161 self.dirstate.copy(source, dest)
1162 1162 finally:
1163 1163 del wlock
1164 1164
1165 1165 def heads(self, start=None):
1166 1166 heads = self.changelog.heads(start)
1167 1167 # sort the output in rev descending order
1168 1168 heads = [(-self.changelog.rev(h), h) for h in heads]
1169 1169 return [n for (r, n) in util.sort(heads)]
1170 1170
1171 1171 def branchheads(self, branch=None, start=None):
1172 1172 if branch is None:
1173 1173 branch = self[None].branch()
1174 1174 branches = self.branchtags()
1175 1175 if branch not in branches:
1176 1176 return []
1177 1177 # The basic algorithm is this:
1178 1178 #
1179 1179 # Start from the branch tip since there are no later revisions that can
1180 1180 # possibly be in this branch, and the tip is a guaranteed head.
1181 1181 #
1182 1182 # Remember the tip's parents as the first ancestors, since these by
1183 1183 # definition are not heads.
1184 1184 #
1185 1185 # Step backwards from the branch tip through all the revisions. We are
1186 1186 # guaranteed by the rules of Mercurial that we will now be visiting the
1187 1187 # nodes in reverse topological order (children before parents).
1188 1188 #
1189 1189 # If a revision is one of the ancestors of a head then we can toss it
1190 1190 # out of the ancestors set (we've already found it and won't be
1191 1191 # visiting it again) and put its parents in the ancestors set.
1192 1192 #
1193 1193 # Otherwise, if a revision is in the branch it's another head, since it
1194 1194 # wasn't in the ancestor list of an existing head. So add it to the
1195 1195 # head list, and add its parents to the ancestor list.
1196 1196 #
1197 1197 # If it is not in the branch ignore it.
1198 1198 #
1199 1199 # Once we have a list of heads, use nodesbetween to filter out all the
1200 1200 # heads that cannot be reached from startrev. There may be a more
1201 1201 # efficient way to do this as part of the previous algorithm.
1202 1202
1203 1203 set = util.set
1204 1204 heads = [self.changelog.rev(branches[branch])]
1205 1205 # Don't care if ancestors contains nullrev or not.
1206 1206 ancestors = set(self.changelog.parentrevs(heads[0]))
1207 1207 for rev in xrange(heads[0] - 1, nullrev, -1):
1208 1208 if rev in ancestors:
1209 1209 ancestors.update(self.changelog.parentrevs(rev))
1210 1210 ancestors.remove(rev)
1211 1211 elif self[rev].branch() == branch:
1212 1212 heads.append(rev)
1213 1213 ancestors.update(self.changelog.parentrevs(rev))
1214 1214 heads = [self.changelog.node(rev) for rev in heads]
1215 1215 if start is not None:
1216 1216 heads = self.changelog.nodesbetween([start], heads)[2]
1217 1217 return heads
1218 1218
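The comment block above describes a reverse-topological scan; here is a minimal standalone sketch of the same idea over revision numbers, with caller-supplied parentrevs and branch lookups (hypothetical helpers, not this class's API):

    def toy_branch_heads(tiprev, parentrevs, branch_of, branch):
        # walk from the branch tip down to rev 0; revs already marked as
        # ancestors are dropped and their parents marked instead, any other
        # rev on the branch is a new head.
        heads = [tiprev]
        ancestors = set(parentrevs(tiprev))
        for rev in range(tiprev - 1, -1, -1):
            if rev in ancestors:
                ancestors.update(parentrevs(rev))
                ancestors.discard(rev)
            elif branch_of(rev) == branch:
                heads.append(rev)
                ancestors.update(parentrevs(rev))
        return heads

    # linear history 0 -> 1 -> 2, all on 'default': only the tip is a head
    # toy_branch_heads(2, lambda r: [r - 1], lambda r: 'default', 'default') == [2]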
1219 1219 def branches(self, nodes):
1220 1220 if not nodes:
1221 1221 nodes = [self.changelog.tip()]
1222 1222 b = []
1223 1223 for n in nodes:
1224 1224 t = n
1225 1225 while 1:
1226 1226 p = self.changelog.parents(n)
1227 1227 if p[1] != nullid or p[0] == nullid:
1228 1228 b.append((t, n, p[0], p[1]))
1229 1229 break
1230 1230 n = p[0]
1231 1231 return b
1232 1232
1233 1233 def between(self, pairs):
1234 1234 r = []
1235 1235
1236 1236 for top, bottom in pairs:
1237 1237 n, l, i = top, [], 0
1238 1238 f = 1
1239 1239
1240 1240 while n != bottom:
1241 1241 p = self.changelog.parents(n)[0]
1242 1242 if i == f:
1243 1243 l.append(n)
1244 1244 f = f * 2
1245 1245 n = p
1246 1246 i += 1
1247 1247
1248 1248 r.append(l)
1249 1249
1250 1250 return r
1251 1251
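between() samples each requested segment at exponentially growing distances from the top. A small sketch over a single (top, bottom) pair, assuming a first-parent lookup function:

    def sample_between(top, bottom, parent):
        # keep the nodes at distances 1, 2, 4, 8, ... from top -- the
        # exponentially spaced samples the discovery search narrows down later
        n, samples, i, f = top, [], 0, 1
        while n != bottom:
            if i == f:
                samples.append(n)
                f *= 2
            n = parent(n)
            i += 1
        return samples

    # over a linear chain 10 -> 9 -> ... -> 0:
    # sample_between(10, 0, lambda n: n - 1) == [9, 8, 6, 2]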
1252 1252 def findincoming(self, remote, base=None, heads=None, force=False):
1253 1253 """Return list of roots of the subsets of missing nodes from remote
1254 1254
1255 1255 If base dict is specified, assume that these nodes and their parents
1256 1256 exist on the remote side and that no child of a node of base exists
1257 1257 in both remote and self.
1258 1258 Furthermore, base will be updated to include the nodes that exist
1259 1259 in both self and remote but whose children do not exist in both.
1260 1260 If a list of heads is specified, return only nodes which are heads
1261 1261 or ancestors of these heads.
1262 1262
1263 1263 All the ancestors of base are in self and in remote.
1264 1264 All the descendants of the list returned are missing in self.
1265 1265 (and so we know that the rest of the nodes are missing in remote, see
1266 1266 outgoing)
1267 1267 """
1268 1268 return self.findcommonincoming(remote, base, heads, force)[1]
1269 1269
1270 1270 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1271 1271 """Return a tuple (common, missing roots, heads) used to identify
1272 1272 missing nodes from remote.
1273 1273
1274 1274 If base dict is specified, assume that these nodes and their parents
1275 1275 exist on the remote side and that no child of a node of base exists
1276 1276 in both remote and self.
1277 1277 Furthermore, base will be updated to include the nodes that exist
1278 1278 in both self and remote but whose children do not exist in both.
1279 1279 If a list of heads is specified, return only nodes which are heads
1280 1280 or ancestors of these heads.
1281 1281
1282 1282 All the ancestors of base are in self and in remote.
1283 1283 """
1284 1284 m = self.changelog.nodemap
1285 1285 search = []
1286 1286 fetch = {}
1287 1287 seen = {}
1288 1288 seenbranch = {}
1289 1289 if base == None:
1290 1290 base = {}
1291 1291
1292 1292 if not heads:
1293 1293 heads = remote.heads()
1294 1294
1295 1295 if self.changelog.tip() == nullid:
1296 1296 base[nullid] = 1
1297 1297 if heads != [nullid]:
1298 1298 return [nullid], [nullid], list(heads)
1299 1299 return [nullid], [], []
1300 1300
1301 1301 # assume we're closer to the tip than the root
1302 1302 # and start by examining the heads
1303 1303 self.ui.status(_("searching for changes\n"))
1304 1304
1305 1305 unknown = []
1306 1306 for h in heads:
1307 1307 if h not in m:
1308 1308 unknown.append(h)
1309 1309 else:
1310 1310 base[h] = 1
1311 1311
1312 1312 heads = unknown
1313 1313 if not unknown:
1314 1314 return base.keys(), [], []
1315 1315
1316 1316 req = dict.fromkeys(unknown)
1317 1317 reqcnt = 0
1318 1318
1319 1319 # search through remote branches
1320 1320 # a 'branch' here is a linear segment of history, with four parts:
1321 1321 # head, root, first parent, second parent
1322 1322 # (a branch always has two parents (or none) by definition)
1323 1323 unknown = remote.branches(unknown)
1324 1324 while unknown:
1325 1325 r = []
1326 1326 while unknown:
1327 1327 n = unknown.pop(0)
1328 1328 if n[0] in seen:
1329 1329 continue
1330 1330
1331 1331 self.ui.debug(_("examining %s:%s\n")
1332 1332 % (short(n[0]), short(n[1])))
1333 1333 if n[0] == nullid: # found the end of the branch
1334 1334 pass
1335 1335 elif n in seenbranch:
1336 1336 self.ui.debug(_("branch already found\n"))
1337 1337 continue
1338 1338 elif n[1] and n[1] in m: # do we know the base?
1339 1339 self.ui.debug(_("found incomplete branch %s:%s\n")
1340 1340 % (short(n[0]), short(n[1])))
1341 1341 search.append(n[0:2]) # schedule branch range for scanning
1342 1342 seenbranch[n] = 1
1343 1343 else:
1344 1344 if n[1] not in seen and n[1] not in fetch:
1345 1345 if n[2] in m and n[3] in m:
1346 1346 self.ui.debug(_("found new changeset %s\n") %
1347 1347 short(n[1]))
1348 1348 fetch[n[1]] = 1 # earliest unknown
1349 1349 for p in n[2:4]:
1350 1350 if p in m:
1351 1351 base[p] = 1 # latest known
1352 1352
1353 1353 for p in n[2:4]:
1354 1354 if p not in req and p not in m:
1355 1355 r.append(p)
1356 1356 req[p] = 1
1357 1357 seen[n[0]] = 1
1358 1358
1359 1359 if r:
1360 1360 reqcnt += 1
1361 1361 self.ui.debug(_("request %d: %s\n") %
1362 1362 (reqcnt, " ".join(map(short, r))))
1363 1363 for p in xrange(0, len(r), 10):
1364 1364 for b in remote.branches(r[p:p+10]):
1365 1365 self.ui.debug(_("received %s:%s\n") %
1366 1366 (short(b[0]), short(b[1])))
1367 1367 unknown.append(b)
1368 1368
1369 1369 # do binary search on the branches we found
1370 1370 while search:
1371 1371 newsearch = []
1372 1372 reqcnt += 1
1373 1373 for n, l in zip(search, remote.between(search)):
1374 1374 l.append(n[1])
1375 1375 p = n[0]
1376 1376 f = 1
1377 1377 for i in l:
1378 1378 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1379 1379 if i in m:
1380 1380 if f <= 2:
1381 1381 self.ui.debug(_("found new branch changeset %s\n") %
1382 1382 short(p))
1383 1383 fetch[p] = 1
1384 1384 base[i] = 1
1385 1385 else:
1386 1386 self.ui.debug(_("narrowed branch search to %s:%s\n")
1387 1387 % (short(p), short(i)))
1388 1388 newsearch.append((p, i))
1389 1389 break
1390 1390 p, f = i, f * 2
1391 1391 search = newsearch
1392 1392
1393 1393 # sanity check our fetch list
1394 1394 for f in fetch.keys():
1395 1395 if f in m:
1396 1396 raise error.RepoError(_("already have changeset ")
1397 1397 + short(f[:4]))
1398 1398
1399 1399 if base.keys() == [nullid]:
1400 1400 if force:
1401 1401 self.ui.warn(_("warning: repository is unrelated\n"))
1402 1402 else:
1403 1403 raise util.Abort(_("repository is unrelated"))
1404 1404
1405 1405 self.ui.debug(_("found new changesets starting at ") +
1406 1406 " ".join([short(f) for f in fetch]) + "\n")
1407 1407
1408 1408 self.ui.debug(_("%d total queries\n") % reqcnt)
1409 1409
1410 1410 return base.keys(), fetch.keys(), heads
1411 1411
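The first step of the discovery above simply splits the remote heads into ones we already have (they seed the common base) and ones that still need to be investigated branch by branch. A toy version:

    def classify_remote_heads(local_nodes, remote_heads):
        # heads the local repo already knows become part of the common base;
        # the rest drive the branch scan and binary search that follow.
        base, unknown = {}, []
        for h in remote_heads:
            if h in local_nodes:
                base[h] = 1
            else:
                unknown.append(h)
        return base, unknown

    # classify_remote_heads({'a', 'b'}, ['b', 'x']) == ({'b': 1}, ['x'])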
1412 1412 def findoutgoing(self, remote, base=None, heads=None, force=False):
1413 1413 """Return list of nodes that are roots of subsets not in remote
1414 1414
1415 1415 If base dict is specified, assume that these nodes and their parents
1416 1416 exist on the remote side.
1417 1417 If a list of heads is specified, return only nodes which are heads
1418 1418 or ancestors of these heads, and return a second element which
1419 1419 contains all remote heads which get new children.
1420 1420 """
1421 1421 if base == None:
1422 1422 base = {}
1423 1423 self.findincoming(remote, base, heads, force=force)
1424 1424
1425 1425 self.ui.debug(_("common changesets up to ")
1426 1426 + " ".join(map(short, base.keys())) + "\n")
1427 1427
1428 1428 remain = dict.fromkeys(self.changelog.nodemap)
1429 1429
1430 1430 # prune everything remote has from the tree
1431 1431 del remain[nullid]
1432 1432 remove = base.keys()
1433 1433 while remove:
1434 1434 n = remove.pop(0)
1435 1435 if n in remain:
1436 1436 del remain[n]
1437 1437 for p in self.changelog.parents(n):
1438 1438 remove.append(p)
1439 1439
1440 1440 # find every node whose parents have been pruned
1441 1441 subset = []
1442 1442 # find every remote head that will get new children
1443 1443 updated_heads = {}
1444 1444 for n in remain:
1445 1445 p1, p2 = self.changelog.parents(n)
1446 1446 if p1 not in remain and p2 not in remain:
1447 1447 subset.append(n)
1448 1448 if heads:
1449 1449 if p1 in heads:
1450 1450 updated_heads[p1] = True
1451 1451 if p2 in heads:
1452 1452 updated_heads[p2] = True
1453 1453
1454 1454 # this is the set of all roots we have to push
1455 1455 if heads:
1456 1456 return subset, updated_heads.keys()
1457 1457 else:
1458 1458 return subset
1459 1459
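findoutgoing boils down to pruning everything reachable from the common base and keeping the survivors whose parents were all pruned. A self-contained sketch over a small parent function (the names are assumptions for the demo):

    def outgoing_roots(all_nodes, parents, base):
        # prune base and all of its ancestors; the roots of what we must push
        # are the remaining nodes none of whose parents remain.
        remain = set(all_nodes)
        work = list(base)
        while work:
            n = work.pop()
            if n in remain:
                remain.discard(n)
                work.extend(parents(n))
        return [n for n in remain if not any(p in remain for p in parents(n))]

    # linear history a -> b -> c, remote already has up to 'a':
    # outgoing_roots(['a', 'b', 'c'], {'a': [], 'b': ['a'], 'c': ['b']}.get, ['a'])
    # == ['b']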
1460 1460 def pull(self, remote, heads=None, force=False):
1461 1461 lock = self.lock()
1462 1462 try:
1463 1463 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1464 1464 force=force)
1465 1465 if fetch == [nullid]:
1466 1466 self.ui.status(_("requesting all changes\n"))
1467 1467
1468 1468 if not fetch:
1469 1469 self.ui.status(_("no changes found\n"))
1470 1470 return 0
1471 1471
1472 1472 if heads is None and remote.capable('changegroupsubset'):
1473 1473 heads = rheads
1474 1474
1475 1475 if heads is None:
1476 1476 cg = remote.changegroup(fetch, 'pull')
1477 1477 else:
1478 1478 if not remote.capable('changegroupsubset'):
1479 1479 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1480 1480 cg = remote.changegroupsubset(fetch, heads, 'pull')
1481 1481 return self.addchangegroup(cg, 'pull', remote.url())
1482 1482 finally:
1483 1483 del lock
1484 1484
1485 1485 def push(self, remote, force=False, revs=None):
1486 1486 # there are two ways to push to remote repo:
1487 1487 #
1488 1488 # addchangegroup assumes local user can lock remote
1489 1489 # repo (local filesystem, old ssh servers).
1490 1490 #
1491 1491 # unbundle assumes local user cannot lock remote repo (new ssh
1492 1492 # servers, http servers).
1493 1493
1494 1494 if remote.capable('unbundle'):
1495 1495 return self.push_unbundle(remote, force, revs)
1496 1496 return self.push_addchangegroup(remote, force, revs)
1497 1497
1498 1498 def prepush(self, remote, force, revs):
1499 1499 common = {}
1500 1500 remote_heads = remote.heads()
1501 1501 inc = self.findincoming(remote, common, remote_heads, force=force)
1502 1502
1503 1503 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1504 1504 if revs is not None:
1505 1505 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1506 1506 else:
1507 1507 bases, heads = update, self.changelog.heads()
1508 1508
1509 1509 if not bases:
1510 1510 self.ui.status(_("no changes found\n"))
1511 1511 return None, 1
1512 1512 elif not force:
1513 1513 # check if we're creating new remote heads
1514 1514 # to be a remote head after push, node must be either
1515 1515 # - unknown locally
1516 1516 # - a local outgoing head descended from update
1517 1517 # - a remote head that's known locally and not
1518 1518 # ancestral to an outgoing head
1519 1519
1520 1520 warn = 0
1521 1521
1522 1522 if remote_heads == [nullid]:
1523 1523 warn = 0
1524 1524 elif not revs and len(heads) > len(remote_heads):
1525 1525 warn = 1
1526 1526 else:
1527 1527 newheads = list(heads)
1528 1528 for r in remote_heads:
1529 1529 if r in self.changelog.nodemap:
1530 1530 desc = self.changelog.heads(r, heads)
1531 1531 l = [h for h in heads if h in desc]
1532 1532 if not l:
1533 1533 newheads.append(r)
1534 1534 else:
1535 1535 newheads.append(r)
1536 1536 if len(newheads) > len(remote_heads):
1537 1537 warn = 1
1538 1538
1539 1539 if warn:
1540 1540 self.ui.warn(_("abort: push creates new remote heads!\n"))
1541 1541 self.ui.status(_("(did you forget to merge?"
1542 1542 " use push -f to force)\n"))
1543 1543 return None, 0
1544 1544 elif inc:
1545 1545 self.ui.warn(_("note: unsynced remote changes!\n"))
1546 1546
1547 1547
1548 1548 if revs is None:
1549 1549 # use the fast path, no race possible on push
1550 1550 cg = self._changegroup(common.keys(), 'push')
1551 1551 else:
1552 1552 cg = self.changegroupsubset(update, revs, 'push')
1553 1553 return cg, remote_heads
1554 1554
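The new-remote-heads check in prepush can be restated in miniature: a remote head survives the push unless one of the outgoing local heads descends from it, and remote heads we do not know locally always survive. A toy sketch with caller-supplied predicates (hypothetical helpers, not this class's API):

    def push_creates_new_heads(local_heads, remote_heads, known, descends_from):
        # warn when the remote would end up with more heads than before
        newheads = list(local_heads)
        for r in remote_heads:
            if known(r) and any(descends_from(h, r) for h in local_heads):
                continue        # r will no longer be a head after the push
            newheads.append(r)
        return len(newheads) > len(remote_heads)

    # one local head 'x' descending from remote head 'r1'; remote also has 'r2':
    # push_creates_new_heads(['x'], ['r1', 'r2'], lambda n: n == 'r1',
    #                        lambda h, r: (h, r) == ('x', 'r1')) == False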
1555 1555 def push_addchangegroup(self, remote, force, revs):
1556 1556 lock = remote.lock()
1557 1557 try:
1558 1558 ret = self.prepush(remote, force, revs)
1559 1559 if ret[0] is not None:
1560 1560 cg, remote_heads = ret
1561 1561 return remote.addchangegroup(cg, 'push', self.url())
1562 1562 return ret[1]
1563 1563 finally:
1564 1564 del lock
1565 1565
1566 1566 def push_unbundle(self, remote, force, revs):
1567 1567 # local repo finds heads on server, finds out what revs it
1568 1568 # must push. once revs transferred, if server finds it has
1569 1569 # different heads (someone else won commit/push race), server
1570 1570 # aborts.
1571 1571
1572 1572 ret = self.prepush(remote, force, revs)
1573 1573 if ret[0] is not None:
1574 1574 cg, remote_heads = ret
1575 1575 if force: remote_heads = ['force']
1576 1576 return remote.unbundle(cg, remote_heads, 'push')
1577 1577 return ret[1]
1578 1578
1579 1579 def changegroupinfo(self, nodes, source):
1580 1580 if self.ui.verbose or source == 'bundle':
1581 1581 self.ui.status(_("%d changesets found\n") % len(nodes))
1582 1582 if self.ui.debugflag:
1583 1583 self.ui.debug(_("list of changesets:\n"))
1584 1584 for node in nodes:
1585 1585 self.ui.debug("%s\n" % hex(node))
1586 1586
1587 1587 def changegroupsubset(self, bases, heads, source, extranodes=None):
1588 1588 """This function generates a changegroup consisting of all the nodes
1589 1589 that are descendants of any of the bases, and ancestors of any of
1590 1590 the heads.
1591 1591
1592 1592 It is fairly complex as determining which filenodes and which
1593 1593 manifest nodes need to be included for the changeset to be complete
1594 1594 is non-trivial.
1595 1595
1596 1596 Another wrinkle is doing the reverse, figuring out which changeset in
1597 1597 the changegroup a particular filenode or manifestnode belongs to.
1598 1598
1599 1599 The caller can specify some nodes that must be included in the
1600 1600 changegroup using the extranodes argument. It should be a dict
1601 1601 where the keys are the filenames (or 1 for the manifest), and the
1602 1602 values are lists of (node, linknode) tuples, where node is a wanted
1603 1603 node and linknode is the changelog node that should be transmitted as
1604 1604 the linkrev.
1605 1605 """
1606 1606
1607 1607 if extranodes is None:
1608 1608 # can we go through the fast path?
1609 1609 heads.sort()
1610 1610 allheads = self.heads()
1611 1611 allheads.sort()
1612 1612 if heads == allheads:
1613 1613 common = []
1614 1614 # parents of bases are known from both sides
1615 1615 for n in bases:
1616 1616 for p in self.changelog.parents(n):
1617 1617 if p != nullid:
1618 1618 common.append(p)
1619 1619 return self._changegroup(common, source)
1620 1620
1621 1621 self.hook('preoutgoing', throw=True, source=source)
1622 1622
1623 1623 # Set up some initial variables
1624 1624 # Make it easy to refer to self.changelog
1625 1625 cl = self.changelog
1626 1626 # msng is short for missing - compute the list of changesets in this
1627 1627 # changegroup.
1628 1628 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1629 1629 self.changegroupinfo(msng_cl_lst, source)
1630 1630 # Some bases may turn out to be superfluous, and some heads may be
1631 1631 # too. nodesbetween will return the minimal set of bases and heads
1632 1632 # necessary to re-create the changegroup.
1633 1633
1634 1634 # Known heads are the list of heads that it is assumed the recipient
1635 1635 # of this changegroup will know about.
1636 1636 knownheads = {}
1637 1637 # We assume that all parents of bases are known heads.
1638 1638 for n in bases:
1639 1639 for p in cl.parents(n):
1640 1640 if p != nullid:
1641 1641 knownheads[p] = 1
1642 1642 knownheads = knownheads.keys()
1643 1643 if knownheads:
1644 1644 # Now that we know what heads are known, we can compute which
1645 1645 # changesets are known. The recipient must know about all
1646 1646 # changesets required to reach the known heads from the null
1647 1647 # changeset.
1648 1648 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1649 1649 junk = None
1650 1650 # Transform the list into an ersatz set.
1651 1651 has_cl_set = dict.fromkeys(has_cl_set)
1652 1652 else:
1653 1653 # If there were no known heads, the recipient cannot be assumed to
1654 1654 # know about any changesets.
1655 1655 has_cl_set = {}
1656 1656
1657 1657 # Make it easy to refer to self.manifest
1658 1658 mnfst = self.manifest
1659 1659 # We don't know which manifests are missing yet
1660 1660 msng_mnfst_set = {}
1661 1661 # Nor do we know which filenodes are missing.
1662 1662 msng_filenode_set = {}
1663 1663
1664 1664 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1665 1665 junk = None
1666 1666
1667 1667 # A changeset always belongs to itself, so the changenode lookup
1668 1668 # function for a changenode is identity.
1669 1669 def identity(x):
1670 1670 return x
1671 1671
1672 1672 # A function generating function. Sets up an environment for the
1673 1673 # inner function.
1674 1674 def cmp_by_rev_func(revlog):
1675 1675 # Compare two nodes by their revision number in the environment's
1676 1676 # revision history. Since the revision number both represents the
1677 1677 # most efficient order to read the nodes in, and represents a
1678 1678 # topological sorting of the nodes, this function is often useful.
1679 1679 def cmp_by_rev(a, b):
1680 1680 return cmp(revlog.rev(a), revlog.rev(b))
1681 1681 return cmp_by_rev
1682 1682
1683 1683 # If we determine that a particular file or manifest node must be a
1684 1684 # node that the recipient of the changegroup will already have, we can
1685 1685 # also assume the recipient will have all the parents. This function
1686 1686 # prunes them from the set of missing nodes.
1687 1687 def prune_parents(revlog, hasset, msngset):
1688 1688 haslst = hasset.keys()
1689 1689 haslst.sort(cmp_by_rev_func(revlog))
1690 1690 for node in haslst:
1691 1691 parentlst = [p for p in revlog.parents(node) if p != nullid]
1692 1692 while parentlst:
1693 1693 n = parentlst.pop()
1694 1694 if n not in hasset:
1695 1695 hasset[n] = 1
1696 1696 p = [p for p in revlog.parents(n) if p != nullid]
1697 1697 parentlst.extend(p)
1698 1698 for n in hasset:
1699 1699 msngset.pop(n, None)
1700 1700
1701 1701 # This is a function generating function used to set up an environment
1702 1702 # for the inner function to execute in.
1703 1703 def manifest_and_file_collector(changedfileset):
1704 1704 # This is an information gathering function that gathers
1705 1705 # information from each changeset node that goes out as part of
1706 1706 # the changegroup. The information gathered is a list of which
1707 1707 # manifest nodes are potentially required (the recipient may
1708 1708 # already have them) and total list of all files which were
1709 1709 # changed in any changeset in the changegroup.
1710 1710 #
1711 1711 # We also remember the first changenode each manifest was
1712 1712 # referenced by, so we can later determine which changenode 'owns'
1713 1713 # the manifest.
1714 1714 def collect_manifests_and_files(clnode):
1715 1715 c = cl.read(clnode)
1716 1716 for f in c[3]:
1717 1717 # This is to make sure we only have one instance of each
1718 1718 # filename string for each filename.
1719 1719 changedfileset.setdefault(f, f)
1720 1720 msng_mnfst_set.setdefault(c[0], clnode)
1721 1721 return collect_manifests_and_files
1722 1722
1723 1723 # Figure out which manifest nodes (of the ones we think might be part
1724 1724 # of the changegroup) the recipient must know about and remove them
1725 1725 # from the changegroup.
1726 1726 def prune_manifests():
1727 1727 has_mnfst_set = {}
1728 1728 for n in msng_mnfst_set:
1729 1729 # If a 'missing' manifest thinks it belongs to a changenode
1730 1730 # the recipient is assumed to have, obviously the recipient
1731 1731 # must have that manifest.
1732 1732 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1733 1733 if linknode in has_cl_set:
1734 1734 has_mnfst_set[n] = 1
1735 1735 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1736 1736
1737 1737 # Use the information collected in collect_manifests_and_files to say
1738 1738 # which changenode any manifestnode belongs to.
1739 1739 def lookup_manifest_link(mnfstnode):
1740 1740 return msng_mnfst_set[mnfstnode]
1741 1741
1742 1742 # A function generating function that sets up the initial environment
1743 1743 # for the inner function.
1744 1744 def filenode_collector(changedfiles):
1745 1745 next_rev = [0]
1746 1746 # This gathers information from each manifestnode included in the
1747 1747 # changegroup about which filenodes the manifest node references
1748 1748 # so we can include those in the changegroup too.
1749 1749 #
1750 1750 # It also remembers which changenode each filenode belongs to. It
1751 1751 # does this by assuming that a filenode belongs to the changenode
1752 1752 # the first manifest that references it belongs to.
1753 1753 def collect_msng_filenodes(mnfstnode):
1754 1754 r = mnfst.rev(mnfstnode)
1755 1755 if r == next_rev[0]:
1756 1756 # If the last rev we looked at was the one just previous,
1757 1757 # we only need to see a diff.
1758 1758 deltamf = mnfst.readdelta(mnfstnode)
1759 1759 # For each line in the delta
1760 1760 for f, fnode in deltamf.iteritems():
1761 1761 f = changedfiles.get(f, None)
1762 1762 # And if the file is in the list of files we care
1763 1763 # about.
1764 1764 if f is not None:
1765 1765 # Get the changenode this manifest belongs to
1766 1766 clnode = msng_mnfst_set[mnfstnode]
1767 1767 # Create the set of filenodes for the file if
1768 1768 # there isn't one already.
1769 1769 ndset = msng_filenode_set.setdefault(f, {})
1770 1770 # And set the filenode's changelog node to the
1771 1771 # manifest's if it hasn't been set already.
1772 1772 ndset.setdefault(fnode, clnode)
1773 1773 else:
1774 1774 # Otherwise we need a full manifest.
1775 1775 m = mnfst.read(mnfstnode)
1776 1776 # For every file we care about.
1777 1777 for f in changedfiles:
1778 1778 fnode = m.get(f, None)
1779 1779 # If it's in the manifest
1780 1780 if fnode is not None:
1781 1781 # See comments above.
1782 1782 clnode = msng_mnfst_set[mnfstnode]
1783 1783 ndset = msng_filenode_set.setdefault(f, {})
1784 1784 ndset.setdefault(fnode, clnode)
1785 1785 # Remember the revision we hope to see next.
1786 1786 next_rev[0] = r + 1
1787 1787 return collect_msng_filenodes
1788 1788
1789 1789 # We have a list of filenodes we think we need for a file, so remove
1790 1790 # all those we know the recipient must have.
1791 1791 def prune_filenodes(f, filerevlog):
1792 1792 msngset = msng_filenode_set[f]
1793 1793 hasset = {}
1794 1794 # If a 'missing' filenode thinks it belongs to a changenode we
1795 1795 # assume the recipient must have, then the recipient must have
1796 1796 # that filenode.
1797 1797 for n in msngset:
1798 1798 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1799 1799 if clnode in has_cl_set:
1800 1800 hasset[n] = 1
1801 1801 prune_parents(filerevlog, hasset, msngset)
1802 1802
1803 1803 # A function generating function that sets up a context for the
1804 1804 # inner function.
1805 1805 def lookup_filenode_link_func(fname):
1806 1806 msngset = msng_filenode_set[fname]
1807 1807 # Lookup the changenode the filenode belongs to.
1808 1808 def lookup_filenode_link(fnode):
1809 1809 return msngset[fnode]
1810 1810 return lookup_filenode_link
1811 1811
1812 1812 # Add the nodes that were explicitly requested.
1813 1813 def add_extra_nodes(name, nodes):
1814 1814 if not extranodes or name not in extranodes:
1815 1815 return
1816 1816
1817 1817 for node, linknode in extranodes[name]:
1818 1818 if node not in nodes:
1819 1819 nodes[node] = linknode
1820 1820
1821 1821 # Now that we have all these utility functions to help out and
1822 1822 # logically divide up the task, generate the group.
1823 1823 def gengroup():
1824 1824 # The set of changed files starts empty.
1825 1825 changedfiles = {}
1826 1826 # Create a changenode group generator that will call our functions
1827 1827 # back to lookup the owning changenode and collect information.
1828 1828 group = cl.group(msng_cl_lst, identity,
1829 1829 manifest_and_file_collector(changedfiles))
1830 1830 for chnk in group:
1831 1831 yield chnk
1832 1832
1833 1833 # The list of manifests has been collected by the generator
1834 1834 # calling our functions back.
1835 1835 prune_manifests()
1836 1836 add_extra_nodes(1, msng_mnfst_set)
1837 1837 msng_mnfst_lst = msng_mnfst_set.keys()
1838 1838 # Sort the manifestnodes by revision number.
1839 1839 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1840 1840 # Create a generator for the manifestnodes that calls our lookup
1841 1841 # and data collection functions back.
1842 1842 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1843 1843 filenode_collector(changedfiles))
1844 1844 for chnk in group:
1845 1845 yield chnk
1846 1846
1847 1847 # These are no longer needed, dereference and toss the memory for
1848 1848 # them.
1849 1849 msng_mnfst_lst = None
1850 1850 msng_mnfst_set.clear()
1851 1851
1852 1852 if extranodes:
1853 1853 for fname in extranodes:
1854 1854 if isinstance(fname, int):
1855 1855 continue
1856 1856 msng_filenode_set.setdefault(fname, {})
1857 1857 changedfiles[fname] = 1
1858 1858 # Go through all our files in order sorted by name.
1859 1859 for fname in util.sort(changedfiles):
1860 1860 filerevlog = self.file(fname)
1861 1861 if not len(filerevlog):
1862 1862 raise util.Abort(_("empty or missing revlog for %s") % fname)
1863 1863 # Toss out the filenodes that the recipient isn't really
1864 1864 # missing.
1865 1865 if fname in msng_filenode_set:
1866 1866 prune_filenodes(fname, filerevlog)
1867 1867 add_extra_nodes(fname, msng_filenode_set[fname])
1868 1868 msng_filenode_lst = msng_filenode_set[fname].keys()
1869 1869 else:
1870 1870 msng_filenode_lst = []
1871 1871 # If any filenodes are left, generate the group for them,
1872 1872 # otherwise don't bother.
1873 1873 if len(msng_filenode_lst) > 0:
1874 1874 yield changegroup.chunkheader(len(fname))
1875 1875 yield fname
1876 1876 # Sort the filenodes by their revision #
1877 1877 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1878 1878 # Create a group generator and only pass in a changenode
1879 1879 # lookup function as we need to collect no information
1880 1880 # from filenodes.
1881 1881 group = filerevlog.group(msng_filenode_lst,
1882 1882 lookup_filenode_link_func(fname))
1883 1883 for chnk in group:
1884 1884 yield chnk
1885 1885 if fname in msng_filenode_set:
1886 1886 # Don't need this anymore, toss it to free memory.
1887 1887 del msng_filenode_set[fname]
1888 1888 # Signal that no more groups are left.
1889 1889 yield changegroup.closechunk()
1890 1890
1891 1891 if msng_cl_lst:
1892 1892 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1893 1893
1894 1894 return util.chunkbuffer(gengroup())
1895 1895
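For reference, the extranodes argument described in the docstring of changegroupsubset has the following shape; the identifiers below are made-up placeholders, not real node hashes:

    # keys are filenames, or the integer 1 for the manifest; each value is a
    # list of (node, linknode) pairs, where linknode is transmitted as the
    # node's linkrev.
    extranodes = {
        'data/foo.txt': [('filenode-aaaa', 'changelog-node-1111')],
        1: [('manifestnode-bbbb', 'changelog-node-1111')],
    }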
1896 1896 def changegroup(self, basenodes, source):
1897 1897 # to avoid a race we use changegroupsubset() (issue1320)
1898 1898 return self.changegroupsubset(basenodes, self.heads(), source)
1899 1899
1900 1900 def _changegroup(self, common, source):
1901 1901 """Generate a changegroup of all nodes that we have that a recipient
1902 1902 doesn't.
1903 1903
1904 1904 This is much easier than the previous function as we can assume that
1905 1905 the recipient has any changenode we aren't sending them.
1906 1906
1907 1907 common is the set of common nodes between remote and self"""
1908 1908
1909 1909 self.hook('preoutgoing', throw=True, source=source)
1910 1910
1911 1911 cl = self.changelog
1912 1912 nodes = cl.findmissing(common)
1913 1913 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1914 1914 self.changegroupinfo(nodes, source)
1915 1915
1916 1916 def identity(x):
1917 1917 return x
1918 1918
1919 1919 def gennodelst(log):
1920 1920 for r in log:
1921 1921 if log.linkrev(r) in revset:
1922 1922 yield log.node(r)
1923 1923
1924 1924 def changed_file_collector(changedfileset):
1925 1925 def collect_changed_files(clnode):
1926 1926 c = cl.read(clnode)
1927 1927 for fname in c[3]:
1928 1928 changedfileset[fname] = 1
1929 1929 return collect_changed_files
1930 1930
1931 1931 def lookuprevlink_func(revlog):
1932 1932 def lookuprevlink(n):
1933 1933 return cl.node(revlog.linkrev(revlog.rev(n)))
1934 1934 return lookuprevlink
1935 1935
1936 1936 def gengroup():
1937 1937 # construct a list of all changed files
1938 1938 changedfiles = {}
1939 1939
1940 1940 for chnk in cl.group(nodes, identity,
1941 1941 changed_file_collector(changedfiles)):
1942 1942 yield chnk
1943 1943
1944 1944 mnfst = self.manifest
1945 1945 nodeiter = gennodelst(mnfst)
1946 1946 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1947 1947 yield chnk
1948 1948
1949 1949 for fname in util.sort(changedfiles):
1950 1950 filerevlog = self.file(fname)
1951 1951 if not len(filerevlog):
1952 1952 raise util.Abort(_("empty or missing revlog for %s") % fname)
1953 1953 nodeiter = gennodelst(filerevlog)
1954 1954 nodeiter = list(nodeiter)
1955 1955 if nodeiter:
1956 1956 yield changegroup.chunkheader(len(fname))
1957 1957 yield fname
1958 1958 lookup = lookuprevlink_func(filerevlog)
1959 1959 for chnk in filerevlog.group(nodeiter, lookup):
1960 1960 yield chnk
1961 1961
1962 1962 yield changegroup.closechunk()
1963 1963
1964 1964 if nodes:
1965 1965 self.hook('outgoing', node=hex(nodes[0]), source=source)
1966 1966
1967 1967 return util.chunkbuffer(gengroup())
1968 1968
1969 1969 def addchangegroup(self, source, srctype, url, emptyok=False):
1970 1970 """add changegroup to repo.
1971 1971
1972 1972 return values:
1973 1973 - nothing changed or no source: 0
1974 1974 - more heads than before: 1+added heads (2..n)
1975 1975 - fewer heads than before: -1-removed heads (-2..-n)
1976 1976 - number of heads stays the same: 1
1977 1977 """
1978 1978 def csmap(x):
1979 1979 self.ui.debug(_("add changeset %s\n") % short(x))
1980 1980 return len(cl)
1981 1981
1982 1982 def revmap(x):
1983 1983 return cl.rev(x)
1984 1984
1985 1985 if not source:
1986 1986 return 0
1987 1987
1988 1988 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1989 1989
1990 1990 changesets = files = revisions = 0
1991 1991
1992 1992 # write changelog data to temp files so concurrent readers will not see
1993 1993 # inconsistent view
1994 1994 cl = self.changelog
1995 1995 cl.delayupdate()
1996 1996 oldheads = len(cl.heads())
1997 1997
1998 1998 tr = self.transaction()
1999 1999 try:
2000 2000 trp = weakref.proxy(tr)
2001 2001 # pull off the changeset group
2002 2002 self.ui.status(_("adding changesets\n"))
2003 2003 cor = len(cl) - 1
2004 2004 chunkiter = changegroup.chunkiter(source)
2005 2005 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2006 2006 raise util.Abort(_("received changelog group is empty"))
2007 2007 cnr = len(cl) - 1
2008 2008 changesets = cnr - cor
2009 2009
2010 2010 # pull off the manifest group
2011 2011 self.ui.status(_("adding manifests\n"))
2012 2012 chunkiter = changegroup.chunkiter(source)
2013 2013 # no need to check for empty manifest group here:
2014 2014 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2015 2015 # no new manifest will be created and the manifest group will
2016 2016 # be empty during the pull
2017 2017 self.manifest.addgroup(chunkiter, revmap, trp)
2018 2018
2019 2019 # process the files
2020 2020 self.ui.status(_("adding file changes\n"))
2021 2021 while 1:
2022 2022 f = changegroup.getchunk(source)
2023 2023 if not f:
2024 2024 break
2025 2025 self.ui.debug(_("adding %s revisions\n") % f)
2026 2026 fl = self.file(f)
2027 2027 o = len(fl)
2028 2028 chunkiter = changegroup.chunkiter(source)
2029 2029 if fl.addgroup(chunkiter, revmap, trp) is None:
2030 2030 raise util.Abort(_("received file revlog group is empty"))
2031 2031 revisions += len(fl) - o
2032 2032 files += 1
2033 2033
2034 2034 # make changelog see real files again
2035 2035 cl.finalize(trp)
2036 2036
2037 2037 newheads = len(self.changelog.heads())
2038 2038 heads = ""
2039 2039 if oldheads and newheads != oldheads:
2040 2040 heads = _(" (%+d heads)") % (newheads - oldheads)
2041 2041
2042 2042 self.ui.status(_("added %d changesets"
2043 2043 " with %d changes to %d files%s\n")
2044 2044 % (changesets, revisions, files, heads))
2045 2045
2046 2046 if changesets > 0:
2047 2047 self.hook('pretxnchangegroup', throw=True,
2048 2048 node=hex(self.changelog.node(cor+1)), source=srctype,
2049 2049 url=url)
2050 2050
2051 2051 tr.close()
2052 2052 finally:
2053 2053 del tr
2054 2054
2055 2055 if changesets > 0:
2056 2056 # forcefully update the on-disk branch cache
2057 2057 self.ui.debug(_("updating the branch cache\n"))
2058 2058 self.branchtags()
2059 2059 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
2060 2060 source=srctype, url=url)
2061 2061
2062 2062 for i in xrange(cor + 1, cnr + 1):
2063 2063 self.hook("incoming", node=hex(self.changelog.node(i)),
2064 2064 source=srctype, url=url)
2065 2065
2066 2066 # never return 0 here:
2067 2067 if newheads < oldheads:
2068 2068 return newheads - oldheads - 1
2069 2069 else:
2070 2070 return newheads - oldheads + 1
2071 2071
2072 2072
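The return-value rule documented in addchangegroup (never 0, sign follows the change in head count) is easy to check in isolation; a small sketch of just that rule:

    def addchangegroup_result(oldheads, newheads):
        # positive when the number of heads grew or stayed the same,
        # negative when it shrank, never zero
        if newheads < oldheads:
            return newheads - oldheads - 1
        return newheads - oldheads + 1

    # addchangegroup_result(1, 1) == 1
    # addchangegroup_result(1, 3) == 3    (two heads added)
    # addchangegroup_result(3, 1) == -3   (two heads removed)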
2073 2073 def stream_in(self, remote):
2074 2074 fp = remote.stream_out()
2075 2075 l = fp.readline()
2076 2076 try:
2077 2077 resp = int(l)
2078 2078 except ValueError:
2079 raise util.UnexpectedOutput(
2079 raise error.ResponseError(
2080 2080 _('Unexpected response from remote server:'), l)
2081 2081 if resp == 1:
2082 2082 raise util.Abort(_('operation forbidden by server'))
2083 2083 elif resp == 2:
2084 2084 raise util.Abort(_('locking the remote repository failed'))
2085 2085 elif resp != 0:
2086 2086 raise util.Abort(_('the server sent an unknown error code'))
2087 2087 self.ui.status(_('streaming all changes\n'))
2088 2088 l = fp.readline()
2089 2089 try:
2090 2090 total_files, total_bytes = map(int, l.split(' ', 1))
2091 2091 except (ValueError, TypeError):
2092 raise util.UnexpectedOutput(
2092 raise error.ResponseError(
2093 2093 _('Unexpected response from remote server:'), l)
2094 2094 self.ui.status(_('%d files to transfer, %s of data\n') %
2095 2095 (total_files, util.bytecount(total_bytes)))
2096 2096 start = time.time()
2097 2097 for i in xrange(total_files):
2098 2098 # XXX doesn't support '\n' or '\r' in filenames
2099 2099 l = fp.readline()
2100 2100 try:
2101 2101 name, size = l.split('\0', 1)
2102 2102 size = int(size)
2103 2103 except (ValueError, TypeError):
2104 raise util.UnexpectedOutput(
2104 raise error.ResponseError(
2105 2105 _('Unexpected response from remote server:'), l)
2106 2106 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2107 2107 ofp = self.sopener(name, 'w')
2108 2108 for chunk in util.filechunkiter(fp, limit=size):
2109 2109 ofp.write(chunk)
2110 2110 ofp.close()
2111 2111 elapsed = time.time() - start
2112 2112 if elapsed <= 0:
2113 2113 elapsed = 0.001
2114 2114 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2115 2115 (util.bytecount(total_bytes), elapsed,
2116 2116 util.bytecount(total_bytes / elapsed)))
2117 2117 self.invalidate()
2118 2118 return len(self.heads()) + 1
2119 2119
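The wire format consumed by stream_in can be exercised on its own. A minimal parser sketch, assuming the stream is exactly a status line, a "<files> <bytes>" line, and then "<name>\0<size>" headers each followed by the raw file data (names here are illustrative, not Mercurial API):

    from io import BytesIO

    def parse_stream(fp):
        # toy reader for the stream_out payload handled above
        if int(fp.readline().decode('ascii')) != 0:
            raise ValueError('server refused streaming')
        total_files, total_bytes = map(int, fp.readline().decode('ascii').split(' ', 1))
        files = {}
        for _ in range(total_files):
            name, size = fp.readline().decode('ascii').rstrip('\n').split('\0', 1)
            files[name] = fp.read(int(size))
        return files

    # parse_stream(BytesIO(b'0\n1 5\nfoo\x005\nhello')) == {'foo': b'hello'}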
2120 2120 def clone(self, remote, heads=[], stream=False):
2121 2121 '''clone remote repository.
2122 2122
2123 2123 keyword arguments:
2124 2124 heads: list of revs to clone (forces use of pull)
2125 2125 stream: use streaming clone if possible'''
2126 2126
2127 2127 # now, all clients that can request uncompressed clones can
2128 2128 # read repo formats supported by all servers that can serve
2129 2129 # them.
2130 2130
2131 2131 # if revlog format changes, client will have to check version
2132 2132 # and format flags on "stream" capability, and use
2133 2133 # uncompressed only if compatible.
2134 2134
2135 2135 if stream and not heads and remote.capable('stream'):
2136 2136 return self.stream_in(remote)
2137 2137 return self.pull(remote, heads)
2138 2138
2139 2139 # used to avoid circular references so destructors work
2140 2140 def aftertrans(files):
2141 2141 renamefiles = [tuple(t) for t in files]
2142 2142 def a():
2143 2143 for src, dest in renamefiles:
2144 2144 util.rename(src, dest)
2145 2145 return a
2146 2146
2147 2147 def instance(ui, path, create):
2148 2148 return localrepository(ui, util.drop_scheme('file', path), create)
2149 2149
2150 2150 def islocal(path):
2151 2151 return True
@@ -1,247 +1,247
1 1 # sshrepo.py - ssh repository proxy class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex
9 9 from i18n import _
10 10 import repo, os, re, util, error
11 11
12 12 class remotelock(object):
13 13 def __init__(self, repo):
14 14 self.repo = repo
15 15 def release(self):
16 16 self.repo.unlock()
17 17 self.repo = None
18 18 def __del__(self):
19 19 if self.repo:
20 20 self.release()
21 21
22 22 class sshrepository(repo.repository):
23 23 def __init__(self, ui, path, create=0):
24 24 self._url = path
25 25 self.ui = ui
26 26
27 27 m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path)
28 28 if not m:
29 29 self.raise_(error.RepoError(_("couldn't parse location %s") % path))
30 30
31 31 self.user = m.group(2)
32 32 self.host = m.group(3)
33 33 self.port = m.group(5)
34 34 self.path = m.group(7) or "."
35 35
36 36 sshcmd = self.ui.config("ui", "ssh", "ssh")
37 37 remotecmd = self.ui.config("ui", "remotecmd", "hg")
38 38
39 39 args = util.sshargs(sshcmd, self.host, self.user, self.port)
40 40
41 41 if create:
42 42 cmd = '%s %s "%s init %s"'
43 43 cmd = cmd % (sshcmd, args, remotecmd, self.path)
44 44
45 45 ui.note(_('running %s\n') % cmd)
46 46 res = util.system(cmd)
47 47 if res != 0:
48 48 self.raise_(error.RepoError(_("could not create remote repo")))
49 49
50 50 self.validate_repo(ui, sshcmd, args, remotecmd)
51 51
52 52 def url(self):
53 53 return self._url
54 54
55 55 def validate_repo(self, ui, sshcmd, args, remotecmd):
56 56 # clean up previous run
57 57 self.cleanup()
58 58
59 59 cmd = '%s %s "%s -R %s serve --stdio"'
60 60 cmd = cmd % (sshcmd, args, remotecmd, self.path)
61 61
62 62 cmd = util.quotecommand(cmd)
63 63 ui.note(_('running %s\n') % cmd)
64 64 self.pipeo, self.pipei, self.pipee = util.popen3(cmd, 'b')
65 65
66 66 # skip any noise generated by remote shell
67 67 self.do_cmd("hello")
68 68 r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
69 69 lines = ["", "dummy"]
70 70 max_noise = 500
71 71 while lines[-1] and max_noise:
72 72 l = r.readline()
73 73 self.readerr()
74 74 if lines[-1] == "1\n" and l == "\n":
75 75 break
76 76 if l:
77 77 ui.debug(_("remote: "), l)
78 78 lines.append(l)
79 79 max_noise -= 1
80 80 else:
81 81 self.raise_(error.RepoError(_("no suitable response from remote hg")))
82 82
83 83 self.capabilities = util.set()
84 84 lines.reverse()
85 85 for l in lines:
86 86 if l.startswith("capabilities:"):
87 87 self.capabilities.update(l[:-1].split(":")[1].split())
88 88 break
89 89
90 90 def readerr(self):
91 91 while 1:
92 92 size = util.fstat(self.pipee).st_size
93 93 if size == 0: break
94 94 l = self.pipee.readline()
95 95 if not l: break
96 96 self.ui.status(_("remote: "), l)
97 97
98 98 def raise_(self, exception):
99 99 self.cleanup()
100 100 raise exception
101 101
102 102 def cleanup(self):
103 103 try:
104 104 self.pipeo.close()
105 105 self.pipei.close()
106 106 # read the error descriptor until EOF
107 107 for l in self.pipee:
108 108 self.ui.status(_("remote: "), l)
109 109 self.pipee.close()
110 110 except:
111 111 pass
112 112
113 113 __del__ = cleanup
114 114
115 115 def do_cmd(self, cmd, **args):
116 116 self.ui.debug(_("sending %s command\n") % cmd)
117 117 self.pipeo.write("%s\n" % cmd)
118 118 for k, v in args.iteritems():
119 119 self.pipeo.write("%s %d\n" % (k, len(v)))
120 120 self.pipeo.write(v)
121 121 self.pipeo.flush()
122 122
123 123 return self.pipei
124 124
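do_cmd's framing is easy to see with a tiny encoder that builds the same bytes it writes: the command name on its own line, then a "<key> <value length>" line and the raw value for each keyword argument (a sketch of the format, not part of this class):

    def encode_command(cmd, **args):
        # mirror what do_cmd writes to the remote hg process
        out = "%s\n" % cmd
        for k, v in args.items():
            out += "%s %d\n%s" % (k, len(v), v)
        return out

    # encode_command("lookup", key="tip") == "lookup\nkey 3\ntip"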
125 125 def call(self, cmd, **args):
126 126 self.do_cmd(cmd, **args)
127 127 return self._recv()
128 128
129 129 def _recv(self):
130 130 l = self.pipei.readline()
131 131 self.readerr()
132 132 try:
133 133 l = int(l)
134 134 except:
135 self.raise_(util.UnexpectedOutput(_("unexpected response:"), l))
135 self.raise_(error.ResponseError(_("unexpected response:"), l))
136 136 return self.pipei.read(l)
137 137
138 138 def _send(self, data, flush=False):
139 139 self.pipeo.write("%d\n" % len(data))
140 140 if data:
141 141 self.pipeo.write(data)
142 142 if flush:
143 143 self.pipeo.flush()
144 144 self.readerr()
145 145
146 146 def lock(self):
147 147 self.call("lock")
148 148 return remotelock(self)
149 149
150 150 def unlock(self):
151 151 self.call("unlock")
152 152
153 153 def lookup(self, key):
154 154 self.requirecap('lookup', _('look up remote revision'))
155 155 d = self.call("lookup", key=key)
156 156 success, data = d[:-1].split(" ", 1)
157 157 if int(success):
158 158 return bin(data)
159 159 else:
160 160 self.raise_(error.RepoError(data))
161 161
162 162 def heads(self):
163 163 d = self.call("heads")
164 164 try:
165 165 return map(bin, d[:-1].split(" "))
166 166 except:
167 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
167 self.raise_(error.ResponseError(_("unexpected response:"), d))
168 168
169 169 def branches(self, nodes):
170 170 n = " ".join(map(hex, nodes))
171 171 d = self.call("branches", nodes=n)
172 172 try:
173 173 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
174 174 return br
175 175 except:
176 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
176 self.raise_(error.ResponseError(_("unexpected response:"), d))
177 177
178 178 def between(self, pairs):
179 179 n = " ".join(["-".join(map(hex, p)) for p in pairs])
180 180 d = self.call("between", pairs=n)
181 181 try:
182 182 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
183 183 return p
184 184 except:
185 self.raise_(util.UnexpectedOutput(_("unexpected response:"), d))
185 self.raise_(error.ResponseError(_("unexpected response:"), d))
186 186
187 187 def changegroup(self, nodes, kind):
188 188 n = " ".join(map(hex, nodes))
189 189 return self.do_cmd("changegroup", roots=n)
190 190
191 191 def changegroupsubset(self, bases, heads, kind):
192 192 self.requirecap('changegroupsubset', _('look up remote changes'))
193 193 bases = " ".join(map(hex, bases))
194 194 heads = " ".join(map(hex, heads))
195 195 return self.do_cmd("changegroupsubset", bases=bases, heads=heads)
196 196
197 197 def unbundle(self, cg, heads, source):
198 198 d = self.call("unbundle", heads=' '.join(map(hex, heads)))
199 199 if d:
200 200 # remote may send "unsynced changes"
201 201 self.raise_(error.RepoError(_("push refused: %s") % d))
202 202
203 203 while 1:
204 204 d = cg.read(4096)
205 205 if not d:
206 206 break
207 207 self._send(d)
208 208
209 209 self._send("", flush=True)
210 210
211 211 r = self._recv()
212 212 if r:
213 213 # remote may send "unsynced changes"
214 214 self.raise_(error.RepoError(_("push failed: %s") % r))
215 215
216 216 r = self._recv()
217 217 try:
218 218 return int(r)
219 219 except:
220 self.raise_(util.UnexpectedOutput(_("unexpected response:"), r))
220 self.raise_(error.ResponseError(_("unexpected response:"), r))
221 221
222 222 def addchangegroup(self, cg, source, url):
223 223 d = self.call("addchangegroup")
224 224 if d:
225 225 self.raise_(error.RepoError(_("push refused: %s") % d))
226 226 while 1:
227 227 d = cg.read(4096)
228 228 if not d:
229 229 break
230 230 self.pipeo.write(d)
231 231 self.readerr()
232 232
233 233 self.pipeo.flush()
234 234
235 235 self.readerr()
236 236 r = self._recv()
237 237 if not r:
238 238 return 1
239 239 try:
240 240 return int(r)
241 241 except:
242 self.raise_(util.UnexpectedOutput(_("unexpected response:"), r))
242 self.raise_(error.ResponseError(_("unexpected response:"), r))
243 243
244 244 def stream_out(self):
245 245 return self.do_cmd('stream_out')
246 246
247 247 instance = sshrepository
@@ -1,2022 +1,2019
1 1 """
2 2 util.py - Mercurial utility functions and platform specific implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import _
16 16 import cStringIO, errno, getpass, re, shutil, sys, tempfile, traceback
17 17 import os, stat, threading, time, calendar, ConfigParser, locale, glob, osutil
18 18 import imp
19 19
20 20 # Python compatibility
21 21
22 22 try:
23 23 set = set
24 24 frozenset = frozenset
25 25 except NameError:
26 26 from sets import Set as set, ImmutableSet as frozenset
27 27
28 28 _md5 = None
29 29 def md5(s):
30 30 global _md5
31 31 if _md5 is None:
32 32 try:
33 33 import hashlib
34 34 _md5 = hashlib.md5
35 35 except ImportError:
36 36 import md5
37 37 _md5 = md5.md5
38 38 return _md5(s)
39 39
40 40 _sha1 = None
41 41 def sha1(s):
42 42 global _sha1
43 43 if _sha1 is None:
44 44 try:
45 45 import hashlib
46 46 _sha1 = hashlib.sha1
47 47 except ImportError:
48 48 import sha
49 49 _sha1 = sha.sha
50 50 return _sha1(s)
51 51
52 52 try:
53 53 import subprocess
54 54 subprocess.Popen # trigger ImportError early
55 55 closefds = os.name == 'posix'
56 56 def popen2(cmd, mode='t', bufsize=-1):
57 57 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
58 58 close_fds=closefds,
59 59 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
60 60 return p.stdin, p.stdout
61 61 def popen3(cmd, mode='t', bufsize=-1):
62 62 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
63 63 close_fds=closefds,
64 64 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
65 65 stderr=subprocess.PIPE)
66 66 return p.stdin, p.stdout, p.stderr
67 67 def Popen3(cmd, capturestderr=False, bufsize=-1):
68 68 stderr = capturestderr and subprocess.PIPE or None
69 69 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
70 70 close_fds=closefds,
71 71 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
72 72 stderr=stderr)
73 73 p.fromchild = p.stdout
74 74 p.tochild = p.stdin
75 75 p.childerr = p.stderr
76 76 return p
77 77 except ImportError:
78 78 subprocess = None
79 79 from popen2 import Popen3
80 80 popen2 = os.popen2
81 81 popen3 = os.popen3
82 82
83 83
84 84 _encodingfixup = {'646': 'ascii', 'ANSI_X3.4-1968': 'ascii'}
85 85
86 86 try:
87 87 _encoding = os.environ.get("HGENCODING")
88 88 if sys.platform == 'darwin' and not _encoding:
89 89 # On darwin, getpreferredencoding ignores the locale environment and
90 90 # always returns mac-roman. We override this if the environment is
91 91 # not C (has been customized by the user).
92 92 locale.setlocale(locale.LC_CTYPE, '')
93 93 _encoding = locale.getlocale()[1]
94 94 if not _encoding:
95 95 _encoding = locale.getpreferredencoding() or 'ascii'
96 96 _encoding = _encodingfixup.get(_encoding, _encoding)
97 97 except locale.Error:
98 98 _encoding = 'ascii'
99 99 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
100 100 _fallbackencoding = 'ISO-8859-1'
101 101
102 102 def tolocal(s):
103 103 """
104 104 Convert a string from internal UTF-8 to local encoding
105 105
106 106 All internal strings should be UTF-8 but some repos before the
107 107 implementation of locale support may contain latin1 or possibly
108 108 other character sets. We attempt to decode everything strictly
109 109 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
110 110 replace unknown characters.
111 111 """
112 112 for e in ('UTF-8', _fallbackencoding):
113 113 try:
114 114 u = s.decode(e) # attempt strict decoding
115 115 return u.encode(_encoding, "replace")
116 116 except LookupError, k:
117 117 raise Abort(_("%s, please check your locale settings") % k)
118 118 except UnicodeDecodeError:
119 119 pass
120 120 u = s.decode("utf-8", "replace") # last ditch
121 121 return u.encode(_encoding, "replace")
122 122
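The fallback chain in tolocal can be shown in miniature; a sketch that tries the same encodings in the same order, with the target encoding as a stand-in for _encoding:

    def utf8_to_local(s, localenc='ascii'):
        # strict UTF-8 first, then the Latin-1 fallback, finally UTF-8 with
        # replacement characters, mirroring the order used by tolocal above
        for enc in ('UTF-8', 'ISO-8859-1'):
            try:
                return s.decode(enc).encode(localenc, 'replace')
            except UnicodeDecodeError:
                pass
        return s.decode('UTF-8', 'replace').encode(localenc, 'replace')

    # utf8_to_local('caf\xc3\xa9', 'latin-1') == 'caf\xe9'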
123 123 def fromlocal(s):
124 124 """
125 125 Convert a string from the local character encoding to UTF-8
126 126
127 127 We attempt to decode strings using the encoding mode set by
128 128 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
129 129 characters will cause an error message. Other modes include
130 130 'replace', which replaces unknown characters with a special
131 131 Unicode character, and 'ignore', which drops the character.
132 132 """
133 133 try:
134 134 return s.decode(_encoding, _encodingmode).encode("utf-8")
135 135 except UnicodeDecodeError, inst:
136 136 sub = s[max(0, inst.start-10):inst.start+10]
137 137 raise Abort("decoding near '%s': %s!" % (sub, inst))
138 138 except LookupError, k:
139 139 raise Abort(_("%s, please check your locale settings") % k)
140 140
141 141 def locallen(s):
142 142 """Find the length in characters of a local string"""
143 143 return len(s.decode(_encoding, "replace"))
144 144
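# Editor's sketch (not part of this changeset): how the encoding helpers above
# fit together.  Assumes the definitions above are in scope; actual results
# depend on HGENCODING/HGENCODINGMODE and the active locale.
def _encoding_example():
    local = tolocal('caf\xc3\xa9')   # internal UTF-8 bytes -> local encoding
    utf8 = fromlocal(local)          # local encoding -> UTF-8; may raise Abort in 'strict' mode
    return utf8, locallen(local)     # locallen() counts characters, not bytes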
145 145 def version():
146 146 """Return version information if available."""
147 147 try:
148 148 import __version__
149 149 return __version__.version
150 150 except ImportError:
151 151 return 'unknown'
152 152
153 153 # used by parsedate
154 154 defaultdateformats = (
155 155 '%Y-%m-%d %H:%M:%S',
156 156 '%Y-%m-%d %I:%M:%S%p',
157 157 '%Y-%m-%d %H:%M',
158 158 '%Y-%m-%d %I:%M%p',
159 159 '%Y-%m-%d',
160 160 '%m-%d',
161 161 '%m/%d',
162 162 '%m/%d/%y',
163 163 '%m/%d/%Y',
164 164 '%a %b %d %H:%M:%S %Y',
165 165 '%a %b %d %I:%M:%S%p %Y',
166 166 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
167 167 '%b %d %H:%M:%S %Y',
168 168 '%b %d %I:%M:%S%p %Y',
169 169 '%b %d %H:%M:%S',
170 170 '%b %d %I:%M:%S%p',
171 171 '%b %d %H:%M',
172 172 '%b %d %I:%M%p',
173 173 '%b %d %Y',
174 174 '%b %d',
175 175 '%H:%M:%S',
176 176 '%I:%M:%S%p',
177 177 '%H:%M',
178 178 '%I:%M%p',
179 179 )
180 180
181 181 extendeddateformats = defaultdateformats + (
182 182 "%Y",
183 183 "%Y-%m",
184 184 "%b",
185 185 "%b %Y",
186 186 )
187 187
188 188 class SignalInterrupt(Exception):
189 189 """Exception raised on SIGTERM and SIGHUP."""
190 190
191 191 # differences from SafeConfigParser:
192 192 # - case-sensitive keys
193 193 # - allows values that are not strings (this means that you may not
194 194 # be able to save the configuration to a file)
195 195 class configparser(ConfigParser.SafeConfigParser):
196 196 def optionxform(self, optionstr):
197 197 return optionstr
198 198
199 199 def set(self, section, option, value):
200 200 return ConfigParser.ConfigParser.set(self, section, option, value)
201 201
202 202 def _interpolate(self, section, option, rawval, vars):
203 203 if not isinstance(rawval, basestring):
204 204 return rawval
205 205 return ConfigParser.SafeConfigParser._interpolate(self, section,
206 206 option, rawval, vars)
207 207
208 208 def cachefunc(func):
209 209 '''cache the result of function calls'''
210 210 # XXX doesn't handle keywords args
211 211 cache = {}
212 212 if func.func_code.co_argcount == 1:
213 213 # we gain a small amount of time because
214 214 # we don't need to pack/unpack the list
215 215 def f(arg):
216 216 if arg not in cache:
217 217 cache[arg] = func(arg)
218 218 return cache[arg]
219 219 else:
220 220 def f(*args):
221 221 if args not in cache:
222 222 cache[args] = func(*args)
223 223 return cache[args]
224 224
225 225 return f
226 226
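# Editor's sketch (not part of this changeset): cachefunc() memoizes by
# positional arguments only, as the XXX above notes.  Assumes the definitions
# above are in scope.
def _cachefunc_example():
    calls = []
    def square(x):
        calls.append(x)
        return x * x
    square = cachefunc(square)
    square(4)
    square(4)                        # served from the cache; square() ran only once
    return calls == [4] and square(4) == 16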
227 227 def pipefilter(s, cmd):
228 228 '''filter string S through command CMD, returning its output'''
229 229 (pin, pout) = popen2(cmd, 'b')
230 230 def writer():
231 231 try:
232 232 pin.write(s)
233 233 pin.close()
234 234 except IOError, inst:
235 235 if inst.errno != errno.EPIPE:
236 236 raise
237 237
238 238 # we should use select instead on UNIX, but this will work on most
239 239 # systems, including Windows
240 240 w = threading.Thread(target=writer)
241 241 w.start()
242 242 f = pout.read()
243 243 pout.close()
244 244 w.join()
245 245 return f
246 246
247 247 def tempfilter(s, cmd):
248 248 '''filter string S through a pair of temporary files with CMD.
249 249 CMD is used as a template to create the real command to be run,
250 250 with the strings INFILE and OUTFILE replaced by the real names of
251 251 the temporary files generated.'''
252 252 inname, outname = None, None
253 253 try:
254 254 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
255 255 fp = os.fdopen(infd, 'wb')
256 256 fp.write(s)
257 257 fp.close()
258 258 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
259 259 os.close(outfd)
260 260 cmd = cmd.replace('INFILE', inname)
261 261 cmd = cmd.replace('OUTFILE', outname)
262 262 code = os.system(cmd)
263 263 if sys.platform == 'OpenVMS' and code & 1:
264 264 code = 0
265 265 if code: raise Abort(_("command '%s' failed: %s") %
266 266 (cmd, explain_exit(code)))
267 267 return open(outname, 'rb').read()
268 268 finally:
269 269 try:
270 270 if inname: os.unlink(inname)
271 271 except: pass
272 272 try:
273 273 if outname: os.unlink(outname)
274 274 except: pass
275 275
276 276 filtertable = {
277 277 'tempfile:': tempfilter,
278 278 'pipe:': pipefilter,
279 279 }
280 280
281 281 def filter(s, cmd):
282 282 "filter a string through a command that transforms its input to its output"
283 283 for name, fn in filtertable.iteritems():
284 284 if cmd.startswith(name):
285 285 return fn(s, cmd[len(name):].lstrip())
286 286 return pipefilter(s, cmd)
287 287
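# Editor's sketch (not part of this changeset): filter() dispatches on the
# 'pipe:'/'tempfile:' prefixes registered in filtertable and falls back to
# pipefilter().  Assumes a POSIX-like shell; 'tr' and 'cp' are only
# illustrative commands.
def _filter_example():
    upper = filter('hello\n', 'pipe: tr a-z A-Z')             # -> 'HELLO\n'
    copy = filter('hello\n', 'tempfile: cp INFILE OUTFILE')   # round-trips via temp files
    return upper, copy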
288 288 def binary(s):
289 289 """return true if a string is binary data"""
290 290 if s and '\0' in s:
291 291 return True
292 292 return False
293 293
294 294 def unique(g):
295 295 """return the unique elements of iterable g"""
296 296 return dict.fromkeys(g).keys()
297 297
298 298 def sort(l):
299 299 if not isinstance(l, list):
300 300 l = list(l)
301 301 l.sort()
302 302 return l
303 303
304 304 def increasingchunks(source, min=1024, max=65536):
305 305 '''return no less than min bytes per chunk while data remains,
306 306 doubling min after each chunk until it reaches max'''
307 307 def log2(x):
308 308 if not x:
309 309 return 0
310 310 i = 0
311 311 while x:
312 312 x >>= 1
313 313 i += 1
314 314 return i - 1
315 315
316 316 buf = []
317 317 blen = 0
318 318 for chunk in source:
319 319 buf.append(chunk)
320 320 blen += len(chunk)
321 321 if blen >= min:
322 322 if min < max:
323 323 min = min << 1
324 324 nmin = 1 << log2(blen)
325 325 if nmin > min:
326 326 min = nmin
327 327 if min > max:
328 328 min = max
329 329 yield ''.join(buf)
330 330 blen = 0
331 331 buf = []
332 332 if buf:
333 333 yield ''.join(buf)
334 334
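# Editor's sketch (not part of this changeset): increasingchunks() coalesces an
# iterator of small strings into progressively larger chunks, starting near 1k
# and doubling towards 64k.  Assumes the definitions above are in scope.
def _increasingchunks_example():
    source = ('x' * 100 for i in xrange(100))    # 100 pieces of 100 bytes
    sizes = [len(c) for c in increasingchunks(source)]
    return sizes                                 # [1100, 2100, 4100, 2700] for this input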
335 335 class Abort(Exception):
336 336 """Raised if a command needs to print an error and exit."""
337 337
338 class UnexpectedOutput(Abort):
339 """Raised to print an error with part of output and exit."""
340
341 338 def always(fn): return True
342 339 def never(fn): return False
343 340
344 341 def expand_glob(pats):
345 342 '''On Windows, expand the implicit globs in a list of patterns'''
346 343 if os.name != 'nt':
347 344 return list(pats)
348 345 ret = []
349 346 for p in pats:
350 347 kind, name = patkind(p, None)
351 348 if kind is None:
352 349 globbed = glob.glob(name)
353 350 if globbed:
354 351 ret.extend(globbed)
355 352 continue
356 353 # if we couldn't expand the glob, just keep it around
357 354 ret.append(p)
358 355 return ret
359 356
360 357 def patkind(name, default):
361 358 """Split a string into an optional pattern kind prefix and the
362 359 actual pattern."""
363 360 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
364 361 if name.startswith(prefix + ':'): return name.split(':', 1)
365 362 return default, name
366 363
367 364 def globre(pat, head='^', tail='$'):
368 365 "convert a glob pattern into a regexp"
369 366 i, n = 0, len(pat)
370 367 res = ''
371 368 group = 0
372 369 def peek(): return i < n and pat[i]
373 370 while i < n:
374 371 c = pat[i]
375 372 i = i+1
376 373 if c == '*':
377 374 if peek() == '*':
378 375 i += 1
379 376 res += '.*'
380 377 else:
381 378 res += '[^/]*'
382 379 elif c == '?':
383 380 res += '.'
384 381 elif c == '[':
385 382 j = i
386 383 if j < n and pat[j] in '!]':
387 384 j += 1
388 385 while j < n and pat[j] != ']':
389 386 j += 1
390 387 if j >= n:
391 388 res += '\\['
392 389 else:
393 390 stuff = pat[i:j].replace('\\','\\\\')
394 391 i = j + 1
395 392 if stuff[0] == '!':
396 393 stuff = '^' + stuff[1:]
397 394 elif stuff[0] == '^':
398 395 stuff = '\\' + stuff
399 396 res = '%s[%s]' % (res, stuff)
400 397 elif c == '{':
401 398 group += 1
402 399 res += '(?:'
403 400 elif c == '}' and group:
404 401 res += ')'
405 402 group -= 1
406 403 elif c == ',' and group:
407 404 res += '|'
408 405 elif c == '\\':
409 406 p = peek()
410 407 if p:
411 408 i += 1
412 409 res += re.escape(p)
413 410 else:
414 411 res += re.escape(c)
415 412 else:
416 413 res += re.escape(c)
417 414 return head + res + tail
418 415
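# Editor's sketch (not part of this changeset): globre() turns a glob into an
# anchored regular expression; '**' crosses directory boundaries while '*'
# stays within one component.  Assumes the definitions above are in scope.
def _globre_example():
    pat = re.compile(globre('src/**/*.py'))
    return (bool(pat.match('src/a/b/c.py')),     # True: '**' matches nested dirs
            bool(pat.match('lib/a.py')))         # False: anchored at 'src/'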
419 416 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
420 417
421 418 def pathto(root, n1, n2):
422 419 '''return the relative path from one place to another.
423 420 root should use os.sep to separate directories
424 421 n1 should use os.sep to separate directories
425 422 n2 should use "/" to separate directories
426 423 returns an os.sep-separated path.
427 424
428 425 If n1 is a relative path, it's assumed it's
429 426 relative to root.
430 427 n2 should always be relative to root.
431 428 '''
432 429 if not n1: return localpath(n2)
433 430 if os.path.isabs(n1):
434 431 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
435 432 return os.path.join(root, localpath(n2))
436 433 n2 = '/'.join((pconvert(root), n2))
437 434 a, b = splitpath(n1), n2.split('/')
438 435 a.reverse()
439 436 b.reverse()
440 437 while a and b and a[-1] == b[-1]:
441 438 a.pop()
442 439 b.pop()
443 440 b.reverse()
444 441 return os.sep.join((['..'] * len(a)) + b) or '.'
445 442
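# Editor's sketch (not part of this changeset): pathto() computes the relative
# hop between two locations under the same root.  Assumes the definitions above
# are in scope and a POSIX os.sep; the paths are illustrative.
def _pathto_example():
    # from the directory a/b to the file a/c/y, both relative to /repo
    return pathto('/repo', 'a/b', 'a/c/y')       # -> '../c/y'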
446 443 def canonpath(root, cwd, myname):
447 444 """return the canonical path of myname, given cwd and root"""
448 445 if root == os.sep:
449 446 rootsep = os.sep
450 447 elif endswithsep(root):
451 448 rootsep = root
452 449 else:
453 450 rootsep = root + os.sep
454 451 name = myname
455 452 if not os.path.isabs(name):
456 453 name = os.path.join(root, cwd, name)
457 454 name = os.path.normpath(name)
458 455 audit_path = path_auditor(root)
459 456 if name != rootsep and name.startswith(rootsep):
460 457 name = name[len(rootsep):]
461 458 audit_path(name)
462 459 return pconvert(name)
463 460 elif name == root:
464 461 return ''
465 462 else:
466 463 # Determine whether `name' is in the hierarchy at or beneath `root',
467 464 # by iterating name=dirname(name) until that causes no change (can't
468 465 # check name == '/', because that doesn't work on windows). For each
469 466 # `name', compare dev/inode numbers. If they match, the list `rel'
470 467 # holds the reversed list of components making up the relative file
471 468 # name we want.
472 469 root_st = os.stat(root)
473 470 rel = []
474 471 while True:
475 472 try:
476 473 name_st = os.stat(name)
477 474 except OSError:
478 475 break
479 476 if samestat(name_st, root_st):
480 477 if not rel:
481 478 # name was actually the same as root (maybe a symlink)
482 479 return ''
483 480 rel.reverse()
484 481 name = os.path.join(*rel)
485 482 audit_path(name)
486 483 return pconvert(name)
487 484 dirname, basename = os.path.split(name)
488 485 rel.append(basename)
489 486 if dirname == name:
490 487 break
491 488 name = dirname
492 489
493 490 raise Abort('%s not under root' % myname)
494 491
495 492 def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None, dflt_pat='glob'):
496 493 """build a function to match a set of file patterns
497 494
498 495 arguments:
499 496 canonroot - the canonical root of the tree you're matching against
500 497 cwd - the current working directory, if relevant
501 498 names - patterns to find
502 499 inc - patterns to include
503 500 exc - patterns to exclude
504 501 dflt_pat - if a pattern in names has no explicit type, assume this one
505 502 src - where these patterns came from (e.g. .hgignore)
506 503
507 504 a pattern is one of:
508 505 'glob:<glob>' - a glob relative to cwd
509 506 're:<regexp>' - a regular expression
510 507 'path:<path>' - a path relative to canonroot
511 508 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
512 509 'relpath:<path>' - a path relative to cwd
513 510 'relre:<regexp>' - a regexp that doesn't have to match the start of a name
514 511 '<something>' - one of the cases above, selected by the dflt_pat argument
515 512
516 513 returns:
517 514 a 3-tuple containing
518 515 - list of roots (places where one should start a recursive walk of the fs);
519 516 this often matches the explicit non-pattern names passed in, but also
520 517 includes the initial part of glob: patterns that has no glob characters
521 518 - a bool match(filename) function
522 519 - a bool indicating if any patterns were passed in
523 520 """
524 521
525 522 # a common case: no patterns at all
526 523 if not names and not inc and not exc:
527 524 return [], always, False
528 525
529 526 def contains_glob(name):
530 527 for c in name:
531 528 if c in _globchars: return True
532 529 return False
533 530
534 531 def regex(kind, name, tail):
535 532 '''convert a pattern into a regular expression'''
536 533 if not name:
537 534 return ''
538 535 if kind == 're':
539 536 return name
540 537 elif kind == 'path':
541 538 return '^' + re.escape(name) + '(?:/|$)'
542 539 elif kind == 'relglob':
543 540 return globre(name, '(?:|.*/)', tail)
544 541 elif kind == 'relpath':
545 542 return re.escape(name) + '(?:/|$)'
546 543 elif kind == 'relre':
547 544 if name.startswith('^'):
548 545 return name
549 546 return '.*' + name
550 547 return globre(name, '', tail)
551 548
552 549 def matchfn(pats, tail):
553 550 """build a matching function from a set of patterns"""
554 551 if not pats:
555 552 return
556 553 try:
557 554 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
558 555 if len(pat) > 20000:
559 556 raise OverflowError()
560 557 return re.compile(pat).match
561 558 except OverflowError:
562 559 # We're using a Python with a tiny regex engine and we
563 560 # made it explode, so we'll divide the pattern list in two
564 561 # until it works
565 562 l = len(pats)
566 563 if l < 2:
567 564 raise
568 565 a, b = matchfn(pats[:l//2], tail), matchfn(pats[l//2:], tail)
569 566 return lambda s: a(s) or b(s)
570 567 except re.error:
571 568 for k, p in pats:
572 569 try:
573 570 re.compile('(?:%s)' % regex(k, p, tail))
574 571 except re.error:
575 572 if src:
576 573 raise Abort("%s: invalid pattern (%s): %s" %
577 574 (src, k, p))
578 575 else:
579 576 raise Abort("invalid pattern (%s): %s" % (k, p))
580 577 raise Abort("invalid pattern")
581 578
582 579 def globprefix(pat):
583 580 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
584 581 root = []
585 582 for p in pat.split('/'):
586 583 if contains_glob(p): break
587 584 root.append(p)
588 585 return '/'.join(root) or '.'
589 586
590 587 def normalizepats(names, default):
591 588 pats = []
592 589 roots = []
593 590 anypats = False
594 591 for kind, name in [patkind(p, default) for p in names]:
595 592 if kind in ('glob', 'relpath'):
596 593 name = canonpath(canonroot, cwd, name)
597 594 elif kind in ('relglob', 'path'):
598 595 name = normpath(name)
599 596
600 597 pats.append((kind, name))
601 598
602 599 if kind in ('glob', 're', 'relglob', 'relre'):
603 600 anypats = True
604 601
605 602 if kind == 'glob':
606 603 root = globprefix(name)
607 604 roots.append(root)
608 605 elif kind in ('relpath', 'path'):
609 606 roots.append(name or '.')
610 607 elif kind == 'relglob':
611 608 roots.append('.')
612 609 return roots, pats, anypats
613 610
614 611 roots, pats, anypats = normalizepats(names, dflt_pat)
615 612
616 613 patmatch = matchfn(pats, '$') or always
617 614 incmatch = always
618 615 if inc:
619 616 dummy, inckinds, dummy = normalizepats(inc, 'glob')
620 617 incmatch = matchfn(inckinds, '(?:/|$)')
621 618 excmatch = never
622 619 if exc:
623 620 dummy, exckinds, dummy = normalizepats(exc, 'glob')
624 621 excmatch = matchfn(exckinds, '(?:/|$)')
625 622
626 623 if not names and inc and not exc:
627 624 # common case: hgignore patterns
628 625 match = incmatch
629 626 else:
630 627 match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)
631 628
632 629 return (roots, match, (inc or exc or anypats) and True)
633 630
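# Editor's sketch (not part of this changeset): matcher() returns (roots,
# matchfn, anypats).  Assumes the definitions above are in scope; '/repo' and
# the patterns are illustrative and need not exist on disk.
def _matcher_example():
    roots, match, anypats = matcher('/repo', '', ['path:src'],
                                    inc=[], exc=['relglob:*.o'])
    # roots == ['src']; the exclude pattern makes anypats True
    return bool(match('src/main.c')), bool(match('src/main.o'))   # (True, False)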
634 631 _hgexecutable = None
635 632
636 633 def main_is_frozen():
637 634 """return True if we are a frozen executable.
638 635
639 636 The code supports py2exe (most common, Windows only) and tools/freeze
640 637 (portable, not much used).
641 638 """
642 639 return (hasattr(sys, "frozen") or # new py2exe
643 640 hasattr(sys, "importers") or # old py2exe
644 641 imp.is_frozen("__main__")) # tools/freeze
645 642
646 643 def hgexecutable():
647 644 """return location of the 'hg' executable.
648 645
649 646 Defaults to $HG or 'hg' in the search path.
650 647 """
651 648 if _hgexecutable is None:
652 649 hg = os.environ.get('HG')
653 650 if hg:
654 651 set_hgexecutable(hg)
655 652 elif main_is_frozen():
656 653 set_hgexecutable(sys.executable)
657 654 else:
658 655 set_hgexecutable(find_exe('hg', 'hg'))
659 656 return _hgexecutable
660 657
661 658 def set_hgexecutable(path):
662 659 """set location of the 'hg' executable"""
663 660 global _hgexecutable
664 661 _hgexecutable = path
665 662
666 663 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
667 664 '''enhanced shell command execution.
668 665 run with environment maybe modified, maybe in different dir.
669 666
670 667 if command fails and onerr is None, return status. if ui object,
671 668 print error message and return status, else raise onerr object as
672 669 exception.'''
673 670 def py2shell(val):
674 671 'convert python object into string that is useful to shell'
675 672 if val in (None, False):
676 673 return '0'
677 674 if val == True:
678 675 return '1'
679 676 return str(val)
680 677 oldenv = {}
681 678 for k in environ:
682 679 oldenv[k] = os.environ.get(k)
683 680 if cwd is not None:
684 681 oldcwd = os.getcwd()
685 682 origcmd = cmd
686 683 if os.name == 'nt':
687 684 cmd = '"%s"' % cmd
688 685 try:
689 686 for k, v in environ.iteritems():
690 687 os.environ[k] = py2shell(v)
691 688 os.environ['HG'] = hgexecutable()
692 689 if cwd is not None and oldcwd != cwd:
693 690 os.chdir(cwd)
694 691 rc = os.system(cmd)
695 692 if sys.platform == 'OpenVMS' and rc & 1:
696 693 rc = 0
697 694 if rc and onerr:
698 695 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
699 696 explain_exit(rc)[0])
700 697 if errprefix:
701 698 errmsg = '%s: %s' % (errprefix, errmsg)
702 699 try:
703 700 onerr.warn(errmsg + '\n')
704 701 except AttributeError:
705 702 raise onerr(errmsg)
706 703 return rc
707 704 finally:
708 705 for k, v in oldenv.iteritems():
709 706 if v is None:
710 707 del os.environ[k]
711 708 else:
712 709 os.environ[k] = v
713 710 if cwd is not None and oldcwd != cwd:
714 711 os.chdir(oldcwd)
715 712
716 713 class SignatureError(Exception):
717 714 pass
718 715
719 716 def checksignature(func):
720 717 '''wrap a function with code to check for calling errors'''
721 718 def check(*args, **kwargs):
722 719 try:
723 720 return func(*args, **kwargs)
724 721 except TypeError:
725 722 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
726 723 raise SignatureError
727 724 raise
728 725
729 726 return check
730 727
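# Editor's sketch (not part of this changeset): checksignature() converts a
# TypeError raised by the call itself (wrong arity) into SignatureError, while
# TypeErrors raised inside the wrapped function still propagate.  Assumes the
# definitions above are in scope.
def _checksignature_example():
    def greet(name):
        return 'hi ' + name
    wrapped = checksignature(greet)
    try:
        wrapped('a', 'b')            # too many arguments
    except SignatureError:
        return True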
731 728 # os.path.lexists is not available on python2.3
732 729 def lexists(filename):
733 730 "test whether a file with this name exists. does not follow symlinks"
734 731 try:
735 732 os.lstat(filename)
736 733 except:
737 734 return False
738 735 return True
739 736
740 737 def rename(src, dst):
741 738 """forcibly rename a file"""
742 739 try:
743 740 os.rename(src, dst)
744 741 except OSError, err: # FIXME: check err (EEXIST ?)
745 742 # on windows, rename to existing file is not allowed, so we
746 743 # must delete destination first. but if file is open, unlink
747 744 # schedules it for delete but does not delete it. rename
748 745 # happens immediately even for open files, so we create
749 746 # temporary file, delete it, rename destination to that name,
750 747 # then delete that. then rename is safe to do.
751 748 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
752 749 os.close(fd)
753 750 os.unlink(temp)
754 751 os.rename(dst, temp)
755 752 os.unlink(temp)
756 753 os.rename(src, dst)
757 754
758 755 def unlink(f):
759 756 """unlink and remove the directory if it is empty"""
760 757 os.unlink(f)
761 758 # try removing directories that might now be empty
762 759 try:
763 760 os.removedirs(os.path.dirname(f))
764 761 except OSError:
765 762 pass
766 763
767 764 def copyfile(src, dest):
768 765 "copy a file, preserving mode"
769 766 if os.path.islink(src):
770 767 try:
771 768 os.unlink(dest)
772 769 except:
773 770 pass
774 771 os.symlink(os.readlink(src), dest)
775 772 else:
776 773 try:
777 774 shutil.copyfile(src, dest)
778 775 shutil.copymode(src, dest)
779 776 except shutil.Error, inst:
780 777 raise Abort(str(inst))
781 778
782 779 def copyfiles(src, dst, hardlink=None):
783 780 """Copy a directory tree using hardlinks if possible"""
784 781
785 782 if hardlink is None:
786 783 hardlink = (os.stat(src).st_dev ==
787 784 os.stat(os.path.dirname(dst)).st_dev)
788 785
789 786 if os.path.isdir(src):
790 787 os.mkdir(dst)
791 788 for name, kind in osutil.listdir(src):
792 789 srcname = os.path.join(src, name)
793 790 dstname = os.path.join(dst, name)
794 791 copyfiles(srcname, dstname, hardlink)
795 792 else:
796 793 if hardlink:
797 794 try:
798 795 os_link(src, dst)
799 796 except (IOError, OSError):
800 797 hardlink = False
801 798 shutil.copy(src, dst)
802 799 else:
803 800 shutil.copy(src, dst)
804 801
805 802 class path_auditor(object):
806 803 '''ensure that a filesystem path contains no banned components.
807 804 the following properties of a path are checked:
808 805
809 806 - under top-level .hg
810 807 - starts at the root of a windows drive
811 808 - contains ".."
812 809 - traverses a symlink (e.g. a/symlink_here/b)
813 810 - inside a nested repository'''
814 811
815 812 def __init__(self, root):
816 813 self.audited = set()
817 814 self.auditeddir = set()
818 815 self.root = root
819 816
820 817 def __call__(self, path):
821 818 if path in self.audited:
822 819 return
823 820 normpath = os.path.normcase(path)
824 821 parts = splitpath(normpath)
825 822 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '.hg.', '')
826 823 or os.pardir in parts):
827 824 raise Abort(_("path contains illegal component: %s") % path)
828 825 if '.hg' in path:
829 826 for p in '.hg', '.hg.':
830 827 if p in parts[1:-1]:
831 828 pos = parts.index(p)
832 829 base = os.path.join(*parts[:pos])
833 830 raise Abort(_('path %r is inside repo %r') % (path, base))
834 831 def check(prefix):
835 832 curpath = os.path.join(self.root, prefix)
836 833 try:
837 834 st = os.lstat(curpath)
838 835 except OSError, err:
839 836 # EINVAL can be raised as invalid path syntax under win32.
840 837 # They must be ignored so that patterns can still be checked.
841 838 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
842 839 raise
843 840 else:
844 841 if stat.S_ISLNK(st.st_mode):
845 842 raise Abort(_('path %r traverses symbolic link %r') %
846 843 (path, prefix))
847 844 elif (stat.S_ISDIR(st.st_mode) and
848 845 os.path.isdir(os.path.join(curpath, '.hg'))):
849 846 raise Abort(_('path %r is inside repo %r') %
850 847 (path, prefix))
851 848 parts.pop()
852 849 prefixes = []
853 850 for n in range(len(parts)):
854 851 prefix = os.sep.join(parts)
855 852 if prefix in self.auditeddir:
856 853 break
857 854 check(prefix)
858 855 prefixes.append(prefix)
859 856 parts.pop()
860 857
861 858 self.audited.add(path)
862 859 # only add prefixes to the cache after checking everything: we don't
863 860 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
864 861 self.auditeddir.update(prefixes)
865 862
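# Editor's sketch (not part of this changeset): path_auditor rejects suspicious
# paths before any filesystem access is attempted.  Assumes the definitions
# above are in scope; '/repo' is illustrative and need not exist.
def _path_auditor_example():
    audit = path_auditor('/repo')
    try:
        audit('a/../escape')         # '..' components are rejected outright
    except Abort:
        return True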
866 863 def _makelock_file(info, pathname):
867 864 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
868 865 os.write(ld, info)
869 866 os.close(ld)
870 867
871 868 def _readlock_file(pathname):
872 869 return posixfile(pathname).read()
873 870
874 871 def nlinks(pathname):
875 872 """Return number of hardlinks for the given file."""
876 873 return os.lstat(pathname).st_nlink
877 874
878 875 if hasattr(os, 'link'):
879 876 os_link = os.link
880 877 else:
881 878 def os_link(src, dst):
882 879 raise OSError(0, _("Hardlinks not supported"))
883 880
884 881 def fstat(fp):
885 882 '''stat file object that may not have fileno method.'''
886 883 try:
887 884 return os.fstat(fp.fileno())
888 885 except AttributeError:
889 886 return os.stat(fp.name)
890 887
891 888 posixfile = file
892 889
893 890 def openhardlinks():
894 891 '''return true if it is safe to hold open file handles to hardlinks'''
895 892 return True
896 893
897 894 def _statfiles(files):
898 895 'Stat each file in files and yield stat or None if file does not exist.'
899 896 lstat = os.lstat
900 897 for nf in files:
901 898 try:
902 899 st = lstat(nf)
903 900 except OSError, err:
904 901 if err.errno not in (errno.ENOENT, errno.ENOTDIR):
905 902 raise
906 903 st = None
907 904 yield st
908 905
909 906 def _statfiles_clustered(files):
910 907 '''Stat each file in files and yield stat or None if file does not exist.
911 908 Cluster and cache stat per directory to minimize number of OS stat calls.'''
912 909 lstat = os.lstat
913 910 ncase = os.path.normcase
914 911 sep = os.sep
915 912 dircache = {} # dirname -> filename -> status | None if file does not exist
916 913 for nf in files:
917 914 nf = ncase(nf)
918 915 pos = nf.rfind(sep)
919 916 if pos == -1:
920 917 dir, base = '.', nf
921 918 else:
922 919 dir, base = nf[:pos+1], nf[pos+1:]
923 920 cache = dircache.get(dir, None)
924 921 if cache is None:
925 922 try:
926 923 dmap = dict([(ncase(n), s)
927 924 for n, k, s in osutil.listdir(dir, True)])
928 925 except OSError, err:
929 926 # handle directory not found in Python version prior to 2.5
930 927 # Python <= 2.4 returns native Windows code 3 in errno
931 928 # Python >= 2.5 returns ENOENT and adds winerror field
932 929 # EINVAL is raised if dir is not a directory.
933 930 if err.errno not in (3, errno.ENOENT, errno.EINVAL,
934 931 errno.ENOTDIR):
935 932 raise
936 933 dmap = {}
937 934 cache = dircache.setdefault(dir, dmap)
938 935 yield cache.get(base, None)
939 936
940 937 if sys.platform == 'win32':
941 938 statfiles = _statfiles_clustered
942 939 else:
943 940 statfiles = _statfiles
944 941
945 942 getuser_fallback = None
946 943
947 944 def getuser():
948 945 '''return name of current user'''
949 946 try:
950 947 return getpass.getuser()
951 948 except ImportError:
952 949 # import of pwd will fail on windows - try fallback
953 950 if getuser_fallback:
954 951 return getuser_fallback()
955 952 # raised if win32api not available
956 953 raise Abort(_('user name not available - set USERNAME '
957 954 'environment variable'))
958 955
959 956 def username(uid=None):
960 957 """Return the name of the user with the given uid.
961 958
962 959 If uid is None, return the name of the current user."""
963 960 try:
964 961 import pwd
965 962 if uid is None:
966 963 uid = os.getuid()
967 964 try:
968 965 return pwd.getpwuid(uid)[0]
969 966 except KeyError:
970 967 return str(uid)
971 968 except ImportError:
972 969 return None
973 970
974 971 def groupname(gid=None):
975 972 """Return the name of the group with the given gid.
976 973
977 974 If gid is None, return the name of the current group."""
978 975 try:
979 976 import grp
980 977 if gid is None:
981 978 gid = os.getgid()
982 979 try:
983 980 return grp.getgrgid(gid)[0]
984 981 except KeyError:
985 982 return str(gid)
986 983 except ImportError:
987 984 return None
988 985
989 986 # File system features
990 987
991 988 def checkcase(path):
992 989 """
993 990 Check whether the given path is on a case-sensitive filesystem
994 991
995 992 Requires a path (like /foo/.hg) ending with a foldable final
996 993 directory component.
997 994 """
998 995 s1 = os.stat(path)
999 996 d, b = os.path.split(path)
1000 997 p2 = os.path.join(d, b.upper())
1001 998 if path == p2:
1002 999 p2 = os.path.join(d, b.lower())
1003 1000 try:
1004 1001 s2 = os.stat(p2)
1005 1002 if s2 == s1:
1006 1003 return False
1007 1004 return True
1008 1005 except:
1009 1006 return True
1010 1007
1011 1008 _fspathcache = {}
1012 1009 def fspath(name, root):
1013 1010 '''Get name in the case stored in the filesystem
1014 1011
1015 1012 The name is either relative to root, or it is an absolute path starting
1016 1013 with root. Note that this function is unnecessary, and should not be
1017 1014 called, for case-sensitive filesystems (simply because it's expensive).
1018 1015 '''
1019 1016 # If name is absolute, make it relative
1020 1017 if name.lower().startswith(root.lower()):
1021 1018 l = len(root)
1022 1019 if name[l] == os.sep or name[l] == os.altsep:
1023 1020 l = l + 1
1024 1021 name = name[l:]
1025 1022
1026 1023 if not os.path.exists(os.path.join(root, name)):
1027 1024 return None
1028 1025
1029 1026 seps = os.sep
1030 1027 if os.altsep:
1031 1028 seps = seps + os.altsep
1032 1029 # Protect backslashes. This gets silly very quickly.
1033 1030 seps = seps.replace('\\', '\\\\')
1034 1031 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
1035 1032 dir = os.path.normcase(os.path.normpath(root))
1036 1033 result = []
1037 1034 for part, sep in pattern.findall(name):
1038 1035 if sep:
1039 1036 result.append(sep)
1040 1037 continue
1041 1038
1042 1039 if dir not in _fspathcache:
1043 1040 _fspathcache[dir] = os.listdir(dir)
1044 1041 contents = _fspathcache[dir]
1045 1042
1046 1043 lpart = part.lower()
1047 1044 for n in contents:
1048 1045 if n.lower() == lpart:
1049 1046 result.append(n)
1050 1047 break
1051 1048 else:
1052 1049 # Cannot happen, as the file exists!
1053 1050 result.append(part)
1054 1051 dir = os.path.join(dir, lpart)
1055 1052
1056 1053 return ''.join(result)
1057 1054
1058 1055 def checkexec(path):
1059 1056 """
1060 1057 Check whether the given path is on a filesystem with UNIX-like exec flags
1061 1058
1062 1059 Requires a directory (like /foo/.hg)
1063 1060 """
1064 1061
1065 1062 # VFAT on some Linux versions can flip the mode but it doesn't persist
1066 1063 # across an FS remount. Frequently we can detect it if files are created
1067 1064 # with exec bit on.
1068 1065
1069 1066 try:
1070 1067 EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
1071 1068 fh, fn = tempfile.mkstemp("", "", path)
1072 1069 try:
1073 1070 os.close(fh)
1074 1071 m = os.stat(fn).st_mode & 0777
1075 1072 new_file_has_exec = m & EXECFLAGS
1076 1073 os.chmod(fn, m ^ EXECFLAGS)
1077 1074 exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
1078 1075 finally:
1079 1076 os.unlink(fn)
1080 1077 except (IOError, OSError):
1081 1078 # we don't care, the user probably won't be able to commit anyway
1082 1079 return False
1083 1080 return not (new_file_has_exec or exec_flags_cannot_flip)
1084 1081
1085 1082 def checklink(path):
1086 1083 """check whether the given path is on a symlink-capable filesystem"""
1087 1084 # mktemp is not racy because symlink creation will fail if the
1088 1085 # file already exists
1089 1086 name = tempfile.mktemp(dir=path)
1090 1087 try:
1091 1088 os.symlink(".", name)
1092 1089 os.unlink(name)
1093 1090 return True
1094 1091 except (OSError, AttributeError):
1095 1092 return False
1096 1093
1097 1094 _umask = os.umask(0)
1098 1095 os.umask(_umask)
1099 1096
1100 1097 def needbinarypatch():
1101 1098 """return True if patches should be applied in binary mode by default."""
1102 1099 return os.name == 'nt'
1103 1100
1104 1101 def endswithsep(path):
1105 1102 '''Check path ends with os.sep or os.altsep.'''
1106 1103 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
1107 1104
1108 1105 def splitpath(path):
1109 1106 '''Split path by os.sep.
1110 1107 Note that this function does not use os.altsep because this is
1111 1108 an alternative of simple "xxx.split(os.sep)".
1112 1109 It is recommended to use os.path.normpath() before using this
1113 1110 function if needed.'''
1114 1111 return path.split(os.sep)
1115 1112
1116 1113 def gui():
1117 1114 '''Are we running in a GUI?'''
1118 1115 return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY")
1119 1116
1120 1117 def lookup_reg(key, name=None, scope=None):
1121 1118 return None
1122 1119
1123 1120 # Platform specific variants
1124 1121 if os.name == 'nt':
1125 1122 import msvcrt
1126 1123 nulldev = 'NUL:'
1127 1124
1128 1125 class winstdout:
1129 1126 '''stdout on windows misbehaves if sent through a pipe'''
1130 1127
1131 1128 def __init__(self, fp):
1132 1129 self.fp = fp
1133 1130
1134 1131 def __getattr__(self, key):
1135 1132 return getattr(self.fp, key)
1136 1133
1137 1134 def close(self):
1138 1135 try:
1139 1136 self.fp.close()
1140 1137 except: pass
1141 1138
1142 1139 def write(self, s):
1143 1140 try:
1144 1141 # This is a workaround for the "Not enough space" error when
1145 1142 # writing a large amount of data to the console.
1146 1143 limit = 16000
1147 1144 l = len(s)
1148 1145 start = 0
1149 1146 while start < l:
1150 1147 end = start + limit
1151 1148 self.fp.write(s[start:end])
1152 1149 start = end
1153 1150 except IOError, inst:
1154 1151 if inst.errno != 0: raise
1155 1152 self.close()
1156 1153 raise IOError(errno.EPIPE, 'Broken pipe')
1157 1154
1158 1155 def flush(self):
1159 1156 try:
1160 1157 return self.fp.flush()
1161 1158 except IOError, inst:
1162 1159 if inst.errno != errno.EINVAL: raise
1163 1160 self.close()
1164 1161 raise IOError(errno.EPIPE, 'Broken pipe')
1165 1162
1166 1163 sys.stdout = winstdout(sys.stdout)
1167 1164
1168 1165 def _is_win_9x():
1169 1166 '''return true if run on windows 95, 98 or me.'''
1170 1167 try:
1171 1168 return sys.getwindowsversion()[3] == 1
1172 1169 except AttributeError:
1173 1170 return 'command' in os.environ.get('comspec', '')
1174 1171
1175 1172 def openhardlinks():
1176 1173 return not _is_win_9x() and "win32api" in locals()
1177 1174
1178 1175 def system_rcpath():
1179 1176 try:
1180 1177 return system_rcpath_win32()
1181 1178 except:
1182 1179 return [r'c:\mercurial\mercurial.ini']
1183 1180
1184 1181 def user_rcpath():
1185 1182 '''return os-specific hgrc search path to the user dir'''
1186 1183 try:
1187 1184 path = user_rcpath_win32()
1188 1185 except:
1189 1186 home = os.path.expanduser('~')
1190 1187 path = [os.path.join(home, 'mercurial.ini'),
1191 1188 os.path.join(home, '.hgrc')]
1192 1189 userprofile = os.environ.get('USERPROFILE')
1193 1190 if userprofile:
1194 1191 path.append(os.path.join(userprofile, 'mercurial.ini'))
1195 1192 path.append(os.path.join(userprofile, '.hgrc'))
1196 1193 return path
1197 1194
1198 1195 def parse_patch_output(output_line):
1199 1196 """parses the output produced by patch and returns the file name"""
1200 1197 pf = output_line[14:]
1201 1198 if pf[0] == '`':
1202 1199 pf = pf[1:-1] # Remove the quotes
1203 1200 return pf
1204 1201
1205 1202 def sshargs(sshcmd, host, user, port):
1206 1203 '''Build argument list for ssh or Plink'''
1207 1204 pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
1208 1205 args = user and ("%s@%s" % (user, host)) or host
1209 1206 return port and ("%s %s %s" % (args, pflag, port)) or args
1210 1207
1211 1208 def testpid(pid):
1212 1209 '''return False if pid dead, True if running or not known'''
1213 1210 return True
1214 1211
1215 1212 def set_flags(f, l, x):
1216 1213 pass
1217 1214
1218 1215 def set_binary(fd):
1219 1216 # When run without a console, pipes may expose an invalid
1220 1217 # fileno(), usually set to -1.
1221 1218 if hasattr(fd, 'fileno') and fd.fileno() >= 0:
1222 1219 msvcrt.setmode(fd.fileno(), os.O_BINARY)
1223 1220
1224 1221 def pconvert(path):
1225 1222 return '/'.join(splitpath(path))
1226 1223
1227 1224 def localpath(path):
1228 1225 return path.replace('/', '\\')
1229 1226
1230 1227 def normpath(path):
1231 1228 return pconvert(os.path.normpath(path))
1232 1229
1233 1230 makelock = _makelock_file
1234 1231 readlock = _readlock_file
1235 1232
1236 1233 def samestat(s1, s2):
1237 1234 return False
1238 1235
1239 1236 # A sequence of backslashes is special iff it precedes a double quote:
1240 1237 # - if there's an even number of backslashes, the double quote is not
1241 1238 # quoted (i.e. it ends the quoted region)
1242 1239 # - if there's an odd number of backslashes, the double quote is quoted
1243 1240 # - in both cases, every pair of backslashes is unquoted into a single
1244 1241 # backslash
1245 1242 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
1246 1243 # So, to quote a string, we must surround it in double quotes, double
1247 1244 # the number of backslashes that precede double quotes and add another
1248 1245 # backslash before every double quote (being careful with the double
1249 1246 # quote we've appended to the end)
1250 1247 _quotere = None
1251 1248 def shellquote(s):
1252 1249 global _quotere
1253 1250 if _quotere is None:
1254 1251 _quotere = re.compile(r'(\\*)("|\\$)')
1255 1252 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
1256 1253
1257 1254 def quotecommand(cmd):
1258 1255 """Build a command string suitable for os.popen* calls."""
1259 1256 # The extra quotes are needed because popen* runs the command
1260 1257 # through the current COMSPEC. cmd.exe suppresses enclosing quotes.
1261 1258 return '"' + cmd + '"'
1262 1259
1263 1260 def popen(command, mode='r'):
1264 1261 # Work around "popen spawned process may not write to stdout
1265 1262 # under windows"
1266 1263 # http://bugs.python.org/issue1366
1267 1264 command += " 2> %s" % nulldev
1268 1265 return os.popen(quotecommand(command), mode)
1269 1266
1270 1267 def explain_exit(code):
1271 1268 return _("exited with status %d") % code, code
1272 1269
1273 1270 # if you change this stub into a real check, please try to implement the
1274 1271 # username and groupname functions above, too.
1275 1272 def isowner(fp, st=None):
1276 1273 return True
1277 1274
1278 1275 def find_in_path(name, path, default=None):
1279 1276 '''find name in search path. path can be string (will be split
1280 1277 with os.pathsep), or iterable thing that returns strings. if name
1281 1278 found, return path to name. else return default. name is looked up
1282 1279 using cmd.exe rules, using PATHEXT.'''
1283 1280 if isinstance(path, str):
1284 1281 path = path.split(os.pathsep)
1285 1282
1286 1283 pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
1287 1284 pathext = pathext.lower().split(os.pathsep)
1288 1285 isexec = os.path.splitext(name)[1].lower() in pathext
1289 1286
1290 1287 for p in path:
1291 1288 p_name = os.path.join(p, name)
1292 1289
1293 1290 if isexec and os.path.exists(p_name):
1294 1291 return p_name
1295 1292
1296 1293 for ext in pathext:
1297 1294 p_name_ext = p_name + ext
1298 1295 if os.path.exists(p_name_ext):
1299 1296 return p_name_ext
1300 1297 return default
1301 1298
1302 1299 def set_signal_handler():
1303 1300 try:
1304 1301 set_signal_handler_win32()
1305 1302 except NameError:
1306 1303 pass
1307 1304
1308 1305 try:
1309 1306 # override functions with win32 versions if possible
1310 1307 from util_win32 import *
1311 1308 if not _is_win_9x():
1312 1309 posixfile = posixfile_nt
1313 1310 except ImportError:
1314 1311 pass
1315 1312
1316 1313 else:
1317 1314 nulldev = '/dev/null'
1318 1315
1319 1316 def rcfiles(path):
1320 1317 rcs = [os.path.join(path, 'hgrc')]
1321 1318 rcdir = os.path.join(path, 'hgrc.d')
1322 1319 try:
1323 1320 rcs.extend([os.path.join(rcdir, f)
1324 1321 for f, kind in osutil.listdir(rcdir)
1325 1322 if f.endswith(".rc")])
1326 1323 except OSError:
1327 1324 pass
1328 1325 return rcs
1329 1326
1330 1327 def system_rcpath():
1331 1328 path = []
1332 1329 # old mod_python does not set sys.argv
1333 1330 if len(getattr(sys, 'argv', [])) > 0:
1334 1331 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
1335 1332 '/../etc/mercurial'))
1336 1333 path.extend(rcfiles('/etc/mercurial'))
1337 1334 return path
1338 1335
1339 1336 def user_rcpath():
1340 1337 return [os.path.expanduser('~/.hgrc')]
1341 1338
1342 1339 def parse_patch_output(output_line):
1343 1340 """parses the output produced by patch and returns the file name"""
1344 1341 pf = output_line[14:]
1345 1342 if os.sys.platform == 'OpenVMS':
1346 1343 if pf[0] == '`':
1347 1344 pf = pf[1:-1] # Remove the quotes
1348 1345 else:
1349 1346 if pf.startswith("'") and pf.endswith("'") and " " in pf:
1350 1347 pf = pf[1:-1] # Remove the quotes
1351 1348 return pf
1352 1349
1353 1350 def sshargs(sshcmd, host, user, port):
1354 1351 '''Build argument list for ssh'''
1355 1352 args = user and ("%s@%s" % (user, host)) or host
1356 1353 return port and ("%s -p %s" % (args, port)) or args
1357 1354
1358 1355 def is_exec(f):
1359 1356 """check whether a file is executable"""
1360 1357 return (os.lstat(f).st_mode & 0100 != 0)
1361 1358
1362 1359 def set_flags(f, l, x):
1363 1360 s = os.lstat(f).st_mode
1364 1361 if l:
1365 1362 if not stat.S_ISLNK(s):
1366 1363 # switch file to link
1367 1364 data = file(f).read()
1368 1365 os.unlink(f)
1369 1366 try:
1370 1367 os.symlink(data, f)
1371 1368 except:
1372 1369 # failed to make a link, rewrite file
1373 1370 file(f, "w").write(data)
1374 1371 # no chmod needed at this point
1375 1372 return
1376 1373 if stat.S_ISLNK(s):
1377 1374 # switch link to file
1378 1375 data = os.readlink(f)
1379 1376 os.unlink(f)
1380 1377 file(f, "w").write(data)
1381 1378 s = 0666 & ~_umask # avoid restatting for chmod
1382 1379
1383 1380 sx = s & 0100
1384 1381 if x and not sx:
1385 1382 # Turn on +x for every +r bit when making a file executable
1386 1383 # and obey umask.
1387 1384 os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
1388 1385 elif not x and sx:
1389 1386 # Turn off all +x bits
1390 1387 os.chmod(f, s & 0666)
1391 1388
1392 1389 def set_binary(fd):
1393 1390 pass
1394 1391
1395 1392 def pconvert(path):
1396 1393 return path
1397 1394
1398 1395 def localpath(path):
1399 1396 return path
1400 1397
1401 1398 normpath = os.path.normpath
1402 1399 samestat = os.path.samestat
1403 1400
1404 1401 def makelock(info, pathname):
1405 1402 try:
1406 1403 os.symlink(info, pathname)
1407 1404 except OSError, why:
1408 1405 if why.errno == errno.EEXIST:
1409 1406 raise
1410 1407 else:
1411 1408 _makelock_file(info, pathname)
1412 1409
1413 1410 def readlock(pathname):
1414 1411 try:
1415 1412 return os.readlink(pathname)
1416 1413 except OSError, why:
1417 1414 if why.errno in (errno.EINVAL, errno.ENOSYS):
1418 1415 return _readlock_file(pathname)
1419 1416 else:
1420 1417 raise
1421 1418
1422 1419 def shellquote(s):
1423 1420 if os.sys.platform == 'OpenVMS':
1424 1421 return '"%s"' % s
1425 1422 else:
1426 1423 return "'%s'" % s.replace("'", "'\\''")
1427 1424
1428 1425 def quotecommand(cmd):
1429 1426 return cmd
1430 1427
1431 1428 def popen(command, mode='r'):
1432 1429 return os.popen(command, mode)
1433 1430
1434 1431 def testpid(pid):
1435 1432 '''return False if pid dead, True if running or not sure'''
1436 1433 if os.sys.platform == 'OpenVMS':
1437 1434 return True
1438 1435 try:
1439 1436 os.kill(pid, 0)
1440 1437 return True
1441 1438 except OSError, inst:
1442 1439 return inst.errno != errno.ESRCH
1443 1440
1444 1441 def explain_exit(code):
1445 1442 """return a 2-tuple (desc, code) describing a process's status"""
1446 1443 if os.WIFEXITED(code):
1447 1444 val = os.WEXITSTATUS(code)
1448 1445 return _("exited with status %d") % val, val
1449 1446 elif os.WIFSIGNALED(code):
1450 1447 val = os.WTERMSIG(code)
1451 1448 return _("killed by signal %d") % val, val
1452 1449 elif os.WIFSTOPPED(code):
1453 1450 val = os.WSTOPSIG(code)
1454 1451 return _("stopped by signal %d") % val, val
1455 1452 raise ValueError(_("invalid exit code"))
1456 1453
1457 1454 def isowner(fp, st=None):
1458 1455 """Return True if the file object fp belongs to the current user.
1459 1456 
1460 1457 The return value of a util.fstat(fp) may be passed as the st argument.
1461 1458 """
1462 1459 if st is None:
1463 1460 st = fstat(fp)
1464 1461 return st.st_uid == os.getuid()
1465 1462
1466 1463 def find_in_path(name, path, default=None):
1467 1464 '''find name in search path. path can be string (will be split
1468 1465 with os.pathsep), or iterable thing that returns strings. if name
1469 1466 found, return path to name. else return default.'''
1470 1467 if isinstance(path, str):
1471 1468 path = path.split(os.pathsep)
1472 1469 for p in path:
1473 1470 p_name = os.path.join(p, name)
1474 1471 if os.path.exists(p_name):
1475 1472 return p_name
1476 1473 return default
1477 1474
1478 1475 def set_signal_handler():
1479 1476 pass
1480 1477
1481 1478 def find_exe(name, default=None):
1482 1479 '''find path of an executable.
1483 1480 if name contains a path component, return it as is. otherwise,
1484 1481 use normal executable search path.'''
1485 1482
1486 1483 if os.sep in name or sys.platform == 'OpenVMS':
1487 1484 # don't check the executable bit. if the file isn't
1488 1485 # executable, whoever tries to actually run it will give a
1489 1486 # much more useful error message.
1490 1487 return name
1491 1488 return find_in_path(name, os.environ.get('PATH', ''), default=default)
1492 1489
1493 1490 def mktempcopy(name, emptyok=False, createmode=None):
1494 1491 """Create a temporary file with the same contents from name
1495 1492
1496 1493 The permission bits are copied from the original file.
1497 1494
1498 1495 If the temporary file is going to be truncated immediately, you
1499 1496 can use emptyok=True as an optimization.
1500 1497
1501 1498 Returns the name of the temporary file.
1502 1499 """
1503 1500 d, fn = os.path.split(name)
1504 1501 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1505 1502 os.close(fd)
1506 1503 # Temporary files are created with mode 0600, which is usually not
1507 1504 # what we want. If the original file already exists, just copy
1508 1505 # its mode. Otherwise, manually obey umask.
1509 1506 try:
1510 1507 st_mode = os.lstat(name).st_mode & 0777
1511 1508 except OSError, inst:
1512 1509 if inst.errno != errno.ENOENT:
1513 1510 raise
1514 1511 st_mode = createmode
1515 1512 if st_mode is None:
1516 1513 st_mode = ~_umask
1517 1514 st_mode &= 0666
1518 1515 os.chmod(temp, st_mode)
1519 1516 if emptyok:
1520 1517 return temp
1521 1518 try:
1522 1519 try:
1523 1520 ifp = posixfile(name, "rb")
1524 1521 except IOError, inst:
1525 1522 if inst.errno == errno.ENOENT:
1526 1523 return temp
1527 1524 if not getattr(inst, 'filename', None):
1528 1525 inst.filename = name
1529 1526 raise
1530 1527 ofp = posixfile(temp, "wb")
1531 1528 for chunk in filechunkiter(ifp):
1532 1529 ofp.write(chunk)
1533 1530 ifp.close()
1534 1531 ofp.close()
1535 1532 except:
1536 1533 try: os.unlink(temp)
1537 1534 except: pass
1538 1535 raise
1539 1536 return temp
1540 1537
1541 1538 class atomictempfile(posixfile):
1542 1539 """file-like object that atomically updates a file
1543 1540
1544 1541 All writes will be redirected to a temporary copy of the original
1545 1542 file. When rename is called, the copy is renamed to the original
1546 1543 name, making the changes visible.
1547 1544 """
1548 1545 def __init__(self, name, mode, createmode):
1549 1546 self.__name = name
1550 1547 self.temp = mktempcopy(name, emptyok=('w' in mode),
1551 1548 createmode=createmode)
1552 1549 posixfile.__init__(self, self.temp, mode)
1553 1550
1554 1551 def rename(self):
1555 1552 if not self.closed:
1556 1553 posixfile.close(self)
1557 1554 rename(self.temp, localpath(self.__name))
1558 1555
1559 1556 def __del__(self):
1560 1557 if not self.closed:
1561 1558 try:
1562 1559 os.unlink(self.temp)
1563 1560 except: pass
1564 1561 posixfile.close(self)
1565 1562
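# Editor's sketch (not part of this changeset): atomictempfile writes into a
# temporary copy and only replaces the target when rename() is called; dropping
# the object without rename() discards the changes.  Assumes the definitions
# above are in scope; the path is illustrative.
def _atomictempfile_example(path='example.txt'):
    f = atomictempfile(path, 'w', createmode=None)
    f.write('new contents\n')
    f.rename()                       # atomically replaces 'example.txt'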
1566 1563 def makedirs(name, mode=None):
1567 1564 """recursive directory creation with parent mode inheritance"""
1568 1565 try:
1569 1566 os.mkdir(name)
1570 1567 if mode is not None:
1571 1568 os.chmod(name, mode)
1572 1569 return
1573 1570 except OSError, err:
1574 1571 if err.errno == errno.EEXIST:
1575 1572 return
1576 1573 if err.errno != errno.ENOENT:
1577 1574 raise
1578 1575 parent = os.path.abspath(os.path.dirname(name))
1579 1576 makedirs(parent, mode)
1580 1577 makedirs(name, mode)
1581 1578
1582 1579 class opener(object):
1583 1580 """Open files relative to a base directory
1584 1581
1585 1582 This class is used to hide the details of COW semantics and
1586 1583 remote file access from higher level code.
1587 1584 """
1588 1585 def __init__(self, base, audit=True):
1589 1586 self.base = base
1590 1587 if audit:
1591 1588 self.audit_path = path_auditor(base)
1592 1589 else:
1593 1590 self.audit_path = always
1594 1591 self.createmode = None
1595 1592
1596 1593 def __getattr__(self, name):
1597 1594 if name == '_can_symlink':
1598 1595 self._can_symlink = checklink(self.base)
1599 1596 return self._can_symlink
1600 1597 raise AttributeError(name)
1601 1598
1602 1599 def _fixfilemode(self, name):
1603 1600 if self.createmode is None:
1604 1601 return
1605 1602 os.chmod(name, self.createmode & 0666)
1606 1603
1607 1604 def __call__(self, path, mode="r", text=False, atomictemp=False):
1608 1605 self.audit_path(path)
1609 1606 f = os.path.join(self.base, path)
1610 1607
1611 1608 if not text and "b" not in mode:
1612 1609 mode += "b" # for that other OS
1613 1610
1614 1611 nlink = -1
1615 1612 if mode not in ("r", "rb"):
1616 1613 try:
1617 1614 nlink = nlinks(f)
1618 1615 except OSError:
1619 1616 nlink = 0
1620 1617 d = os.path.dirname(f)
1621 1618 if not os.path.isdir(d):
1622 1619 makedirs(d, self.createmode)
1623 1620 if atomictemp:
1624 1621 return atomictempfile(f, mode, self.createmode)
1625 1622 if nlink > 1:
1626 1623 rename(mktempcopy(f), f)
1627 1624 fp = posixfile(f, mode)
1628 1625 if nlink == 0:
1629 1626 self._fixfilemode(f)
1630 1627 return fp
1631 1628
1632 1629 def symlink(self, src, dst):
1633 1630 self.audit_path(dst)
1634 1631 linkname = os.path.join(self.base, dst)
1635 1632 try:
1636 1633 os.unlink(linkname)
1637 1634 except OSError:
1638 1635 pass
1639 1636
1640 1637 dirname = os.path.dirname(linkname)
1641 1638 if not os.path.exists(dirname):
1642 1639 makedirs(dirname, self.createmode)
1643 1640
1644 1641 if self._can_symlink:
1645 1642 try:
1646 1643 os.symlink(src, linkname)
1647 1644 except OSError, err:
1648 1645 raise OSError(err.errno, _('could not symlink to %r: %s') %
1649 1646 (src, err.strerror), linkname)
1650 1647 else:
1651 1648 f = self(dst, "w")
1652 1649 f.write(src)
1653 1650 f.close()
1654 1651 self._fixfilemode(dst)
1655 1652
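# Editor's sketch (not part of this changeset): opener audits paths relative to
# its base and creates missing parent directories on write.  Assumes the
# definitions above are in scope; the base directory is illustrative.
def _opener_example(base='/tmp/example-base'):
    o = opener(base)
    f = o('data/notes.txt', 'w')     # audited, parent dirs created on demand
    f.write('hello\n')
    f.close()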
1656 1653 class chunkbuffer(object):
1657 1654 """Allow arbitrary sized chunks of data to be efficiently read from an
1658 1655 iterator over chunks of arbitrary size."""
1659 1656
1660 1657 def __init__(self, in_iter):
1661 1658 """in_iter is the iterator that's iterating over the input chunks.
1662 1659 The internal buffer is refilled in blocks of roughly 2**16 bytes."""
1663 1660 self.iter = iter(in_iter)
1664 1661 self.buf = ''
1665 1662 self.targetsize = 2**16
1666 1663
1667 1664 def read(self, l):
1668 1665 """Read L bytes of data from the iterator of chunks of data.
1669 1666 Returns less than L bytes if the iterator runs dry."""
1670 1667 if l > len(self.buf) and self.iter:
1671 1668 # collect at least l bytes, but no less than self.targetsize
1672 1669 targetsize = max(l, self.targetsize)
1673 1670 collector = cStringIO.StringIO()
1674 1671 collector.write(self.buf)
1675 1672 collected = len(self.buf)
1676 1673 for chunk in self.iter:
1677 1674 collector.write(chunk)
1678 1675 collected += len(chunk)
1679 1676 if collected >= targetsize:
1680 1677 break
1681 1678 if collected < targetsize:
1682 1679 self.iter = False
1683 1680 self.buf = collector.getvalue()
1684 1681 if len(self.buf) == l:
1685 1682 s, self.buf = str(self.buf), ''
1686 1683 else:
1687 1684 s, self.buf = self.buf[:l], buffer(self.buf, l)
1688 1685 return s
1689 1686
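# Editor's sketch (not part of this changeset): chunkbuffer lets callers read
# exact byte counts from an iterator of arbitrarily sized chunks.  Assumes the
# definitions above are in scope.
def _chunkbuffer_example():
    cb = chunkbuffer(iter(['abc', 'defgh', 'ij']))
    first = cb.read(4)               # 'abcd' - spans the first two input chunks
    rest = cb.read(100)              # 'efghij' - shorter than asked once data runs dry
    return first, rest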
1690 1687 def filechunkiter(f, size=65536, limit=None):
1691 1688 """Create a generator that produces the data in the file in chunks of
1692 1689 size bytes (default 65536), up to an optional limit (default is
1693 1690 to read all data). Chunks may be less than size bytes if the
1694 1691 chunk is the last chunk in the file, or the file is a socket or
1695 1692 some other type of file that sometimes reads less data than is
1696 1693 requested."""
1697 1694 assert size >= 0
1698 1695 assert limit is None or limit >= 0
1699 1696 while True:
1700 1697 if limit is None: nbytes = size
1701 1698 else: nbytes = min(limit, size)
1702 1699 s = nbytes and f.read(nbytes)
1703 1700 if not s: break
1704 1701 if limit: limit -= len(s)
1705 1702 yield s
1706 1703
1707 1704 def makedate():
1708 1705 lt = time.localtime()
1709 1706 if lt[8] == 1 and time.daylight:
1710 1707 tz = time.altzone
1711 1708 else:
1712 1709 tz = time.timezone
1713 1710 return time.mktime(lt), tz
1714 1711
1715 1712 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
1716 1713 """represent a (unixtime, offset) tuple as a localized time.
1717 1714 unixtime is seconds since the epoch, and offset is the time zone's
1718 1715 number of seconds away from UTC. if timezone is false, do not
1719 1716 number of seconds away from UTC. '%1' and '%2' in the format are
1720 1717 replaced by the hours and minutes of that offset."""
1721 1718 if "%1" in format or "%2" in format:
1722 1719 sign = (tz > 0) and "-" or "+"
1723 1720 minutes = abs(tz) / 60
1724 1721 format = format.replace("%1", "%c%02d" % (sign, minutes / 60))
1725 1722 format = format.replace("%2", "%02d" % (minutes % 60))
1726 1723 s = time.strftime(format, time.gmtime(float(t) - tz))
1727 1724 return s
1728 1725
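# Editor's sketch (not part of this changeset): datestr() renders a
# (unixtime, offset) pair; '%1' and '%2' expand to the hours and minutes of the
# UTC offset.  Assumes the definitions above are in scope.
def _datestr_example():
    date = (1199145600, -3600)                     # 2008-01-01 00:00 UTC, offset UTC+1
    return datestr(date, '%Y-%m-%d %H:%M %1%2')    # -> '2008-01-01 01:00 +0100'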
1729 1726 def shortdate(date=None):
1730 1727 """turn (timestamp, tzoff) tuple into ISO 8601 date."""
1731 1728 return datestr(date, format='%Y-%m-%d')
1732 1729
1733 1730 def strdate(string, format, defaults=[]):
1734 1731 """parse a localized time string and return a (unixtime, offset) tuple.
1735 1732 if the string cannot be parsed, ValueError is raised."""
1736 1733 def timezone(string):
1737 1734 tz = string.split()[-1]
1738 1735 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1739 1736 sign = (tz[0] == "+") and 1 or -1
1740 1737 hours = int(tz[1:3])
1741 1738 minutes = int(tz[3:5])
1742 1739 return -sign * (hours * 60 + minutes) * 60
1743 1740 if tz == "GMT" or tz == "UTC":
1744 1741 return 0
1745 1742 return None
1746 1743
1747 1744 # NOTE: unixtime = localunixtime + offset
1748 1745 offset, date = timezone(string), string
1749 1746 if offset is not None:
1750 1747 date = " ".join(string.split()[:-1])
1751 1748
1752 1749 # add missing elements from defaults
1753 1750 for part in defaults:
1754 1751 found = [True for p in part if ("%"+p) in format]
1755 1752 if not found:
1756 1753 date += "@" + defaults[part]
1757 1754 format += "@%" + part[0]
1758 1755
1759 1756 timetuple = time.strptime(date, format)
1760 1757 localunixtime = int(calendar.timegm(timetuple))
1761 1758 if offset is None:
1762 1759 # local timezone
1763 1760 unixtime = int(time.mktime(timetuple))
1764 1761 offset = unixtime - localunixtime
1765 1762 else:
1766 1763 unixtime = localunixtime + offset
1767 1764 return unixtime, offset
1768 1765
1769 1766 def parsedate(date, formats=None, defaults=None):
1770 1767 """parse a localized date/time string and return a (unixtime, offset) tuple.
1771 1768
1772 1769 The date may be a "unixtime offset" string or in one of the specified
1773 1770 formats. If the date already is a (unixtime, offset) tuple, it is returned.
1774 1771 """
1775 1772 if not date:
1776 1773 return 0, 0
1777 1774 if isinstance(date, tuple) and len(date) == 2:
1778 1775 return date
1779 1776 if not formats:
1780 1777 formats = defaultdateformats
1781 1778 date = date.strip()
1782 1779 try:
1783 1780 when, offset = map(int, date.split(' '))
1784 1781 except ValueError:
1785 1782 # fill out defaults
1786 1783 if not defaults:
1787 1784 defaults = {}
1788 1785 now = makedate()
1789 1786 for part in "d mb yY HI M S".split():
1790 1787 if part not in defaults:
1791 1788 if part[0] in "HMS":
1792 1789 defaults[part] = "00"
1793 1790 else:
1794 1791 defaults[part] = datestr(now, "%" + part[0])
1795 1792
1796 1793 for format in formats:
1797 1794 try:
1798 1795 when, offset = strdate(date, format, defaults)
1799 1796 except (ValueError, OverflowError):
1800 1797 pass
1801 1798 else:
1802 1799 break
1803 1800 else:
1804 1801 raise Abort(_('invalid date: %r ') % date)
1805 1802 # validate explicit (probably user-specified) date and
1806 1803 # time zone offset. values must fit in signed 32 bits for
1807 1804 # current 32-bit linux runtimes. timezones go from UTC-12
1808 1805 # to UTC+14
1809 1806 if abs(when) > 0x7fffffff:
1810 1807 raise Abort(_('date exceeds 32 bits: %d') % when)
1811 1808 if offset < -50400 or offset > 43200:
1812 1809 raise Abort(_('impossible time zone offset: %d') % offset)
1813 1810 return when, offset
1814 1811
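# Illustrative example (not part of the original file).  A plain
# "unixtime offset" string is taken as-is; anything else is tried against the
# supplied (or default) formats, with missing fields filled in from 'now'.
# >>> parsedate('1230768000 -3600')
# (1230768000, -3600)
# >>> parsedate('2009-01-01 01:00 +0100', ['%Y-%m-%d %H:%M'])
# (1230768000, -3600)
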
1815 1812 def matchdate(date):
1816 1813 """Return a function that matches a given date match specifier
1817 1814
1818 1815 Formats include:
1819 1816
1820 1817 '{date}' match a given date to the accuracy provided
1821 1818
1822 1819 '<{date}' on or before a given date
1823 1820
1824 1821 '>{date}' on or after a given date
1825 1822
1826 1823 """
1827 1824
1828 1825 def lower(date):
1829 1826 d = dict(mb="1", d="1")
1830 1827 return parsedate(date, extendeddateformats, d)[0]
1831 1828
1832 1829 def upper(date):
1833 1830 d = dict(mb="12", HI="23", M="59", S="59")
1834 1831 for days in "31 30 29".split():
1835 1832 try:
1836 1833 d["d"] = days
1837 1834 return parsedate(date, extendeddateformats, d)[0]
1838 1835 except:
1839 1836 pass
1840 1837 d["d"] = "28"
1841 1838 return parsedate(date, extendeddateformats, d)[0]
1842 1839
1843 1840 if date[0] == "<":
1844 1841 when = upper(date[1:])
1845 1842 return lambda x: x <= when
1846 1843 elif date[0] == ">":
1847 1844 when = lower(date[1:])
1848 1845 return lambda x: x >= when
1849 1846 elif date[0] == "-":
1850 1847 try:
1851 1848 days = int(date[1:])
1852 1849 except ValueError:
1853 1850 raise Abort(_("invalid day spec: %s") % date[1:])
1854 1851 when = makedate()[0] - days * 3600 * 24
1855 1852 return lambda x: x >= when
1856 1853 elif " to " in date:
1857 1854 a, b = date.split(" to ")
1858 1855 start, stop = lower(a), upper(b)
1859 1856 return lambda x: x >= start and x <= stop
1860 1857 else:
1861 1858 start, stop = lower(date), upper(date)
1862 1859 return lambda x: x >= start and x <= stop
1863 1860
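# Illustrative example (not part of the original file; the concrete results
# assume the local timezone is UTC, since bare dates are interpreted in local
# time).  Besides the forms listed in the docstring, '-{days}' matches the
# last N days and '{date} to {date}' matches an inclusive range.
# >>> m = matchdate('>2009-01-01')
# >>> m(1230768000)                       # 2009-01-01 00:00:00 UTC
# True
# >>> m(1199145600)                       # 2008-01-01 00:00:00 UTC
# False
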
1864 1861 def shortuser(user):
1865 1862 """Return a short representation of a user name or email address."""
1866 1863 f = user.find('@')
1867 1864 if f >= 0:
1868 1865 user = user[:f]
1869 1866 f = user.find('<')
1870 1867 if f >= 0:
1871 1868 user = user[f+1:]
1872 1869 f = user.find(' ')
1873 1870 if f >= 0:
1874 1871 user = user[:f]
1875 1872 f = user.find('.')
1876 1873 if f >= 0:
1877 1874 user = user[:f]
1878 1875 return user
1879 1876
1880 1877 def email(author):
1881 1878 '''get email of author.'''
1882 1879 r = author.find('>')
1883 1880 if r == -1: r = None
1884 1881 return author[author.find('<')+1:r]
1885 1882
1886 1883 def ellipsis(text, maxlength=400):
1887 1884 """Trim string to at most maxlength (default: 400) characters."""
1888 1885 if len(text) <= maxlength:
1889 1886 return text
1890 1887 else:
1891 1888 return "%s..." % (text[:maxlength-3])
1892 1889
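# Illustrative examples (not part of the original file).
# >>> shortuser('John Doe <john.doe@example.com>')
# 'john'
# >>> email('John Doe <john.doe@example.com>')
# 'john.doe@example.com'
# >>> ellipsis('abcdefghij', 8)
# 'abcde...'
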
1893 1890 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
1894 1891 '''yield every hg repository under path, recursively.'''
1895 1892 def errhandler(err):
1896 1893 if err.filename == path:
1897 1894 raise err
1898 1895 if followsym and hasattr(os.path, 'samestat'):
1899 1896 def _add_dir_if_not_there(dirlst, dirname):
1900 1897 match = False
1901 1898 samestat = os.path.samestat
1902 1899 dirstat = os.stat(dirname)
1903 1900 for lstdirstat in dirlst:
1904 1901 if samestat(dirstat, lstdirstat):
1905 1902 match = True
1906 1903 break
1907 1904 if not match:
1908 1905 dirlst.append(dirstat)
1909 1906 return not match
1910 1907 else:
1911 1908 followsym = False
1912 1909
1913 1910 if (seen_dirs is None) and followsym:
1914 1911 seen_dirs = []
1915 1912 _add_dir_if_not_there(seen_dirs, path)
1916 1913 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
1917 1914 if '.hg' in dirs:
1918 1915 yield root # found a repository
1919 1916 qroot = os.path.join(root, '.hg', 'patches')
1920 1917 if os.path.isdir(os.path.join(qroot, '.hg')):
1921 1918 yield qroot # we have a patch queue repo here
1922 1919 if recurse:
1923 1920 # avoid recursing inside the .hg directory
1924 1921 dirs.remove('.hg')
1925 1922 else:
1926 1923 dirs[:] = [] # don't descend further
1927 1924 elif followsym:
1928 1925 newdirs = []
1929 1926 for d in dirs:
1930 1927 fname = os.path.join(root, d)
1931 1928 if _add_dir_if_not_there(seen_dirs, fname):
1932 1929 if os.path.islink(fname):
1933 1930 for hgname in walkrepos(fname, True, seen_dirs):
1934 1931 yield hgname
1935 1932 else:
1936 1933 newdirs.append(d)
1937 1934 dirs[:] = newdirs
1938 1935
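# Illustrative usage (not part of the original file; '/srv/repos' is a
# hypothetical path).  Yields the root of every repository below the given
# directory, plus any .hg/patches queue repos, optionally following symlinks
# and recursing into repositories that contain further repositories.
# for root in walkrepos('/srv/repos', followsym=True, recurse=True):
#     print root
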
1939 1936 _rcpath = None
1940 1937
1941 1938 def os_rcpath():
1942 1939 '''return default os-specific hgrc search path'''
1943 1940 path = system_rcpath()
1944 1941 path.extend(user_rcpath())
1945 1942 path = [os.path.normpath(f) for f in path]
1946 1943 return path
1947 1944
1948 1945 def rcpath():
1949 1946 '''return hgrc search path. if env var HGRCPATH is set, use it.
1950 1947 for each item in path, if directory, use files ending in .rc,
1951 1948 else use item.
1952 1949 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1953 1950 if no HGRCPATH, use default os-specific path.'''
1954 1951 global _rcpath
1955 1952 if _rcpath is None:
1956 1953 if 'HGRCPATH' in os.environ:
1957 1954 _rcpath = []
1958 1955 for p in os.environ['HGRCPATH'].split(os.pathsep):
1959 1956 if not p: continue
1960 1957 if os.path.isdir(p):
1961 1958 for f, kind in osutil.listdir(p):
1962 1959 if f.endswith('.rc'):
1963 1960 _rcpath.append(os.path.join(p, f))
1964 1961 else:
1965 1962 _rcpath.append(p)
1966 1963 else:
1967 1964 _rcpath = os_rcpath()
1968 1965 return _rcpath
1969 1966
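# Illustrative example (not part of the original file; only valid before the
# first rcpath() call, since the result is cached in _rcpath).  Setting
# HGRCPATH to an empty string disables all system- and user-level hgrc files,
# leaving only the repository's own .hg/hgrc.
# os.environ['HGRCPATH'] = ''
# rcpath()        # -> []
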
1970 1967 def bytecount(nbytes):
1971 1968 '''return byte count formatted as readable string, with units'''
1972 1969
1973 1970 units = (
1974 1971 (100, 1<<30, _('%.0f GB')),
1975 1972 (10, 1<<30, _('%.1f GB')),
1976 1973 (1, 1<<30, _('%.2f GB')),
1977 1974 (100, 1<<20, _('%.0f MB')),
1978 1975 (10, 1<<20, _('%.1f MB')),
1979 1976 (1, 1<<20, _('%.2f MB')),
1980 1977 (100, 1<<10, _('%.0f KB')),
1981 1978 (10, 1<<10, _('%.1f KB')),
1982 1979 (1, 1<<10, _('%.2f KB')),
1983 1980 (1, 1, _('%.0f bytes')),
1984 1981 )
1985 1982
1986 1983 for multiplier, divisor, format in units:
1987 1984 if nbytes >= divisor * multiplier:
1988 1985 return format % (nbytes / float(divisor))
1989 1986 return units[-1][2] % nbytes
1990 1987
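# Illustrative examples (not part of the original file).  The unit table is
# scanned top-down, so larger values get coarser units and fewer decimals.
# >>> bytecount(0)
# '0 bytes'
# >>> bytecount(2048)
# '2.00 KB'
# >>> bytecount(5 * (1 << 20))
# '5.00 MB'
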
1991 1988 def drop_scheme(scheme, path):
1992 1989 sc = scheme + ':'
1993 1990 if path.startswith(sc):
1994 1991 path = path[len(sc):]
1995 1992 if path.startswith('//'):
1996 1993 path = path[2:]
1997 1994 return path
1998 1995
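# Illustrative examples (not part of the original file).
# >>> drop_scheme('file', 'file:///tmp/repo')
# '/tmp/repo'
# >>> drop_scheme('file', '/tmp/repo')    # other paths pass through unchanged
# '/tmp/repo'
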
1999 1996 def uirepr(s):
2000 1997 # Avoid double backslash in Windows path repr()
2001 1998 return repr(s).replace('\\\\', '\\')
2002 1999
2003 2000 def termwidth():
2004 2001 if 'COLUMNS' in os.environ:
2005 2002 try:
2006 2003 return int(os.environ['COLUMNS'])
2007 2004 except ValueError:
2008 2005 pass
2009 2006 try:
2010 2007 import termios, array, fcntl
2011 2008 for dev in (sys.stdout, sys.stdin):
2012 2009 try:
2013 2010 fd = dev.fileno()
2014 2011 if not os.isatty(fd):
2015 2012 continue
2016 2013 arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
2017 2014 return array.array('h', arri)[1]
2018 2015 except ValueError:
2019 2016 pass
2020 2017 except ImportError:
2021 2018 pass
2022 2019 return 80
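
# Illustrative usage (not part of the original file).  COLUMNS takes
# precedence over the TIOCGWINSZ ioctl probe; if neither is usable the
# function falls back to a width of 80.
# os.environ['COLUMNS'] = '120'
# termwidth()     # -> 120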