##// END OF EJS Templates
introduce new RequirementError (issue2649)...
Adrian Buehlmann -
r13447:931a72e0 stable
parent child Browse files
Show More
@@ -1,653 +1,655 b''
1 # dispatch.py - command dispatching for mercurial
1 # dispatch.py - command dispatching for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback, re
9 import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback, re
10 import util, commands, hg, fancyopts, extensions, hook, error
10 import util, commands, hg, fancyopts, extensions, hook, error
11 import cmdutil, encoding
11 import cmdutil, encoding
12 import ui as uimod
12 import ui as uimod
13
13
def run():
    """Command-line entry point: dispatch sys.argv and exit with its status."""
    status = dispatch(sys.argv[1:])
    sys.exit(status)
17
17
18 def dispatch(args):
18 def dispatch(args):
19 "run the command specified in args"
19 "run the command specified in args"
20 try:
20 try:
21 u = uimod.ui()
21 u = uimod.ui()
22 if '--traceback' in args:
22 if '--traceback' in args:
23 u.setconfig('ui', 'traceback', 'on')
23 u.setconfig('ui', 'traceback', 'on')
24 except util.Abort, inst:
24 except util.Abort, inst:
25 sys.stderr.write(_("abort: %s\n") % inst)
25 sys.stderr.write(_("abort: %s\n") % inst)
26 if inst.hint:
26 if inst.hint:
27 sys.stderr.write(_("(%s)\n") % inst.hint)
27 sys.stderr.write(_("(%s)\n") % inst.hint)
28 return -1
28 return -1
29 except error.ParseError, inst:
29 except error.ParseError, inst:
30 if len(inst.args) > 1:
30 if len(inst.args) > 1:
31 sys.stderr.write(_("hg: parse error at %s: %s\n") %
31 sys.stderr.write(_("hg: parse error at %s: %s\n") %
32 (inst.args[1], inst.args[0]))
32 (inst.args[1], inst.args[0]))
33 else:
33 else:
34 sys.stderr.write(_("hg: parse error: %s\n") % inst.args[0])
34 sys.stderr.write(_("hg: parse error: %s\n") % inst.args[0])
35 return -1
35 return -1
36 return _runcatch(u, args)
36 return _runcatch(u, args)
37
37
def _runcatch(ui, args):
    """Run _dispatch(ui, args) under the top-level exception handler.

    Installs termination-signal handlers, honours --debugger, and turns
    every anticipated exception into a user-facing message on ui.
    Returns the command's status, or -1 after a handled error; unknown
    exceptions are reported with version info and re-raised.
    """
    def catchterm(*args):
        raise error.SignalInterrupt

    try:
        for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
            # not every platform defines all of these signals
            num = getattr(signal, name, None)
            if num:
                signal.signal(num, catchterm)
    except ValueError:
        pass # happens if called in a thread

    try:
        try:
            # enter the debugger before command execution
            if '--debugger' in args:
                ui.warn(_("entering debugger - "
                          "type c to continue starting hg or h for help\n"))
                pdb.set_trace()
            try:
                return _dispatch(ui, args)
            finally:
                ui.flush()
        except:
            # enter the debugger when we hit an exception
            if '--debugger' in args:
                traceback.print_exc()
                pdb.post_mortem(sys.exc_info()[2])
            ui.traceback()
            raise

    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.AmbiguousCommand, inst:
        ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
                (inst.args[0], " ".join(inst.args[1])))
    except error.ParseError, inst:
        if len(inst.args) > 1:
            ui.warn(_("hg: parse error at %s: %s\n") %
                    (inst.args[1], inst.args[0]))
        else:
            ui.warn(_("hg: parse error: %s\n") % inst.args[0])
        return -1
    except error.LockHeld, inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %s') % inst.locker
        else:
            reason = _('lock held by %s') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
    except error.LockUnavailable, inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename, inst.strerror))
    except error.CommandError, inst:
        # args[0] is the command name when the error is command-specific
        if inst.args[0]:
            ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
            commands.help_(ui, inst.args[0])
        else:
            ui.warn(_("hg: %s\n") % inst.args[1])
            commands.help_(ui, 'shortlist')
    except error.RepoError, inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.ResponseError, inst:
        ui.warn(_("abort: %s") % inst.args[0])
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.RevlogError, inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.SignalInterrupt:
        ui.warn(_("killed!\n"))
    except error.UnknownCommand, inst:
        ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
        try:
            # check if the command is in a disabled extension
            # (but don't check for extensions themselves)
            commands.help_(ui, inst.args[0], unknowncmd=True)
        except error.UnknownCommand:
            commands.help_(ui, 'shortlist')
    except util.Abort, inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError, inst:
        ui.warn(_("abort: %s!\n") % inst)
        # last word of the message is the module that failed to import
        m = str(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError, inst:
        if hasattr(inst, "code"):
            # NOTE(review): presumably an HTTP error response (has .code)
            ui.warn(_("abort: %s\n") % inst)
        elif hasattr(inst, "reason"):
            # NOTE(review): presumably a URL-level error (has .reason)
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except: # it might be anything, for example a string
                reason = inst.reason
            ui.warn(_("abort: error: %s\n") % reason)
        elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE:
            # broken pipes are routine (e.g. "hg log | head"); stay quiet
            # unless debugging
            if ui.debugflag:
                ui.warn(_("broken pipe\n"))
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
            else:
                ui.warn(_("abort: %s\n") % inst.strerror)
        else:
            raise
    except OSError, inst:
        if getattr(inst, "filename", None):
            ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
        else:
            ui.warn(_("abort: %s\n") % inst.strerror)
    except KeyboardInterrupt:
        try:
            ui.warn(_("interrupted!\n"))
        except IOError, inst:
            # writing the message itself may fail on a closed pipe
            if inst.errno == errno.EPIPE:
                if ui.debugflag:
                    ui.warn(_("\nbroken pipe\n"))
            else:
                raise
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit, inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error, inst:
        ui.warn(_("abort: %s\n") % inst.args[-1])
    except:
        ui.warn(_("** unknown exception encountered,"
                  " please report by visiting\n"))
        ui.warn(_("** http://mercurial.selenic.com/wiki/BugTracker\n"))
        ui.warn(_("** Python %s\n") % sys.version.replace('\n', ''))
        ui.warn(_("** Mercurial Distributed SCM (version %s)\n")
                % util.version())
        ui.warn(_("** Extensions loaded: %s\n")
                % ", ".join([x[0] for x in extensions.extensions()]))
        raise

    return -1
183
183
def aliasargs(fn):
    """Return the default argument list attached to fn, or [] if none."""
    return getattr(fn, 'args', [])
188
188
class cmdalias(object):
    """A callable command built from one [alias] configuration entry.

    A definition starting with '!' runs as a shell command; otherwise it
    names another command plus extra default arguments.  Definitions
    that cannot be resolved produce a stub function that prints an error
    and returns 1, with self.badalias set.
    """
    def __init__(self, name, definition, cmdtable):
        self.name = self.cmd = name
        self.cmdname = ''
        self.definition = definition
        self.args = []
        self.opts = []
        self.help = ''
        self.norepo = True
        self.badalias = False

        # does this alias shadow an existing command in cmdtable?
        try:
            aliases, entry = cmdutil.findcmd(self.name, cmdtable)
            for alias, e in cmdtable.iteritems():
                if e is entry:
                    self.cmd = alias
                    break
            self.shadows = True
        except error.UnknownCommand:
            self.shadows = False

        if not self.definition:
            def fn(ui, *args):
                ui.warn(_("no definition for alias '%s'\n") % self.name)
                return 1
            self.fn = fn
            self.badalias = True

            return

        if self.definition.startswith('!'):
            # shell alias: the presence of self.shell is what __call__
            # (and _checkshellalias) tests for
            self.shell = True
            def fn(ui, *args):
                env = {'HG_ARGS': ' '.join((self.name,) + args)}
                def _checkvar(m):
                    # keep $$ and positional $N that have a matching
                    # argument; drop (with a debug note) the rest
                    if m.groups()[0] == '$':
                        return m.group()
                    elif int(m.groups()[0]) <= len(args):
                        return m.group()
                    else:
                        ui.debug(_("No argument found for substitution "
                                   "of %i variable in alias '%s' definition.")
                                 % (int(m.groups()[0]), self.name))
                        return ''
                cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:])
                replace = dict((str(i + 1), arg) for i, arg in enumerate(args))
                replace['0'] = self.name
                replace['@'] = ' '.join(args)
                cmd = util.interpolate(r'\$', replace, cmd, escape_prefix=True)
                return util.system(cmd, environ=env)
            self.fn = fn
            return

        args = shlex.split(self.definition)
        self.cmdname = cmd = args.pop(0)
        args = map(util.expandpath, args)

        # these options only make sense on the real command line, not
        # baked into an alias definition
        for invalidarg in ("--cwd", "-R", "--repository", "--repo"):
            if _earlygetopt([invalidarg], args):
                def fn(ui, *args):
                    ui.warn(_("error in definition for alias '%s': %s may only "
                              "be given on the command line\n")
                            % (self.name, invalidarg))
                    return 1

                self.fn = fn
                self.badalias = True
                return

        try:
            tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
            if len(tableentry) > 2:
                self.fn, self.opts, self.help = tableentry
            else:
                self.fn, self.opts = tableentry

            self.args = aliasargs(self.fn) + args
            if cmd not in commands.norepo.split(' '):
                self.norepo = False
            if self.help.startswith("hg " + cmd):
                # drop prefix in old-style help lines so hg shows the alias
                self.help = self.help[4 + len(cmd):]
            self.__doc__ = self.fn.__doc__

        except error.UnknownCommand:
            def fn(ui, *args):
                ui.warn(_("alias '%s' resolves to unknown command '%s'\n") \
                        % (self.name, cmd))
                try:
                    # check if the command is in a disabled extension
                    commands.help_(ui, cmd, unknowncmd=True)
                except error.UnknownCommand:
                    pass
                return 1
            self.fn = fn
            self.badalias = True
        except error.AmbiguousCommand:
            def fn(ui, *args):
                ui.warn(_("alias '%s' resolves to ambiguous command '%s'\n") \
                        % (self.name, cmd))
                return 1
            self.fn = fn
            self.badalias = True

    def __call__(self, ui, *args, **opts):
        """Invoke the aliased command (or shell command) like a command fn."""
        if self.shadows:
            ui.debug("alias '%s' shadows command '%s'\n" %
                     (self.name, self.cmdname))

        if hasattr(self, 'shell'):
            return self.fn(ui, *args, **opts)
        else:
            try:
                util.checksignature(self.fn)(ui, *args, **opts)
            except error.SignatureError:
                # show what the alias expanded to before propagating
                args = ' '.join([self.cmdname] + self.args)
                ui.debug("alias '%s' expands to '%s'\n" % (self.name, args))
                raise
307
307
def addaliases(ui, cmdtable):
    """Add the ui's [alias] entries to cmdtable.

    Runs after extensions are loaded so aliases may reference extension
    commands; an alias may also build on an earlier alias, since each
    definition is resolved against the table built so far.
    """
    for name, definition in ui.configitems('alias'):
        entry = cmdalias(name, definition, cmdtable)
        cmdtable[entry.cmd] = (entry, entry.opts, entry.help)
        if entry.norepo:
            commands.norepo += ' %s' % name
317
317
def _parse(ui, args):
    """Split args into command, command function, arguments and options.

    Returns (cmd, func, args, options, cmdoptions) where options holds
    the global options and cmdoptions the command-specific ones; cmd and
    func are None when no command name was given.
    """
    options = {}
    cmdoptions = {}

    try:
        args = fancyopts.fancyopts(args, commands.globalopts, options)
    except fancyopts.getopt.GetoptError, inst:
        raise error.CommandError(None, inst)

    if args:
        cmd, args = args[0], args[1:]
        aliases, entry = cmdutil.findcmd(cmd, commands.table,
                                         ui.config("ui", "strict"))
        cmd = aliases[0]
        # prepend alias defaults and [defaults] config to the arguments
        args = aliasargs(entry[0]) + args
        defaults = ui.config("defaults", cmd)
        if defaults:
            args = map(util.expandpath, shlex.split(defaults)) + args
        c = list(entry[1])
    else:
        cmd = None
        c = []

    # combine global options into local
    for o in commands.globalopts:
        c.append((o[0], o[1], options[o[1]], o[3]))

    try:
        args = fancyopts.fancyopts(args, c, cmdoptions, True)
    except fancyopts.getopt.GetoptError, inst:
        raise error.CommandError(cmd, inst)

    # separate global options back out
    for o in commands.globalopts:
        n = o[1]
        options[n] = cmdoptions[n]
        del cmdoptions[n]

    # entry is only bound when a command was parsed; the short-circuit
    # on cmd keeps the no-command case from touching it
    return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
357
357
def _parseconfig(ui, config):
    """Apply each --config command-line option (section.name=value) to ui.

    Raises util.Abort for any entry not of the section.name=value form.
    """
    for item in config:
        try:
            fullname, value = item.split('=', 1)
            section, key = fullname.split('.', 1)
            if not (section and key):
                raise IndexError
            ui.setconfig(section, key, value)
        except (IndexError, ValueError):
            raise util.Abort(_('malformed --config option: %r '
                               '(use --config section.name=value)') % item)
370
370
371 def _earlygetopt(aliases, args):
371 def _earlygetopt(aliases, args):
372 """Return list of values for an option (or aliases).
372 """Return list of values for an option (or aliases).
373
373
374 The values are listed in the order they appear in args.
374 The values are listed in the order they appear in args.
375 The options and values are removed from args.
375 The options and values are removed from args.
376 """
376 """
377 try:
377 try:
378 argcount = args.index("--")
378 argcount = args.index("--")
379 except ValueError:
379 except ValueError:
380 argcount = len(args)
380 argcount = len(args)
381 shortopts = [opt for opt in aliases if len(opt) == 2]
381 shortopts = [opt for opt in aliases if len(opt) == 2]
382 values = []
382 values = []
383 pos = 0
383 pos = 0
384 while pos < argcount:
384 while pos < argcount:
385 if args[pos] in aliases:
385 if args[pos] in aliases:
386 if pos + 1 >= argcount:
386 if pos + 1 >= argcount:
387 # ignore and let getopt report an error if there is no value
387 # ignore and let getopt report an error if there is no value
388 break
388 break
389 del args[pos]
389 del args[pos]
390 values.append(args.pop(pos))
390 values.append(args.pop(pos))
391 argcount -= 2
391 argcount -= 2
392 elif args[pos][:2] in shortopts:
392 elif args[pos][:2] in shortopts:
393 # short option can have no following space, e.g. hg log -Rfoo
393 # short option can have no following space, e.g. hg log -Rfoo
394 values.append(args.pop(pos)[2:])
394 values.append(args.pop(pos)[2:])
395 argcount -= 1
395 argcount -= 1
396 else:
396 else:
397 pos += 1
397 pos += 1
398 return values
398 return values
399
399
def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
    """Run thunk d for command cmd, wrapped in its pre-/post- hooks.

    A non-zero result from the pre-<cmd> hook aborts the run and is
    returned directly; otherwise the command's result is passed to the
    post-<cmd> hook and then returned (the post-hook cannot change it).
    """
    argtext = " ".join(fullargs)
    # run pre-hook, and abort if it fails
    ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=argtext,
                    pats=cmdpats, opts=cmdoptions)
    if ret:
        return ret
    ret = _runcommand(ui, options, cmd, d)
    # run post-hook, passing command result
    hook.hook(lui, repo, "post-%s" % cmd, False, args=argtext,
              result=ret, pats=cmdpats, opts=cmdoptions)
    return ret
411
411
412 def _getlocal(ui, rpath):
412 def _getlocal(ui, rpath):
413 """Return (path, local ui object) for the given target path.
413 """Return (path, local ui object) for the given target path.
414
414
415 Takes paths in [cwd]/.hg/hgrc into account."
415 Takes paths in [cwd]/.hg/hgrc into account."
416 """
416 """
417 try:
417 try:
418 wd = os.getcwd()
418 wd = os.getcwd()
419 except OSError, e:
419 except OSError, e:
420 raise util.Abort(_("error getting current working directory: %s") %
420 raise util.Abort(_("error getting current working directory: %s") %
421 e.strerror)
421 e.strerror)
422 path = cmdutil.findrepo(wd) or ""
422 path = cmdutil.findrepo(wd) or ""
423 if not path:
423 if not path:
424 lui = ui
424 lui = ui
425 else:
425 else:
426 lui = ui.copy()
426 lui = ui.copy()
427 lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
427 lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
428
428
429 if rpath:
429 if rpath:
430 path = lui.expandpath(rpath[-1])
430 path = lui.expandpath(rpath[-1])
431 lui = ui.copy()
431 lui = ui.copy()
432 lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
432 lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
433
433
434 return path, lui
434 return path, lui
435
435
def _checkshellalias(ui, args):
    """If args names a shell ('!...') alias, return a thunk that runs it.

    Returns None when args does not resolve to a shell alias; on that
    path the cwd and the commands.norepo global are restored to the
    values saved on entry, so _dispatch can start from a clean state.
    """
    cwd = os.getcwd()
    norepo = commands.norepo
    options = {}

    try:
        args = fancyopts.fancyopts(args, commands.globalopts, options)
    except fancyopts.getopt.GetoptError:
        # leave option errors for the normal parse path to report
        return

    if not args:
        return

    _parseconfig(ui, options['config'])
    if options['cwd']:
        os.chdir(options['cwd'])

    path, lui = _getlocal(ui, [options['repository']])

    # resolve aliases against a copy so the real table is not polluted
    cmdtable = commands.table.copy()
    addaliases(lui, cmdtable)

    cmd = args[0]
    try:
        aliases, entry = cmdutil.findcmd(cmd, cmdtable, lui.config("ui", "strict"))
    except (error.AmbiguousCommand, error.UnknownCommand):
        commands.norepo = norepo
        os.chdir(cwd)
        return

    cmd = aliases[0]
    fn = entry[0]

    if cmd and hasattr(fn, 'shell'):
        # cmdalias sets .shell only for '!'-style definitions
        d = lambda: fn(ui, *args[1:])
        return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d, [], {})

    commands.norepo = norepo
    os.chdir(cwd)
475
475
# extension names already configured by _dispatch; kept at module level
# because programs like TortoiseHg call _dispatch several times
_loaded = set()
477 def _dispatch(ui, args):
477 def _dispatch(ui, args):
478 shellaliasfn = _checkshellalias(ui, args)
478 shellaliasfn = _checkshellalias(ui, args)
479 if shellaliasfn:
479 if shellaliasfn:
480 return shellaliasfn()
480 return shellaliasfn()
481
481
482 # read --config before doing anything else
482 # read --config before doing anything else
483 # (e.g. to change trust settings for reading .hg/hgrc)
483 # (e.g. to change trust settings for reading .hg/hgrc)
484 _parseconfig(ui, _earlygetopt(['--config'], args))
484 _parseconfig(ui, _earlygetopt(['--config'], args))
485
485
486 # check for cwd
486 # check for cwd
487 cwd = _earlygetopt(['--cwd'], args)
487 cwd = _earlygetopt(['--cwd'], args)
488 if cwd:
488 if cwd:
489 os.chdir(cwd[-1])
489 os.chdir(cwd[-1])
490
490
491 rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
491 rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
492 path, lui = _getlocal(ui, rpath)
492 path, lui = _getlocal(ui, rpath)
493
493
494 # Configure extensions in phases: uisetup, extsetup, cmdtable, and
494 # Configure extensions in phases: uisetup, extsetup, cmdtable, and
495 # reposetup. Programs like TortoiseHg will call _dispatch several
495 # reposetup. Programs like TortoiseHg will call _dispatch several
496 # times so we keep track of configured extensions in _loaded.
496 # times so we keep track of configured extensions in _loaded.
497 extensions.loadall(lui)
497 extensions.loadall(lui)
498 exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
498 exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
499 # Propagate any changes to lui.__class__ by extensions
499 # Propagate any changes to lui.__class__ by extensions
500 ui.__class__ = lui.__class__
500 ui.__class__ = lui.__class__
501
501
502 # (uisetup and extsetup are handled in extensions.loadall)
502 # (uisetup and extsetup are handled in extensions.loadall)
503
503
504 for name, module in exts:
504 for name, module in exts:
505 cmdtable = getattr(module, 'cmdtable', {})
505 cmdtable = getattr(module, 'cmdtable', {})
506 overrides = [cmd for cmd in cmdtable if cmd in commands.table]
506 overrides = [cmd for cmd in cmdtable if cmd in commands.table]
507 if overrides:
507 if overrides:
508 ui.warn(_("extension '%s' overrides commands: %s\n")
508 ui.warn(_("extension '%s' overrides commands: %s\n")
509 % (name, " ".join(overrides)))
509 % (name, " ".join(overrides)))
510 commands.table.update(cmdtable)
510 commands.table.update(cmdtable)
511 _loaded.add(name)
511 _loaded.add(name)
512
512
513 # (reposetup is handled in hg.repository)
513 # (reposetup is handled in hg.repository)
514
514
515 addaliases(lui, commands.table)
515 addaliases(lui, commands.table)
516
516
517 # check for fallback encoding
517 # check for fallback encoding
518 fallback = lui.config('ui', 'fallbackencoding')
518 fallback = lui.config('ui', 'fallbackencoding')
519 if fallback:
519 if fallback:
520 encoding.fallbackencoding = fallback
520 encoding.fallbackencoding = fallback
521
521
522 fullargs = args
522 fullargs = args
523 cmd, func, args, options, cmdoptions = _parse(lui, args)
523 cmd, func, args, options, cmdoptions = _parse(lui, args)
524
524
525 if options["config"]:
525 if options["config"]:
526 raise util.Abort(_("option --config may not be abbreviated!"))
526 raise util.Abort(_("option --config may not be abbreviated!"))
527 if options["cwd"]:
527 if options["cwd"]:
528 raise util.Abort(_("option --cwd may not be abbreviated!"))
528 raise util.Abort(_("option --cwd may not be abbreviated!"))
529 if options["repository"]:
529 if options["repository"]:
530 raise util.Abort(_(
530 raise util.Abort(_(
531 "Option -R has to be separated from other options (e.g. not -qR) "
531 "Option -R has to be separated from other options (e.g. not -qR) "
532 "and --repository may only be abbreviated as --repo!"))
532 "and --repository may only be abbreviated as --repo!"))
533
533
534 if options["encoding"]:
534 if options["encoding"]:
535 encoding.encoding = options["encoding"]
535 encoding.encoding = options["encoding"]
536 if options["encodingmode"]:
536 if options["encodingmode"]:
537 encoding.encodingmode = options["encodingmode"]
537 encoding.encodingmode = options["encodingmode"]
538 if options["time"]:
538 if options["time"]:
539 def get_times():
539 def get_times():
540 t = os.times()
540 t = os.times()
541 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
541 if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
542 t = (t[0], t[1], t[2], t[3], time.clock())
542 t = (t[0], t[1], t[2], t[3], time.clock())
543 return t
543 return t
544 s = get_times()
544 s = get_times()
545 def print_time():
545 def print_time():
546 t = get_times()
546 t = get_times()
547 ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
547 ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
548 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
548 (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
549 atexit.register(print_time)
549 atexit.register(print_time)
550
550
551 if options['verbose'] or options['debug'] or options['quiet']:
551 if options['verbose'] or options['debug'] or options['quiet']:
552 ui.setconfig('ui', 'verbose', str(bool(options['verbose'])))
552 ui.setconfig('ui', 'verbose', str(bool(options['verbose'])))
553 ui.setconfig('ui', 'debug', str(bool(options['debug'])))
553 ui.setconfig('ui', 'debug', str(bool(options['debug'])))
554 ui.setconfig('ui', 'quiet', str(bool(options['quiet'])))
554 ui.setconfig('ui', 'quiet', str(bool(options['quiet'])))
555 if options['traceback']:
555 if options['traceback']:
556 ui.setconfig('ui', 'traceback', 'on')
556 ui.setconfig('ui', 'traceback', 'on')
557 if options['noninteractive']:
557 if options['noninteractive']:
558 ui.setconfig('ui', 'interactive', 'off')
558 ui.setconfig('ui', 'interactive', 'off')
559
559
560 if cmdoptions.get('insecure', False):
560 if cmdoptions.get('insecure', False):
561 ui.setconfig('web', 'cacerts', '')
561 ui.setconfig('web', 'cacerts', '')
562
562
563 if options['help']:
563 if options['help']:
564 return commands.help_(ui, cmd, options['version'])
564 return commands.help_(ui, cmd, options['version'])
565 elif options['version']:
565 elif options['version']:
566 return commands.version_(ui)
566 return commands.version_(ui)
567 elif not cmd:
567 elif not cmd:
568 return commands.help_(ui, 'shortlist')
568 return commands.help_(ui, 'shortlist')
569
569
570 repo = None
570 repo = None
571 cmdpats = args[:]
571 cmdpats = args[:]
572 if cmd not in commands.norepo.split():
572 if cmd not in commands.norepo.split():
573 try:
573 try:
574 repo = hg.repository(ui, path=path)
574 repo = hg.repository(ui, path=path)
575 ui = repo.ui
575 ui = repo.ui
576 if not repo.local():
576 if not repo.local():
577 raise util.Abort(_("repository '%s' is not local") % path)
577 raise util.Abort(_("repository '%s' is not local") % path)
578 ui.setconfig("bundle", "mainreporoot", repo.root)
578 ui.setconfig("bundle", "mainreporoot", repo.root)
579 except error.RequirementError:
580 raise
579 except error.RepoError:
581 except error.RepoError:
580 if cmd not in commands.optionalrepo.split():
582 if cmd not in commands.optionalrepo.split():
581 if args and not path: # try to infer -R from command args
583 if args and not path: # try to infer -R from command args
582 repos = map(cmdutil.findrepo, args)
584 repos = map(cmdutil.findrepo, args)
583 guess = repos[0]
585 guess = repos[0]
584 if guess and repos.count(guess) == len(repos):
586 if guess and repos.count(guess) == len(repos):
585 return _dispatch(ui, ['--repository', guess] + fullargs)
587 return _dispatch(ui, ['--repository', guess] + fullargs)
586 if not path:
588 if not path:
587 raise error.RepoError(_("There is no Mercurial repository"
589 raise error.RepoError(_("There is no Mercurial repository"
588 " here (.hg not found)"))
590 " here (.hg not found)"))
589 raise
591 raise
590 args.insert(0, repo)
592 args.insert(0, repo)
591 elif rpath:
593 elif rpath:
592 ui.warn(_("warning: --repository ignored\n"))
594 ui.warn(_("warning: --repository ignored\n"))
593
595
594 msg = ' '.join(' ' in a and repr(a) or a for a in fullargs)
596 msg = ' '.join(' ' in a and repr(a) or a for a in fullargs)
595 ui.log("command", msg + "\n")
597 ui.log("command", msg + "\n")
596 d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
598 d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
597 try:
599 try:
598 return runcommand(lui, repo, cmd, fullargs, ui, options, d,
600 return runcommand(lui, repo, cmd, fullargs, ui, options, d,
599 cmdpats, cmdoptions)
601 cmdpats, cmdoptions)
600 finally:
602 finally:
601 if repo:
603 if repo:
602 repo.close()
604 repo.close()
603
605
604 def _runcommand(ui, options, cmd, cmdfunc):
606 def _runcommand(ui, options, cmd, cmdfunc):
605 def checkargs():
607 def checkargs():
606 try:
608 try:
607 return cmdfunc()
609 return cmdfunc()
608 except error.SignatureError:
610 except error.SignatureError:
609 raise error.CommandError(cmd, _("invalid arguments"))
611 raise error.CommandError(cmd, _("invalid arguments"))
610
612
611 if options['profile']:
613 if options['profile']:
612 format = ui.config('profiling', 'format', default='text')
614 format = ui.config('profiling', 'format', default='text')
613
615
614 if not format in ['text', 'kcachegrind']:
616 if not format in ['text', 'kcachegrind']:
615 ui.warn(_("unrecognized profiling format '%s'"
617 ui.warn(_("unrecognized profiling format '%s'"
616 " - Ignored\n") % format)
618 " - Ignored\n") % format)
617 format = 'text'
619 format = 'text'
618
620
619 output = ui.config('profiling', 'output')
621 output = ui.config('profiling', 'output')
620
622
621 if output:
623 if output:
622 path = ui.expandpath(output)
624 path = ui.expandpath(output)
623 ostream = open(path, 'wb')
625 ostream = open(path, 'wb')
624 else:
626 else:
625 ostream = sys.stderr
627 ostream = sys.stderr
626
628
627 try:
629 try:
628 from mercurial import lsprof
630 from mercurial import lsprof
629 except ImportError:
631 except ImportError:
630 raise util.Abort(_(
632 raise util.Abort(_(
631 'lsprof not available - install from '
633 'lsprof not available - install from '
632 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
634 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
633 p = lsprof.Profiler()
635 p = lsprof.Profiler()
634 p.enable(subcalls=True)
636 p.enable(subcalls=True)
635 try:
637 try:
636 return checkargs()
638 return checkargs()
637 finally:
639 finally:
638 p.disable()
640 p.disable()
639
641
640 if format == 'kcachegrind':
642 if format == 'kcachegrind':
641 import lsprofcalltree
643 import lsprofcalltree
642 calltree = lsprofcalltree.KCacheGrind(p)
644 calltree = lsprofcalltree.KCacheGrind(p)
643 calltree.output(ostream)
645 calltree.output(ostream)
644 else:
646 else:
645 # format == 'text'
647 # format == 'text'
646 stats = lsprof.Stats(p.getstats())
648 stats = lsprof.Stats(p.getstats())
647 stats.sort()
649 stats.sort()
648 stats.pprint(top=10, file=ostream, climit=5)
650 stats.pprint(top=10, file=ostream, climit=5)
649
651
650 if output:
652 if output:
651 ostream.close()
653 ostream.close()
652 else:
654 else:
653 return checkargs()
655 return checkargs()
@@ -1,81 +1,85 b''
1 # error.py - Mercurial exceptions
1 # error.py - Mercurial exceptions
2 #
2 #
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Mercurial exceptions.
8 """Mercurial exceptions.
9
9
10 This allows us to catch exceptions at higher levels without forcing
10 This allows us to catch exceptions at higher levels without forcing
11 imports.
11 imports.
12 """
12 """
13
13
14 # Do not import anything here, please
14 # Do not import anything here, please
15
15
16 class RevlogError(Exception):
16 class RevlogError(Exception):
17 pass
17 pass
18
18
19 class LookupError(RevlogError, KeyError):
19 class LookupError(RevlogError, KeyError):
20 def __init__(self, name, index, message):
20 def __init__(self, name, index, message):
21 self.name = name
21 self.name = name
22 if isinstance(name, str) and len(name) == 20:
22 if isinstance(name, str) and len(name) == 20:
23 from node import short
23 from node import short
24 name = short(name)
24 name = short(name)
25 RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
25 RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
26
26
27 def __str__(self):
27 def __str__(self):
28 return RevlogError.__str__(self)
28 return RevlogError.__str__(self)
29
29
30 class CommandError(Exception):
30 class CommandError(Exception):
31 """Exception raised on errors in parsing the command line."""
31 """Exception raised on errors in parsing the command line."""
32
32
33 class Abort(Exception):
33 class Abort(Exception):
34 """Raised if a command needs to print an error and exit."""
34 """Raised if a command needs to print an error and exit."""
35 def __init__(self, *args, **kw):
35 def __init__(self, *args, **kw):
36 Exception.__init__(self, *args)
36 Exception.__init__(self, *args)
37 self.hint = kw.get('hint')
37 self.hint = kw.get('hint')
38
38
39 class ConfigError(Abort):
39 class ConfigError(Abort):
40 'Exception raised when parsing config files'
40 'Exception raised when parsing config files'
41
41
42 class ParseError(Exception):
42 class ParseError(Exception):
43 'Exception raised when parsing config files (msg[, pos])'
43 'Exception raised when parsing config files (msg[, pos])'
44
44
45 class RepoError(Exception):
45 class RepoError(Exception):
46 pass
46 pass
47
47
48 class RepoLookupError(RepoError):
48 class RepoLookupError(RepoError):
49 pass
49 pass
50
50
51 class CapabilityError(RepoError):
51 class CapabilityError(RepoError):
52 pass
52 pass
53
53
54 class RequirementError(RepoError):
55 """Exception raised if .hg/requires has an unknown entry."""
56 pass
57
54 class LockError(IOError):
58 class LockError(IOError):
55 def __init__(self, errno, strerror, filename, desc):
59 def __init__(self, errno, strerror, filename, desc):
56 IOError.__init__(self, errno, strerror, filename)
60 IOError.__init__(self, errno, strerror, filename)
57 self.desc = desc
61 self.desc = desc
58
62
59 class LockHeld(LockError):
63 class LockHeld(LockError):
60 def __init__(self, errno, filename, desc, locker):
64 def __init__(self, errno, filename, desc, locker):
61 LockError.__init__(self, errno, 'Lock held', filename, desc)
65 LockError.__init__(self, errno, 'Lock held', filename, desc)
62 self.locker = locker
66 self.locker = locker
63
67
64 class LockUnavailable(LockError):
68 class LockUnavailable(LockError):
65 pass
69 pass
66
70
67 class ResponseError(Exception):
71 class ResponseError(Exception):
68 """Raised to print an error with part of output and exit."""
72 """Raised to print an error with part of output and exit."""
69
73
70 class UnknownCommand(Exception):
74 class UnknownCommand(Exception):
71 """Exception raised if command is not in the command table."""
75 """Exception raised if command is not in the command table."""
72
76
73 class AmbiguousCommand(Exception):
77 class AmbiguousCommand(Exception):
74 """Exception raised if command shortcut matches more than one command."""
78 """Exception raised if command shortcut matches more than one command."""
75
79
76 # derived from KeyboardInterrupt to simplify some breakout code
80 # derived from KeyboardInterrupt to simplify some breakout code
77 class SignalInterrupt(KeyboardInterrupt):
81 class SignalInterrupt(KeyboardInterrupt):
78 """Exception raised on SIGTERM and SIGHUP."""
82 """Exception raised on SIGTERM and SIGHUP."""
79
83
80 class SignatureError(Exception):
84 class SignatureError(Exception):
81 pass
85 pass
@@ -1,2029 +1,2030 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
24 supportedformats = set(('revlogv1', 'parentdelta'))
25 supported = supportedformats | set(('store', 'fncache', 'shared',
25 supported = supportedformats | set(('store', 'fncache', 'shared',
26 'dotencode'))
26 'dotencode'))
27
27
28 def __init__(self, baseui, path=None, create=0):
28 def __init__(self, baseui, path=None, create=0):
29 repo.repository.__init__(self)
29 repo.repository.__init__(self)
30 self.root = os.path.realpath(util.expandpath(path))
30 self.root = os.path.realpath(util.expandpath(path))
31 self.path = os.path.join(self.root, ".hg")
31 self.path = os.path.join(self.root, ".hg")
32 self.origroot = path
32 self.origroot = path
33 self.auditor = util.path_auditor(self.root, self._checknested)
33 self.auditor = util.path_auditor(self.root, self._checknested)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.baseui = baseui
36 self.baseui = baseui
37 self.ui = baseui.copy()
37 self.ui = baseui.copy()
38
38
39 try:
39 try:
40 self.ui.readconfig(self.join("hgrc"), self.root)
40 self.ui.readconfig(self.join("hgrc"), self.root)
41 extensions.loadall(self.ui)
41 extensions.loadall(self.ui)
42 except IOError:
42 except IOError:
43 pass
43 pass
44
44
45 if not os.path.isdir(self.path):
45 if not os.path.isdir(self.path):
46 if create:
46 if create:
47 if not os.path.exists(path):
47 if not os.path.exists(path):
48 util.makedirs(path)
48 util.makedirs(path)
49 os.mkdir(self.path)
49 os.mkdir(self.path)
50 requirements = ["revlogv1"]
50 requirements = ["revlogv1"]
51 if self.ui.configbool('format', 'usestore', True):
51 if self.ui.configbool('format', 'usestore', True):
52 os.mkdir(os.path.join(self.path, "store"))
52 os.mkdir(os.path.join(self.path, "store"))
53 requirements.append("store")
53 requirements.append("store")
54 if self.ui.configbool('format', 'usefncache', True):
54 if self.ui.configbool('format', 'usefncache', True):
55 requirements.append("fncache")
55 requirements.append("fncache")
56 if self.ui.configbool('format', 'dotencode', True):
56 if self.ui.configbool('format', 'dotencode', True):
57 requirements.append('dotencode')
57 requirements.append('dotencode')
58 # create an invalid changelog
58 # create an invalid changelog
59 self.opener("00changelog.i", "a").write(
59 self.opener("00changelog.i", "a").write(
60 '\0\0\0\2' # represents revlogv2
60 '\0\0\0\2' # represents revlogv2
61 ' dummy changelog to prevent using the old repo layout'
61 ' dummy changelog to prevent using the old repo layout'
62 )
62 )
63 if self.ui.configbool('format', 'parentdelta', False):
63 if self.ui.configbool('format', 'parentdelta', False):
64 requirements.append("parentdelta")
64 requirements.append("parentdelta")
65 else:
65 else:
66 raise error.RepoError(_("repository %s not found") % path)
66 raise error.RepoError(_("repository %s not found") % path)
67 elif create:
67 elif create:
68 raise error.RepoError(_("repository %s already exists") % path)
68 raise error.RepoError(_("repository %s already exists") % path)
69 else:
69 else:
70 # find requirements
70 # find requirements
71 requirements = set()
71 requirements = set()
72 try:
72 try:
73 requirements = set(self.opener("requires").read().splitlines())
73 requirements = set(self.opener("requires").read().splitlines())
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 for r in requirements - self.supported:
77 for r in requirements - self.supported:
78 raise error.RepoError(_("requirement '%s' not supported") % r)
78 raise error.RequirementError(
79 _("requirement '%s' not supported") % r)
79
80
80 self.sharedpath = self.path
81 self.sharedpath = self.path
81 try:
82 try:
82 s = os.path.realpath(self.opener("sharedpath").read())
83 s = os.path.realpath(self.opener("sharedpath").read())
83 if not os.path.exists(s):
84 if not os.path.exists(s):
84 raise error.RepoError(
85 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
87 self.sharedpath = s
87 except IOError, inst:
88 except IOError, inst:
88 if inst.errno != errno.ENOENT:
89 if inst.errno != errno.ENOENT:
89 raise
90 raise
90
91
91 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.store = store.store(requirements, self.sharedpath, util.opener)
92 self.spath = self.store.path
93 self.spath = self.store.path
93 self.sopener = self.store.opener
94 self.sopener = self.store.opener
94 self.sjoin = self.store.join
95 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
96 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
97 self._applyrequirements(requirements)
97 if create:
98 if create:
98 self._writerequirements()
99 self._writerequirements()
99
100
100 # These two define the set of tags for this repository. _tags
101 # These two define the set of tags for this repository. _tags
101 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # maps tag name to node; _tagtypes maps tag name to 'global' or
102 # 'local'. (Global tags are defined by .hgtags across all
103 # 'local'. (Global tags are defined by .hgtags across all
103 # heads, and local tags are defined in .hg/localtags.) They
104 # heads, and local tags are defined in .hg/localtags.) They
104 # constitute the in-memory cache of tags.
105 # constitute the in-memory cache of tags.
105 self._tags = None
106 self._tags = None
106 self._tagtypes = None
107 self._tagtypes = None
107
108
108 self._branchcache = None
109 self._branchcache = None
109 self._branchcachetip = None
110 self._branchcachetip = None
110 self.nodetagscache = None
111 self.nodetagscache = None
111 self.filterpats = {}
112 self.filterpats = {}
112 self._datafilters = {}
113 self._datafilters = {}
113 self._transref = self._lockref = self._wlockref = None
114 self._transref = self._lockref = self._wlockref = None
114
115
115 def _applyrequirements(self, requirements):
116 def _applyrequirements(self, requirements):
116 self.requirements = requirements
117 self.requirements = requirements
117 self.sopener.options = {}
118 self.sopener.options = {}
118 if 'parentdelta' in requirements:
119 if 'parentdelta' in requirements:
119 self.sopener.options['parentdelta'] = 1
120 self.sopener.options['parentdelta'] = 1
120
121
121 def _writerequirements(self):
122 def _writerequirements(self):
122 reqfile = self.opener("requires", "w")
123 reqfile = self.opener("requires", "w")
123 for r in self.requirements:
124 for r in self.requirements:
124 reqfile.write("%s\n" % r)
125 reqfile.write("%s\n" % r)
125 reqfile.close()
126 reqfile.close()
126
127
127 def _checknested(self, path):
128 def _checknested(self, path):
128 """Determine if path is a legal nested repository."""
129 """Determine if path is a legal nested repository."""
129 if not path.startswith(self.root):
130 if not path.startswith(self.root):
130 return False
131 return False
131 subpath = path[len(self.root) + 1:]
132 subpath = path[len(self.root) + 1:]
132
133
133 # XXX: Checking against the current working copy is wrong in
134 # XXX: Checking against the current working copy is wrong in
134 # the sense that it can reject things like
135 # the sense that it can reject things like
135 #
136 #
136 # $ hg cat -r 10 sub/x.txt
137 # $ hg cat -r 10 sub/x.txt
137 #
138 #
138 # if sub/ is no longer a subrepository in the working copy
139 # if sub/ is no longer a subrepository in the working copy
139 # parent revision.
140 # parent revision.
140 #
141 #
141 # However, it can of course also allow things that would have
142 # However, it can of course also allow things that would have
142 # been rejected before, such as the above cat command if sub/
143 # been rejected before, such as the above cat command if sub/
143 # is a subrepository now, but was a normal directory before.
144 # is a subrepository now, but was a normal directory before.
144 # The old path auditor would have rejected by mistake since it
145 # The old path auditor would have rejected by mistake since it
145 # panics when it sees sub/.hg/.
146 # panics when it sees sub/.hg/.
146 #
147 #
147 # All in all, checking against the working copy seems sensible
148 # All in all, checking against the working copy seems sensible
148 # since we want to prevent access to nested repositories on
149 # since we want to prevent access to nested repositories on
149 # the filesystem *now*.
150 # the filesystem *now*.
150 ctx = self[None]
151 ctx = self[None]
151 parts = util.splitpath(subpath)
152 parts = util.splitpath(subpath)
152 while parts:
153 while parts:
153 prefix = os.sep.join(parts)
154 prefix = os.sep.join(parts)
154 if prefix in ctx.substate:
155 if prefix in ctx.substate:
155 if prefix == subpath:
156 if prefix == subpath:
156 return True
157 return True
157 else:
158 else:
158 sub = ctx.sub(prefix)
159 sub = ctx.sub(prefix)
159 return sub.checknested(subpath[len(prefix) + 1:])
160 return sub.checknested(subpath[len(prefix) + 1:])
160 else:
161 else:
161 parts.pop()
162 parts.pop()
162 return False
163 return False
163
164
164 @util.propertycache
165 @util.propertycache
165 def _bookmarks(self):
166 def _bookmarks(self):
166 return bookmarks.read(self)
167 return bookmarks.read(self)
167
168
168 @util.propertycache
169 @util.propertycache
169 def _bookmarkcurrent(self):
170 def _bookmarkcurrent(self):
170 return bookmarks.readcurrent(self)
171 return bookmarks.readcurrent(self)
171
172
172 @propertycache
173 @propertycache
173 def changelog(self):
174 def changelog(self):
174 c = changelog.changelog(self.sopener)
175 c = changelog.changelog(self.sopener)
175 if 'HG_PENDING' in os.environ:
176 if 'HG_PENDING' in os.environ:
176 p = os.environ['HG_PENDING']
177 p = os.environ['HG_PENDING']
177 if p.startswith(self.root):
178 if p.startswith(self.root):
178 c.readpending('00changelog.i.a')
179 c.readpending('00changelog.i.a')
179 self.sopener.options['defversion'] = c.version
180 self.sopener.options['defversion'] = c.version
180 return c
181 return c
181
182
182 @propertycache
183 @propertycache
183 def manifest(self):
184 def manifest(self):
184 return manifest.manifest(self.sopener)
185 return manifest.manifest(self.sopener)
185
186
186 @propertycache
187 @propertycache
187 def dirstate(self):
188 def dirstate(self):
188 warned = [0]
189 warned = [0]
189 def validate(node):
190 def validate(node):
190 try:
191 try:
191 r = self.changelog.rev(node)
192 r = self.changelog.rev(node)
192 return node
193 return node
193 except error.LookupError:
194 except error.LookupError:
194 if not warned[0]:
195 if not warned[0]:
195 warned[0] = True
196 warned[0] = True
196 self.ui.warn(_("warning: ignoring unknown"
197 self.ui.warn(_("warning: ignoring unknown"
197 " working parent %s!\n") % short(node))
198 " working parent %s!\n") % short(node))
198 return nullid
199 return nullid
199
200
200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201
202
202 def __getitem__(self, changeid):
203 def __getitem__(self, changeid):
203 if changeid is None:
204 if changeid is None:
204 return context.workingctx(self)
205 return context.workingctx(self)
205 return context.changectx(self, changeid)
206 return context.changectx(self, changeid)
206
207
207 def __contains__(self, changeid):
208 def __contains__(self, changeid):
208 try:
209 try:
209 return bool(self.lookup(changeid))
210 return bool(self.lookup(changeid))
210 except error.RepoLookupError:
211 except error.RepoLookupError:
211 return False
212 return False
212
213
213 def __nonzero__(self):
214 def __nonzero__(self):
214 return True
215 return True
215
216
216 def __len__(self):
217 def __len__(self):
217 return len(self.changelog)
218 return len(self.changelog)
218
219
219 def __iter__(self):
220 def __iter__(self):
220 for i in xrange(len(self)):
221 for i in xrange(len(self)):
221 yield i
222 yield i
222
223
223 def url(self):
224 def url(self):
224 return 'file:' + self.root
225 return 'file:' + self.root
225
226
226 def hook(self, name, throw=False, **args):
227 def hook(self, name, throw=False, **args):
227 return hook.hook(self.ui, self, name, throw, **args)
228 return hook.hook(self.ui, self, name, throw, **args)
228
229
229 tag_disallowed = ':\r\n'
230 tag_disallowed = ':\r\n'
230
231
231 def _tag(self, names, node, message, local, user, date, extra={}):
232 def _tag(self, names, node, message, local, user, date, extra={}):
232 if isinstance(names, str):
233 if isinstance(names, str):
233 allchars = names
234 allchars = names
234 names = (names,)
235 names = (names,)
235 else:
236 else:
236 allchars = ''.join(names)
237 allchars = ''.join(names)
237 for c in self.tag_disallowed:
238 for c in self.tag_disallowed:
238 if c in allchars:
239 if c in allchars:
239 raise util.Abort(_('%r cannot be used in a tag name') % c)
240 raise util.Abort(_('%r cannot be used in a tag name') % c)
240
241
241 branches = self.branchmap()
242 branches = self.branchmap()
242 for name in names:
243 for name in names:
243 self.hook('pretag', throw=True, node=hex(node), tag=name,
244 self.hook('pretag', throw=True, node=hex(node), tag=name,
244 local=local)
245 local=local)
245 if name in branches:
246 if name in branches:
246 self.ui.warn(_("warning: tag %s conflicts with existing"
247 self.ui.warn(_("warning: tag %s conflicts with existing"
247 " branch name\n") % name)
248 " branch name\n") % name)
248
249
249 def writetags(fp, names, munge, prevtags):
250 def writetags(fp, names, munge, prevtags):
250 fp.seek(0, 2)
251 fp.seek(0, 2)
251 if prevtags and prevtags[-1] != '\n':
252 if prevtags and prevtags[-1] != '\n':
252 fp.write('\n')
253 fp.write('\n')
253 for name in names:
254 for name in names:
254 m = munge and munge(name) or name
255 m = munge and munge(name) or name
255 if self._tagtypes and name in self._tagtypes:
256 if self._tagtypes and name in self._tagtypes:
256 old = self._tags.get(name, nullid)
257 old = self._tags.get(name, nullid)
257 fp.write('%s %s\n' % (hex(old), m))
258 fp.write('%s %s\n' % (hex(old), m))
258 fp.write('%s %s\n' % (hex(node), m))
259 fp.write('%s %s\n' % (hex(node), m))
259 fp.close()
260 fp.close()
260
261
261 prevtags = ''
262 prevtags = ''
262 if local:
263 if local:
263 try:
264 try:
264 fp = self.opener('localtags', 'r+')
265 fp = self.opener('localtags', 'r+')
265 except IOError:
266 except IOError:
266 fp = self.opener('localtags', 'a')
267 fp = self.opener('localtags', 'a')
267 else:
268 else:
268 prevtags = fp.read()
269 prevtags = fp.read()
269
270
270 # local tags are stored in the current charset
271 # local tags are stored in the current charset
271 writetags(fp, names, None, prevtags)
272 writetags(fp, names, None, prevtags)
272 for name in names:
273 for name in names:
273 self.hook('tag', node=hex(node), tag=name, local=local)
274 self.hook('tag', node=hex(node), tag=name, local=local)
274 return
275 return
275
276
276 try:
277 try:
277 fp = self.wfile('.hgtags', 'rb+')
278 fp = self.wfile('.hgtags', 'rb+')
278 except IOError:
279 except IOError:
279 fp = self.wfile('.hgtags', 'ab')
280 fp = self.wfile('.hgtags', 'ab')
280 else:
281 else:
281 prevtags = fp.read()
282 prevtags = fp.read()
282
283
283 # committed tags are stored in UTF-8
284 # committed tags are stored in UTF-8
284 writetags(fp, names, encoding.fromlocal, prevtags)
285 writetags(fp, names, encoding.fromlocal, prevtags)
285
286
286 fp.close()
287 fp.close()
287
288
288 if '.hgtags' not in self.dirstate:
289 if '.hgtags' not in self.dirstate:
289 self[None].add(['.hgtags'])
290 self[None].add(['.hgtags'])
290
291
291 m = matchmod.exact(self.root, '', ['.hgtags'])
292 m = matchmod.exact(self.root, '', ['.hgtags'])
292 tagnode = self.commit(message, user, date, extra=extra, match=m)
293 tagnode = self.commit(message, user, date, extra=extra, match=m)
293
294
294 for name in names:
295 for name in names:
295 self.hook('tag', node=hex(node), tag=name, local=local)
296 self.hook('tag', node=hex(node), tag=name, local=local)
296
297
297 return tagnode
298 return tagnode
298
299
299 def tag(self, names, node, message, local, user, date):
300 def tag(self, names, node, message, local, user, date):
300 '''tag a revision with one or more symbolic names.
301 '''tag a revision with one or more symbolic names.
301
302
302 names is a list of strings or, when adding a single tag, names may be a
303 names is a list of strings or, when adding a single tag, names may be a
303 string.
304 string.
304
305
305 if local is True, the tags are stored in a per-repository file.
306 if local is True, the tags are stored in a per-repository file.
306 otherwise, they are stored in the .hgtags file, and a new
307 otherwise, they are stored in the .hgtags file, and a new
307 changeset is committed with the change.
308 changeset is committed with the change.
308
309
309 keyword arguments:
310 keyword arguments:
310
311
311 local: whether to store tags in non-version-controlled file
312 local: whether to store tags in non-version-controlled file
312 (default False)
313 (default False)
313
314
314 message: commit message to use if committing
315 message: commit message to use if committing
315
316
316 user: name of user to use if committing
317 user: name of user to use if committing
317
318
318 date: date tuple to use if committing'''
319 date: date tuple to use if committing'''
319
320
320 if not local:
321 if not local:
321 for x in self.status()[:5]:
322 for x in self.status()[:5]:
322 if '.hgtags' in x:
323 if '.hgtags' in x:
323 raise util.Abort(_('working copy of .hgtags is changed '
324 raise util.Abort(_('working copy of .hgtags is changed '
324 '(please commit .hgtags manually)'))
325 '(please commit .hgtags manually)'))
325
326
326 self.tags() # instantiate the cache
327 self.tags() # instantiate the cache
327 self._tag(names, node, message, local, user, date)
328 self._tag(names, node, message, local, user, date)
328
329
329 def tags(self):
330 def tags(self):
330 '''return a mapping of tag to node'''
331 '''return a mapping of tag to node'''
331 if self._tags is None:
332 if self._tags is None:
332 (self._tags, self._tagtypes) = self._findtags()
333 (self._tags, self._tagtypes) = self._findtags()
333
334
334 return self._tags
335 return self._tags
335
336
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        # fill from committed .hgtags first, then overlay local tags
        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # tags pointing at the null node are filtered out
            # (presumably deleted tags — confirm against tagsmod)
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
368
369
369 def tagtype(self, tagname):
370 def tagtype(self, tagname):
370 '''
371 '''
371 return the type of the given tag. result can be:
372 return the type of the given tag. result can be:
372
373
373 'local' : a local tag
374 'local' : a local tag
374 'global' : a global tag
375 'global' : a global tag
375 None : tag does not exist
376 None : tag does not exist
376 '''
377 '''
377
378
378 self.tags()
379 self.tags()
379
380
380 return self._tagtypes.get(tagname)
381 return self._tagtypes.get(tagname)
381
382
382 def tagslist(self):
383 def tagslist(self):
383 '''return a list of tags ordered by revision'''
384 '''return a list of tags ordered by revision'''
384 l = []
385 l = []
385 for t, n in self.tags().iteritems():
386 for t, n in self.tags().iteritems():
386 try:
387 try:
387 r = self.changelog.rev(n)
388 r = self.changelog.rev(n)
388 except:
389 except:
389 r = -2 # sort to the beginning of the list if unknown
390 r = -2 # sort to the beginning of the list if unknown
390 l.append((r, t, n))
391 l.append((r, t, n))
391 return [(t, n) for r, t, n in sorted(l)]
392 return [(t, n) for r, t, n in sorted(l)]
392
393
393 def nodetags(self, node):
394 def nodetags(self, node):
394 '''return the tags associated with a node'''
395 '''return the tags associated with a node'''
395 if not self.nodetagscache:
396 if not self.nodetagscache:
396 self.nodetagscache = {}
397 self.nodetagscache = {}
397 for t, n in self.tags().iteritems():
398 for t, n in self.tags().iteritems():
398 self.nodetagscache.setdefault(n, []).append(t)
399 self.nodetagscache.setdefault(n, []).append(t)
399 for tags in self.nodetagscache.itervalues():
400 for tags in self.nodetagscache.itervalues():
400 tags.sort()
401 tags.sort()
401 return self.nodetagscache.get(node, [])
402 return self.nodetagscache.get(node, [])
402
403
403 def nodebookmarks(self, node):
404 def nodebookmarks(self, node):
404 marks = []
405 marks = []
405 for bookmark, n in self._bookmarks.iteritems():
406 for bookmark, n in self._bookmarks.iteritems():
406 if n == node:
407 if n == node:
407 marks.append(bookmark)
408 marks.append(bookmark)
408 return sorted(marks)
409 return sorted(marks)
409
410
410 def _branchtags(self, partial, lrev):
411 def _branchtags(self, partial, lrev):
411 # TODO: rename this function?
412 # TODO: rename this function?
412 tiprev = len(self) - 1
413 tiprev = len(self) - 1
413 if lrev != tiprev:
414 if lrev != tiprev:
414 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
415 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
415 self._updatebranchcache(partial, ctxgen)
416 self._updatebranchcache(partial, ctxgen)
416 self._writebranchcache(partial, self.changelog.tip(), tiprev)
417 self._writebranchcache(partial, self.changelog.tip(), tiprev)
417
418
418 return partial
419 return partial
419
420
    def updatebranchcache(self):
        '''make self._branchcache current with the changelog tip,
        reloading it from disk or updating it incrementally as needed'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # already up to date
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable previous state (first use, or the old tip is gone,
            # e.g. after a strip): start from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # the old tip still exists: update incrementally from it
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
436
437
437 def branchmap(self):
438 def branchmap(self):
438 '''returns a dictionary {branch: [branchheads]}'''
439 '''returns a dictionary {branch: [branchheads]}'''
439 self.updatebranchcache()
440 self.updatebranchcache()
440 return self._branchcache
441 return self._branchcache
441
442
442 def branchtags(self):
443 def branchtags(self):
443 '''return a dict where branch names map to the tipmost head of
444 '''return a dict where branch names map to the tipmost head of
444 the branch, open heads come before closed'''
445 the branch, open heads come before closed'''
445 bt = {}
446 bt = {}
446 for bn, heads in self.branchmap().iteritems():
447 for bn, heads in self.branchmap().iteritems():
447 tip = heads[-1]
448 tip = heads[-1]
448 for h in reversed(heads):
449 for h in reversed(heads):
449 if 'close' not in self.changelog.read(h)[5]:
450 if 'close' not in self.changelog.read(h)[5]:
450 tip = h
451 tip = h
451 break
452 break
452 bt[bn] = tip
453 bt[bn] = tip
453 return bt
454 return bt
454
455
    def _readbranchcache(self):
        '''read the branch-head cache from .hg/cache/branchheads.

        Returns (partial, last, lrev): partial maps branch name to a list
        of head nodes; last/lrev are the tip node/rev the cache was valid
        for.  A missing, stale or corrupt file yields the empty state
        ({}, nullid, nullrev) so callers rebuild from scratch.'''
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            # no cache file (or unreadable): rebuild from scratch
            return {}, nullid, nullrev

        try:
            # first line records validity: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<head hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # any parse failure just means we fall back to an empty cache;
            # the cache is an optimization, not authoritative data
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
483
484
    def _writebranchcache(self, branches, tip, tiprev):
        '''persist the branch-head map to .hg/cache/branchheads
        (best effort: any I/O failure is silently ignored)'''
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            # first line records the tip this cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            # atomictemp file: rename moves the temp file into place
            f.rename()
        except (IOError, OSError):
            # the cache is only an optimization; losing it is harmless
            pass
494
495
    def _updatebranchcache(self, partial, ctxgen):
        '''update the branch -> heads map partial, in place, with the
        changesets produced by the iterator ctxgen'''
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                # a single head cannot be shadowed by anything
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    # already pruned by an earlier reachability pass
                    continue
                # bound the reachability walk at the oldest current head
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                # heads reachable from latest are ancestors, not heads
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
518
519
519 def lookup(self, key):
520 def lookup(self, key):
520 if isinstance(key, int):
521 if isinstance(key, int):
521 return self.changelog.node(key)
522 return self.changelog.node(key)
522 elif key == '.':
523 elif key == '.':
523 return self.dirstate.parents()[0]
524 return self.dirstate.parents()[0]
524 elif key == 'null':
525 elif key == 'null':
525 return nullid
526 return nullid
526 elif key == 'tip':
527 elif key == 'tip':
527 return self.changelog.tip()
528 return self.changelog.tip()
528 n = self.changelog._match(key)
529 n = self.changelog._match(key)
529 if n:
530 if n:
530 return n
531 return n
531 if key in self._bookmarks:
532 if key in self._bookmarks:
532 return self._bookmarks[key]
533 return self._bookmarks[key]
533 if key in self.tags():
534 if key in self.tags():
534 return self.tags()[key]
535 return self.tags()[key]
535 if key in self.branchtags():
536 if key in self.branchtags():
536 return self.branchtags()[key]
537 return self.branchtags()[key]
537 n = self.changelog._partialmatch(key)
538 n = self.changelog._partialmatch(key)
538 if n:
539 if n:
539 return n
540 return n
540
541
541 # can't find key, check if it might have come from damaged dirstate
542 # can't find key, check if it might have come from damaged dirstate
542 if key in self.dirstate.parents():
543 if key in self.dirstate.parents():
543 raise error.Abort(_("working directory has unknown parent '%s'!")
544 raise error.Abort(_("working directory has unknown parent '%s'!")
544 % short(key))
545 % short(key))
545 try:
546 try:
546 if len(key) == 20:
547 if len(key) == 20:
547 key = hex(key)
548 key = hex(key)
548 except:
549 except:
549 pass
550 pass
550 raise error.RepoLookupError(_("unknown revision '%s'") % key)
551 raise error.RepoLookupError(_("unknown revision '%s'") % key)
551
552
552 def lookupbranch(self, key, remote=None):
553 def lookupbranch(self, key, remote=None):
553 repo = remote or self
554 repo = remote or self
554 if key in repo.branchmap():
555 if key in repo.branchmap():
555 return key
556 return key
556
557
557 repo = (remote and remote.local()) and remote or self
558 repo = (remote and remote.local()) and remote or self
558 return repo[key].branch()
559 return repo[key].branch()
559
560
    def local(self):
        # this repository class works on local storage; callers use this
        # to distinguish it from remote repository proxies
        return True
562
563
    def join(self, f):
        # path of f under self.path (presumably the .hg directory — set
        # in __init__, outside this view)
        return os.path.join(self.path, f)
565
566
    def wjoin(self, f):
        # path of f under the working directory root
        return os.path.join(self.root, f)
568
569
    def file(self, f):
        '''return the filelog (per-file revision log) for tracked file f;
        a single leading '/' is stripped from the path'''
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
573
574
    def changectx(self, changeid):
        # thin wrapper: repo[changeid] does the work (see __getitem__,
        # defined outside this view)
        return self[changeid]
576
577
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None selects the working directory context (self[None]
        # is used that way elsewhere in this class)
        return self[changeid].parents()
580
581
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
585
586
    def getcwd(self):
        # delegate to the dirstate, which tracks the repo-relative cwd
        return self.dirstate.getcwd()
588
589
    def pathto(self, f, cwd=None):
        # render repo path f relative to cwd; delegates to the dirstate
        return self.dirstate.pathto(f, cwd)
591
592
    def wfile(self, f, mode='r'):
        # open working-directory file f via the working-dir opener
        return self.wopener(f, mode)
594
595
    def _link(self, f):
        # True if working-directory file f is a symbolic link
        return os.path.islink(self.wjoin(f))
597
598
    def _loadfilter(self, filter):
        '''load and cache the filters configured in config section
        `filter` ('encode'/'decode' per the propertycaches below).

        Returns a list of (matcher, filterfn, params) triples.'''
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a command starting with a registered data-filter name is
                # run in-process via that filter function
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise hand the command to util.filter
                    # (presumably an external command filter — see util)
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
621
622
622 def _filter(self, filterpats, filename, data):
623 def _filter(self, filterpats, filename, data):
623 for mf, fn, cmd in filterpats:
624 for mf, fn, cmd in filterpats:
624 if mf(filename):
625 if mf(filename):
625 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
626 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
626 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
627 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
627 break
628 break
628
629
629 return data
630 return data
630
631
    @propertycache
    def _encodefilterpats(self):
        # lazily-computed, cached [encode] filter patterns (see _loadfilter)
        return self._loadfilter('encode')
634
635
    @propertycache
    def _decodefilterpats(self):
        # lazily-computed, cached [decode] filter patterns (see _loadfilter)
        return self._loadfilter('decode')
638
639
    def adddatafilter(self, name, filter):
        # register an in-process data filter; _loadfilter matches filter
        # commands against these names by prefix
        self._datafilters[name] = filter
641
642
642 def wread(self, filename):
643 def wread(self, filename):
643 if self._link(filename):
644 if self._link(filename):
644 data = os.readlink(self.wjoin(filename))
645 data = os.readlink(self.wjoin(filename))
645 else:
646 else:
646 data = self.wopener(filename, 'r').read()
647 data = self.wopener(filename, 'r').read()
647 return self._filter(self._encodefilterpats, filename, data)
648 return self._filter(self._encodefilterpats, filename, data)
648
649
649 def wwrite(self, filename, data, flags):
650 def wwrite(self, filename, data, flags):
650 data = self._filter(self._decodefilterpats, filename, data)
651 data = self._filter(self._decodefilterpats, filename, data)
651 if 'l' in flags:
652 if 'l' in flags:
652 self.wopener.symlink(data, filename)
653 self.wopener.symlink(data, filename)
653 else:
654 else:
654 self.wopener(filename, 'w').write(data)
655 self.wopener(filename, 'w').write(data)
655 if 'x' in flags:
656 if 'x' in flags:
656 util.set_flags(self.wjoin(filename), False, True)
657 util.set_flags(self.wjoin(filename), False, True)
657
658
    def wwritedata(self, filename, data):
        # return data decode-filtered for the working directory, without
        # writing any file
        return self._filter(self._decodefilterpats, filename, data)
660
661
    def transaction(self, desc):
        '''open a store transaction described by desc, nesting into one
        already running.  Returns the transaction object.'''
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # a transaction is active: join it instead of starting anew
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate file yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(
            encoding.fromlocal(self.dirstate.branch()))
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        # journal.* files become undo.* files (aftertrans presumably
        # performs these renames when the transaction completes — see
        # aftertrans, defined elsewhere in this module)
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # keep only a weak reference so an abandoned transaction object
        # can be collected (and detected via the journal file)
        self._transref = weakref.ref(tr)
        return tr
691
692
    def recover(self):
        '''roll back an interrupted transaction from its journal.

        Returns True if a transaction was recovered, False when there was
        nothing to recover.'''
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop in-memory state that the rollback made stale
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
706
707
    def rollback(self, dryrun=False):
        '''undo the last transaction using the saved undo.* files.

        Returns 1 when there is no rollback information; with dryrun=True
        only the description of what would be undone is printed.'''
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                # undo.desc was written by transaction() as
                # "<len(repo)>\n<desc>\n..." — use it for the status line
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                    # NOTE(review): if undo.desc holds fewer than 2 lines,
                    # desc is unbound on the next statement
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                # restore the pre-transaction dirstate and bookmarks
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % self.dirstate.branch())
                # drop every cache the rollback may have invalidated
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                self.ui.status(_("working directory now based on "
                                 "revision %s\n") % (
                                 _(' and ').join(str(p.rev()) for p in self.parents())))
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
752
753
753 def invalidatecaches(self):
754 def invalidatecaches(self):
754 self._tags = None
755 self._tags = None
755 self._tagtypes = None
756 self._tagtypes = None
756 self.nodetagscache = None
757 self.nodetagscache = None
757 self._branchcache = None # in UTF-8
758 self._branchcache = None # in UTF-8
758 self._branchcachetip = None
759 self._branchcachetip = None
759
760
760 def invalidate(self):
761 def invalidate(self):
761 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
762 for a in ("changelog", "manifest", "_bookmarks", "_bookmarkscurrent"):
762 if a in self.__dict__:
763 if a in self.__dict__:
763 delattr(self, a)
764 delattr(self, a)
764 self.invalidatecaches()
765 self.invalidatecaches()
765
766
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        '''acquire the lock file lockname and return the lock object.

        wait: when False, re-raise error.LockHeld immediately if the lock
        is taken; otherwise warn and retry with a timeout.
        releasefn: callback passed to the lock, run on release.
        acquirefn: called once after the lock is acquired.
        desc: human-readable description used in messages.'''
        try:
            # first attempt is non-blocking (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
780
781
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # lock already held by this process: re-acquire the existing
            # object (l.lock() presumably bumps its depth — see the lock
            # module) rather than creating a new one
            l.lock()
            return l

        # store.write flushes pending store writes on release;
        # self.invalidate drops caches right after acquisition
        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
794
795
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # lock already held by this process: re-acquire the existing
            # object rather than creating a new one
            l.lock()
            return l

        # the dirstate is written on release and invalidated right after
        # acquisition
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
809
810
810 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
811 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
811 """
812 """
812 commit an individual file as part of a larger transaction
813 commit an individual file as part of a larger transaction
813 """
814 """
814
815
815 fname = fctx.path()
816 fname = fctx.path()
816 text = fctx.data()
817 text = fctx.data()
817 flog = self.file(fname)
818 flog = self.file(fname)
818 fparent1 = manifest1.get(fname, nullid)
819 fparent1 = manifest1.get(fname, nullid)
819 fparent2 = fparent2o = manifest2.get(fname, nullid)
820 fparent2 = fparent2o = manifest2.get(fname, nullid)
820
821
821 meta = {}
822 meta = {}
822 copy = fctx.renamed()
823 copy = fctx.renamed()
823 if copy and copy[0] != fname:
824 if copy and copy[0] != fname:
824 # Mark the new revision of this file as a copy of another
825 # Mark the new revision of this file as a copy of another
825 # file. This copy data will effectively act as a parent
826 # file. This copy data will effectively act as a parent
826 # of this new revision. If this is a merge, the first
827 # of this new revision. If this is a merge, the first
827 # parent will be the nullid (meaning "look up the copy data")
828 # parent will be the nullid (meaning "look up the copy data")
828 # and the second one will be the other parent. For example:
829 # and the second one will be the other parent. For example:
829 #
830 #
830 # 0 --- 1 --- 3 rev1 changes file foo
831 # 0 --- 1 --- 3 rev1 changes file foo
831 # \ / rev2 renames foo to bar and changes it
832 # \ / rev2 renames foo to bar and changes it
832 # \- 2 -/ rev3 should have bar with all changes and
833 # \- 2 -/ rev3 should have bar with all changes and
833 # should record that bar descends from
834 # should record that bar descends from
834 # bar in rev2 and foo in rev1
835 # bar in rev2 and foo in rev1
835 #
836 #
836 # this allows this merge to succeed:
837 # this allows this merge to succeed:
837 #
838 #
838 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
839 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
839 # \ / merging rev3 and rev4 should use bar@rev2
840 # \ / merging rev3 and rev4 should use bar@rev2
840 # \- 2 --- 4 as the merge base
841 # \- 2 --- 4 as the merge base
841 #
842 #
842
843
843 cfname = copy[0]
844 cfname = copy[0]
844 crev = manifest1.get(cfname)
845 crev = manifest1.get(cfname)
845 newfparent = fparent2
846 newfparent = fparent2
846
847
847 if manifest2: # branch merge
848 if manifest2: # branch merge
848 if fparent2 == nullid or crev is None: # copied on remote side
849 if fparent2 == nullid or crev is None: # copied on remote side
849 if cfname in manifest2:
850 if cfname in manifest2:
850 crev = manifest2[cfname]
851 crev = manifest2[cfname]
851 newfparent = fparent1
852 newfparent = fparent1
852
853
853 # find source in nearest ancestor if we've lost track
854 # find source in nearest ancestor if we've lost track
854 if not crev:
855 if not crev:
855 self.ui.debug(" %s: searching for copy revision for %s\n" %
856 self.ui.debug(" %s: searching for copy revision for %s\n" %
856 (fname, cfname))
857 (fname, cfname))
857 for ancestor in self[None].ancestors():
858 for ancestor in self[None].ancestors():
858 if cfname in ancestor:
859 if cfname in ancestor:
859 crev = ancestor[cfname].filenode()
860 crev = ancestor[cfname].filenode()
860 break
861 break
861
862
862 if crev:
863 if crev:
863 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
864 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
864 meta["copy"] = cfname
865 meta["copy"] = cfname
865 meta["copyrev"] = hex(crev)
866 meta["copyrev"] = hex(crev)
866 fparent1, fparent2 = nullid, newfparent
867 fparent1, fparent2 = nullid, newfparent
867 else:
868 else:
868 self.ui.warn(_("warning: can't find ancestor for '%s' "
869 self.ui.warn(_("warning: can't find ancestor for '%s' "
869 "copied from '%s'!\n") % (fname, cfname))
870 "copied from '%s'!\n") % (fname, cfname))
870
871
871 elif fparent2 != nullid:
872 elif fparent2 != nullid:
872 # is one parent an ancestor of the other?
873 # is one parent an ancestor of the other?
873 fparentancestor = flog.ancestor(fparent1, fparent2)
874 fparentancestor = flog.ancestor(fparent1, fparent2)
874 if fparentancestor == fparent1:
875 if fparentancestor == fparent1:
875 fparent1, fparent2 = fparent2, nullid
876 fparent1, fparent2 = fparent2, nullid
876 elif fparentancestor == fparent2:
877 elif fparentancestor == fparent2:
877 fparent2 = nullid
878 fparent2 = nullid
878
879
879 # is the file changed?
880 # is the file changed?
880 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
881 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
881 changelist.append(fname)
882 changelist.append(fname)
882 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
883 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
883
884
884 # are just the flags changed during merge?
885 # are just the flags changed during merge?
885 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
886 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
886 changelist.append(fname)
887 changelist.append(fname)
887
888
888 return fparent1
889 return fparent1
889
890
890 def commit(self, text="", user=None, date=None, match=None, force=False,
891 def commit(self, text="", user=None, date=None, match=None, force=False,
891 editor=False, extra={}):
892 editor=False, extra={}):
892 """Add a new revision to current repository.
893 """Add a new revision to current repository.
893
894
894 Revision information is gathered from the working directory,
895 Revision information is gathered from the working directory,
895 match can be used to filter the committed files. If editor is
896 match can be used to filter the committed files. If editor is
896 supplied, it is called to get a commit message.
897 supplied, it is called to get a commit message.
897 """
898 """
898
899
899 def fail(f, msg):
900 def fail(f, msg):
900 raise util.Abort('%s: %s' % (f, msg))
901 raise util.Abort('%s: %s' % (f, msg))
901
902
902 if not match:
903 if not match:
903 match = matchmod.always(self.root, '')
904 match = matchmod.always(self.root, '')
904
905
905 if not force:
906 if not force:
906 vdirs = []
907 vdirs = []
907 match.dir = vdirs.append
908 match.dir = vdirs.append
908 match.bad = fail
909 match.bad = fail
909
910
910 wlock = self.wlock()
911 wlock = self.wlock()
911 try:
912 try:
912 wctx = self[None]
913 wctx = self[None]
913 merge = len(wctx.parents()) > 1
914 merge = len(wctx.parents()) > 1
914
915
915 if (not force and merge and match and
916 if (not force and merge and match and
916 (match.files() or match.anypats())):
917 (match.files() or match.anypats())):
917 raise util.Abort(_('cannot partially commit a merge '
918 raise util.Abort(_('cannot partially commit a merge '
918 '(do not specify files or patterns)'))
919 '(do not specify files or patterns)'))
919
920
920 changes = self.status(match=match, clean=force)
921 changes = self.status(match=match, clean=force)
921 if force:
922 if force:
922 changes[0].extend(changes[6]) # mq may commit unchanged files
923 changes[0].extend(changes[6]) # mq may commit unchanged files
923
924
924 # check subrepos
925 # check subrepos
925 subs = []
926 subs = []
926 removedsubs = set()
927 removedsubs = set()
927 for p in wctx.parents():
928 for p in wctx.parents():
928 removedsubs.update(s for s in p.substate if match(s))
929 removedsubs.update(s for s in p.substate if match(s))
929 for s in wctx.substate:
930 for s in wctx.substate:
930 removedsubs.discard(s)
931 removedsubs.discard(s)
931 if match(s) and wctx.sub(s).dirty():
932 if match(s) and wctx.sub(s).dirty():
932 subs.append(s)
933 subs.append(s)
933 if (subs or removedsubs):
934 if (subs or removedsubs):
934 if (not match('.hgsub') and
935 if (not match('.hgsub') and
935 '.hgsub' in (wctx.modified() + wctx.added())):
936 '.hgsub' in (wctx.modified() + wctx.added())):
936 raise util.Abort(_("can't commit subrepos without .hgsub"))
937 raise util.Abort(_("can't commit subrepos without .hgsub"))
937 if '.hgsubstate' not in changes[0]:
938 if '.hgsubstate' not in changes[0]:
938 changes[0].insert(0, '.hgsubstate')
939 changes[0].insert(0, '.hgsubstate')
939
940
940 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
941 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
941 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
942 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
942 if changedsubs:
943 if changedsubs:
943 raise util.Abort(_("uncommitted changes in subrepo %s")
944 raise util.Abort(_("uncommitted changes in subrepo %s")
944 % changedsubs[0])
945 % changedsubs[0])
945
946
946 # make sure all explicit patterns are matched
947 # make sure all explicit patterns are matched
947 if not force and match.files():
948 if not force and match.files():
948 matched = set(changes[0] + changes[1] + changes[2])
949 matched = set(changes[0] + changes[1] + changes[2])
949
950
950 for f in match.files():
951 for f in match.files():
951 if f == '.' or f in matched or f in wctx.substate:
952 if f == '.' or f in matched or f in wctx.substate:
952 continue
953 continue
953 if f in changes[3]: # missing
954 if f in changes[3]: # missing
954 fail(f, _('file not found!'))
955 fail(f, _('file not found!'))
955 if f in vdirs: # visited directory
956 if f in vdirs: # visited directory
956 d = f + '/'
957 d = f + '/'
957 for mf in matched:
958 for mf in matched:
958 if mf.startswith(d):
959 if mf.startswith(d):
959 break
960 break
960 else:
961 else:
961 fail(f, _("no match under directory!"))
962 fail(f, _("no match under directory!"))
962 elif f not in self.dirstate:
963 elif f not in self.dirstate:
963 fail(f, _("file not tracked!"))
964 fail(f, _("file not tracked!"))
964
965
965 if (not force and not extra.get("close") and not merge
966 if (not force and not extra.get("close") and not merge
966 and not (changes[0] or changes[1] or changes[2])
967 and not (changes[0] or changes[1] or changes[2])
967 and wctx.branch() == wctx.p1().branch()):
968 and wctx.branch() == wctx.p1().branch()):
968 return None
969 return None
969
970
970 ms = mergemod.mergestate(self)
971 ms = mergemod.mergestate(self)
971 for f in changes[0]:
972 for f in changes[0]:
972 if f in ms and ms[f] == 'u':
973 if f in ms and ms[f] == 'u':
973 raise util.Abort(_("unresolved merge conflicts "
974 raise util.Abort(_("unresolved merge conflicts "
974 "(see hg resolve)"))
975 "(see hg resolve)"))
975
976
976 cctx = context.workingctx(self, text, user, date, extra, changes)
977 cctx = context.workingctx(self, text, user, date, extra, changes)
977 if editor:
978 if editor:
978 cctx._text = editor(self, cctx, subs)
979 cctx._text = editor(self, cctx, subs)
979 edited = (text != cctx._text)
980 edited = (text != cctx._text)
980
981
981 # commit subs
982 # commit subs
982 if subs or removedsubs:
983 if subs or removedsubs:
983 state = wctx.substate.copy()
984 state = wctx.substate.copy()
984 for s in sorted(subs):
985 for s in sorted(subs):
985 sub = wctx.sub(s)
986 sub = wctx.sub(s)
986 self.ui.status(_('committing subrepository %s\n') %
987 self.ui.status(_('committing subrepository %s\n') %
987 subrepo.subrelpath(sub))
988 subrepo.subrelpath(sub))
988 sr = sub.commit(cctx._text, user, date)
989 sr = sub.commit(cctx._text, user, date)
989 state[s] = (state[s][0], sr)
990 state[s] = (state[s][0], sr)
990 subrepo.writestate(self, state)
991 subrepo.writestate(self, state)
991
992
992 # Save commit message in case this transaction gets rolled back
993 # Save commit message in case this transaction gets rolled back
993 # (e.g. by a pretxncommit hook). Leave the content alone on
994 # (e.g. by a pretxncommit hook). Leave the content alone on
994 # the assumption that the user will use the same editor again.
995 # the assumption that the user will use the same editor again.
995 msgfile = self.opener('last-message.txt', 'wb')
996 msgfile = self.opener('last-message.txt', 'wb')
996 msgfile.write(cctx._text)
997 msgfile.write(cctx._text)
997 msgfile.close()
998 msgfile.close()
998
999
999 p1, p2 = self.dirstate.parents()
1000 p1, p2 = self.dirstate.parents()
1000 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1001 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1001 try:
1002 try:
1002 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1003 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1003 ret = self.commitctx(cctx, True)
1004 ret = self.commitctx(cctx, True)
1004 except:
1005 except:
1005 if edited:
1006 if edited:
1006 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1007 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1007 self.ui.write(
1008 self.ui.write(
1008 _('note: commit message saved in %s\n') % msgfn)
1009 _('note: commit message saved in %s\n') % msgfn)
1009 raise
1010 raise
1010
1011
1011 # update bookmarks, dirstate and mergestate
1012 # update bookmarks, dirstate and mergestate
1012 parents = (p1, p2)
1013 parents = (p1, p2)
1013 if p2 == nullid:
1014 if p2 == nullid:
1014 parents = (p1,)
1015 parents = (p1,)
1015 bookmarks.update(self, parents, ret)
1016 bookmarks.update(self, parents, ret)
1016 for f in changes[0] + changes[1]:
1017 for f in changes[0] + changes[1]:
1017 self.dirstate.normal(f)
1018 self.dirstate.normal(f)
1018 for f in changes[2]:
1019 for f in changes[2]:
1019 self.dirstate.forget(f)
1020 self.dirstate.forget(f)
1020 self.dirstate.setparents(ret)
1021 self.dirstate.setparents(ret)
1021 ms.reset()
1022 ms.reset()
1022 finally:
1023 finally:
1023 wlock.release()
1024 wlock.release()
1024
1025
1025 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1026 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1026 return ret
1027 return ret
1027
1028
1028 def commitctx(self, ctx, error=False):
1029 def commitctx(self, ctx, error=False):
1029 """Add a new revision to current repository.
1030 """Add a new revision to current repository.
1030 Revision information is passed via the context argument.
1031 Revision information is passed via the context argument.
1031 """
1032 """
1032
1033
1033 tr = lock = None
1034 tr = lock = None
1034 removed = list(ctx.removed())
1035 removed = list(ctx.removed())
1035 p1, p2 = ctx.p1(), ctx.p2()
1036 p1, p2 = ctx.p1(), ctx.p2()
1036 m1 = p1.manifest().copy()
1037 m1 = p1.manifest().copy()
1037 m2 = p2.manifest()
1038 m2 = p2.manifest()
1038 user = ctx.user()
1039 user = ctx.user()
1039
1040
1040 lock = self.lock()
1041 lock = self.lock()
1041 try:
1042 try:
1042 tr = self.transaction("commit")
1043 tr = self.transaction("commit")
1043 trp = weakref.proxy(tr)
1044 trp = weakref.proxy(tr)
1044
1045
1045 # check in files
1046 # check in files
1046 new = {}
1047 new = {}
1047 changed = []
1048 changed = []
1048 linkrev = len(self)
1049 linkrev = len(self)
1049 for f in sorted(ctx.modified() + ctx.added()):
1050 for f in sorted(ctx.modified() + ctx.added()):
1050 self.ui.note(f + "\n")
1051 self.ui.note(f + "\n")
1051 try:
1052 try:
1052 fctx = ctx[f]
1053 fctx = ctx[f]
1053 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1054 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1054 changed)
1055 changed)
1055 m1.set(f, fctx.flags())
1056 m1.set(f, fctx.flags())
1056 except OSError, inst:
1057 except OSError, inst:
1057 self.ui.warn(_("trouble committing %s!\n") % f)
1058 self.ui.warn(_("trouble committing %s!\n") % f)
1058 raise
1059 raise
1059 except IOError, inst:
1060 except IOError, inst:
1060 errcode = getattr(inst, 'errno', errno.ENOENT)
1061 errcode = getattr(inst, 'errno', errno.ENOENT)
1061 if error or errcode and errcode != errno.ENOENT:
1062 if error or errcode and errcode != errno.ENOENT:
1062 self.ui.warn(_("trouble committing %s!\n") % f)
1063 self.ui.warn(_("trouble committing %s!\n") % f)
1063 raise
1064 raise
1064 else:
1065 else:
1065 removed.append(f)
1066 removed.append(f)
1066
1067
1067 # update manifest
1068 # update manifest
1068 m1.update(new)
1069 m1.update(new)
1069 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1070 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1070 drop = [f for f in removed if f in m1]
1071 drop = [f for f in removed if f in m1]
1071 for f in drop:
1072 for f in drop:
1072 del m1[f]
1073 del m1[f]
1073 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1074 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1074 p2.manifestnode(), (new, drop))
1075 p2.manifestnode(), (new, drop))
1075
1076
1076 # update changelog
1077 # update changelog
1077 self.changelog.delayupdate()
1078 self.changelog.delayupdate()
1078 n = self.changelog.add(mn, changed + removed, ctx.description(),
1079 n = self.changelog.add(mn, changed + removed, ctx.description(),
1079 trp, p1.node(), p2.node(),
1080 trp, p1.node(), p2.node(),
1080 user, ctx.date(), ctx.extra().copy())
1081 user, ctx.date(), ctx.extra().copy())
1081 p = lambda: self.changelog.writepending() and self.root or ""
1082 p = lambda: self.changelog.writepending() and self.root or ""
1082 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1083 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1083 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1084 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1084 parent2=xp2, pending=p)
1085 parent2=xp2, pending=p)
1085 self.changelog.finalize(trp)
1086 self.changelog.finalize(trp)
1086 tr.close()
1087 tr.close()
1087
1088
1088 if self._branchcache:
1089 if self._branchcache:
1089 self.updatebranchcache()
1090 self.updatebranchcache()
1090 return n
1091 return n
1091 finally:
1092 finally:
1092 if tr:
1093 if tr:
1093 tr.release()
1094 tr.release()
1094 lock.release()
1095 lock.release()
1095
1096
1096 def destroyed(self):
1097 def destroyed(self):
1097 '''Inform the repository that nodes have been destroyed.
1098 '''Inform the repository that nodes have been destroyed.
1098 Intended for use by strip and rollback, so there's a common
1099 Intended for use by strip and rollback, so there's a common
1099 place for anything that has to be done after destroying history.'''
1100 place for anything that has to be done after destroying history.'''
1100 # XXX it might be nice if we could take the list of destroyed
1101 # XXX it might be nice if we could take the list of destroyed
1101 # nodes, but I don't see an easy way for rollback() to do that
1102 # nodes, but I don't see an easy way for rollback() to do that
1102
1103
1103 # Ensure the persistent tag cache is updated. Doing it now
1104 # Ensure the persistent tag cache is updated. Doing it now
1104 # means that the tag cache only has to worry about destroyed
1105 # means that the tag cache only has to worry about destroyed
1105 # heads immediately after a strip/rollback. That in turn
1106 # heads immediately after a strip/rollback. That in turn
1106 # guarantees that "cachetip == currenttip" (comparing both rev
1107 # guarantees that "cachetip == currenttip" (comparing both rev
1107 # and node) always means no nodes have been added or destroyed.
1108 # and node) always means no nodes have been added or destroyed.
1108
1109
1109 # XXX this is suboptimal when qrefresh'ing: we strip the current
1110 # XXX this is suboptimal when qrefresh'ing: we strip the current
1110 # head, refresh the tag cache, then immediately add a new head.
1111 # head, refresh the tag cache, then immediately add a new head.
1111 # But I think doing it this way is necessary for the "instant
1112 # But I think doing it this way is necessary for the "instant
1112 # tag cache retrieval" case to work.
1113 # tag cache retrieval" case to work.
1113 self.invalidatecaches()
1114 self.invalidatecaches()
1114
1115
1115 def walk(self, match, node=None):
1116 def walk(self, match, node=None):
1116 '''
1117 '''
1117 walk recursively through the directory tree or a given
1118 walk recursively through the directory tree or a given
1118 changeset, finding all files matched by the match
1119 changeset, finding all files matched by the match
1119 function
1120 function
1120 '''
1121 '''
1121 return self[node].walk(match)
1122 return self[node].walk(match)
1122
1123
1123 def status(self, node1='.', node2=None, match=None,
1124 def status(self, node1='.', node2=None, match=None,
1124 ignored=False, clean=False, unknown=False,
1125 ignored=False, clean=False, unknown=False,
1125 listsubrepos=False):
1126 listsubrepos=False):
1126 """return status of files between two nodes or node and working directory
1127 """return status of files between two nodes or node and working directory
1127
1128
1128 If node1 is None, use the first dirstate parent instead.
1129 If node1 is None, use the first dirstate parent instead.
1129 If node2 is None, compare node1 with working directory.
1130 If node2 is None, compare node1 with working directory.
1130 """
1131 """
1131
1132
1132 def mfmatches(ctx):
1133 def mfmatches(ctx):
1133 mf = ctx.manifest().copy()
1134 mf = ctx.manifest().copy()
1134 for fn in mf.keys():
1135 for fn in mf.keys():
1135 if not match(fn):
1136 if not match(fn):
1136 del mf[fn]
1137 del mf[fn]
1137 return mf
1138 return mf
1138
1139
1139 if isinstance(node1, context.changectx):
1140 if isinstance(node1, context.changectx):
1140 ctx1 = node1
1141 ctx1 = node1
1141 else:
1142 else:
1142 ctx1 = self[node1]
1143 ctx1 = self[node1]
1143 if isinstance(node2, context.changectx):
1144 if isinstance(node2, context.changectx):
1144 ctx2 = node2
1145 ctx2 = node2
1145 else:
1146 else:
1146 ctx2 = self[node2]
1147 ctx2 = self[node2]
1147
1148
1148 working = ctx2.rev() is None
1149 working = ctx2.rev() is None
1149 parentworking = working and ctx1 == self['.']
1150 parentworking = working and ctx1 == self['.']
1150 match = match or matchmod.always(self.root, self.getcwd())
1151 match = match or matchmod.always(self.root, self.getcwd())
1151 listignored, listclean, listunknown = ignored, clean, unknown
1152 listignored, listclean, listunknown = ignored, clean, unknown
1152
1153
1153 # load earliest manifest first for caching reasons
1154 # load earliest manifest first for caching reasons
1154 if not working and ctx2.rev() < ctx1.rev():
1155 if not working and ctx2.rev() < ctx1.rev():
1155 ctx2.manifest()
1156 ctx2.manifest()
1156
1157
1157 if not parentworking:
1158 if not parentworking:
1158 def bad(f, msg):
1159 def bad(f, msg):
1159 if f not in ctx1:
1160 if f not in ctx1:
1160 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1161 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1161 match.bad = bad
1162 match.bad = bad
1162
1163
1163 if working: # we need to scan the working dir
1164 if working: # we need to scan the working dir
1164 subrepos = []
1165 subrepos = []
1165 if '.hgsub' in self.dirstate:
1166 if '.hgsub' in self.dirstate:
1166 subrepos = ctx1.substate.keys()
1167 subrepos = ctx1.substate.keys()
1167 s = self.dirstate.status(match, subrepos, listignored,
1168 s = self.dirstate.status(match, subrepos, listignored,
1168 listclean, listunknown)
1169 listclean, listunknown)
1169 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1170 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1170
1171
1171 # check for any possibly clean files
1172 # check for any possibly clean files
1172 if parentworking and cmp:
1173 if parentworking and cmp:
1173 fixup = []
1174 fixup = []
1174 # do a full compare of any files that might have changed
1175 # do a full compare of any files that might have changed
1175 for f in sorted(cmp):
1176 for f in sorted(cmp):
1176 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1177 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1177 or ctx1[f].cmp(ctx2[f])):
1178 or ctx1[f].cmp(ctx2[f])):
1178 modified.append(f)
1179 modified.append(f)
1179 else:
1180 else:
1180 fixup.append(f)
1181 fixup.append(f)
1181
1182
1182 # update dirstate for files that are actually clean
1183 # update dirstate for files that are actually clean
1183 if fixup:
1184 if fixup:
1184 if listclean:
1185 if listclean:
1185 clean += fixup
1186 clean += fixup
1186
1187
1187 try:
1188 try:
1188 # updating the dirstate is optional
1189 # updating the dirstate is optional
1189 # so we don't wait on the lock
1190 # so we don't wait on the lock
1190 wlock = self.wlock(False)
1191 wlock = self.wlock(False)
1191 try:
1192 try:
1192 for f in fixup:
1193 for f in fixup:
1193 self.dirstate.normal(f)
1194 self.dirstate.normal(f)
1194 finally:
1195 finally:
1195 wlock.release()
1196 wlock.release()
1196 except error.LockError:
1197 except error.LockError:
1197 pass
1198 pass
1198
1199
1199 if not parentworking:
1200 if not parentworking:
1200 mf1 = mfmatches(ctx1)
1201 mf1 = mfmatches(ctx1)
1201 if working:
1202 if working:
1202 # we are comparing working dir against non-parent
1203 # we are comparing working dir against non-parent
1203 # generate a pseudo-manifest for the working dir
1204 # generate a pseudo-manifest for the working dir
1204 mf2 = mfmatches(self['.'])
1205 mf2 = mfmatches(self['.'])
1205 for f in cmp + modified + added:
1206 for f in cmp + modified + added:
1206 mf2[f] = None
1207 mf2[f] = None
1207 mf2.set(f, ctx2.flags(f))
1208 mf2.set(f, ctx2.flags(f))
1208 for f in removed:
1209 for f in removed:
1209 if f in mf2:
1210 if f in mf2:
1210 del mf2[f]
1211 del mf2[f]
1211 else:
1212 else:
1212 # we are comparing two revisions
1213 # we are comparing two revisions
1213 deleted, unknown, ignored = [], [], []
1214 deleted, unknown, ignored = [], [], []
1214 mf2 = mfmatches(ctx2)
1215 mf2 = mfmatches(ctx2)
1215
1216
1216 modified, added, clean = [], [], []
1217 modified, added, clean = [], [], []
1217 for fn in mf2:
1218 for fn in mf2:
1218 if fn in mf1:
1219 if fn in mf1:
1219 if (mf1.flags(fn) != mf2.flags(fn) or
1220 if (mf1.flags(fn) != mf2.flags(fn) or
1220 (mf1[fn] != mf2[fn] and
1221 (mf1[fn] != mf2[fn] and
1221 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1222 (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
1222 modified.append(fn)
1223 modified.append(fn)
1223 elif listclean:
1224 elif listclean:
1224 clean.append(fn)
1225 clean.append(fn)
1225 del mf1[fn]
1226 del mf1[fn]
1226 else:
1227 else:
1227 added.append(fn)
1228 added.append(fn)
1228 removed = mf1.keys()
1229 removed = mf1.keys()
1229
1230
1230 r = modified, added, removed, deleted, unknown, ignored, clean
1231 r = modified, added, removed, deleted, unknown, ignored, clean
1231
1232
1232 if listsubrepos:
1233 if listsubrepos:
1233 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1234 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1234 if working:
1235 if working:
1235 rev2 = None
1236 rev2 = None
1236 else:
1237 else:
1237 rev2 = ctx2.substate[subpath][1]
1238 rev2 = ctx2.substate[subpath][1]
1238 try:
1239 try:
1239 submatch = matchmod.narrowmatcher(subpath, match)
1240 submatch = matchmod.narrowmatcher(subpath, match)
1240 s = sub.status(rev2, match=submatch, ignored=listignored,
1241 s = sub.status(rev2, match=submatch, ignored=listignored,
1241 clean=listclean, unknown=listunknown,
1242 clean=listclean, unknown=listunknown,
1242 listsubrepos=True)
1243 listsubrepos=True)
1243 for rfiles, sfiles in zip(r, s):
1244 for rfiles, sfiles in zip(r, s):
1244 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1245 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1245 except error.LookupError:
1246 except error.LookupError:
1246 self.ui.status(_("skipping missing subrepository: %s\n")
1247 self.ui.status(_("skipping missing subrepository: %s\n")
1247 % subpath)
1248 % subpath)
1248
1249
1249 for l in r:
1250 for l in r:
1250 l.sort()
1251 l.sort()
1251 return r
1252 return r
1252
1253
1253 def heads(self, start=None):
1254 def heads(self, start=None):
1254 heads = self.changelog.heads(start)
1255 heads = self.changelog.heads(start)
1255 # sort the output in rev descending order
1256 # sort the output in rev descending order
1256 return sorted(heads, key=self.changelog.rev, reverse=True)
1257 return sorted(heads, key=self.changelog.rev, reverse=True)
1257
1258
1258 def branchheads(self, branch=None, start=None, closed=False):
1259 def branchheads(self, branch=None, start=None, closed=False):
1259 '''return a (possibly filtered) list of heads for the given branch
1260 '''return a (possibly filtered) list of heads for the given branch
1260
1261
1261 Heads are returned in topological order, from newest to oldest.
1262 Heads are returned in topological order, from newest to oldest.
1262 If branch is None, use the dirstate branch.
1263 If branch is None, use the dirstate branch.
1263 If start is not None, return only heads reachable from start.
1264 If start is not None, return only heads reachable from start.
1264 If closed is True, return heads that are marked as closed as well.
1265 If closed is True, return heads that are marked as closed as well.
1265 '''
1266 '''
1266 if branch is None:
1267 if branch is None:
1267 branch = self[None].branch()
1268 branch = self[None].branch()
1268 branches = self.branchmap()
1269 branches = self.branchmap()
1269 if branch not in branches:
1270 if branch not in branches:
1270 return []
1271 return []
1271 # the cache returns heads ordered lowest to highest
1272 # the cache returns heads ordered lowest to highest
1272 bheads = list(reversed(branches[branch]))
1273 bheads = list(reversed(branches[branch]))
1273 if start is not None:
1274 if start is not None:
1274 # filter out the heads that cannot be reached from startrev
1275 # filter out the heads that cannot be reached from startrev
1275 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1276 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1276 bheads = [h for h in bheads if h in fbheads]
1277 bheads = [h for h in bheads if h in fbheads]
1277 if not closed:
1278 if not closed:
1278 bheads = [h for h in bheads if
1279 bheads = [h for h in bheads if
1279 ('close' not in self.changelog.read(h)[5])]
1280 ('close' not in self.changelog.read(h)[5])]
1280 return bheads
1281 return bheads
1281
1282
1282 def branches(self, nodes):
1283 def branches(self, nodes):
1283 if not nodes:
1284 if not nodes:
1284 nodes = [self.changelog.tip()]
1285 nodes = [self.changelog.tip()]
1285 b = []
1286 b = []
1286 for n in nodes:
1287 for n in nodes:
1287 t = n
1288 t = n
1288 while 1:
1289 while 1:
1289 p = self.changelog.parents(n)
1290 p = self.changelog.parents(n)
1290 if p[1] != nullid or p[0] == nullid:
1291 if p[1] != nullid or p[0] == nullid:
1291 b.append((t, n, p[0], p[1]))
1292 b.append((t, n, p[0], p[1]))
1292 break
1293 break
1293 n = p[0]
1294 n = p[0]
1294 return b
1295 return b
1295
1296
1296 def between(self, pairs):
1297 def between(self, pairs):
1297 r = []
1298 r = []
1298
1299
1299 for top, bottom in pairs:
1300 for top, bottom in pairs:
1300 n, l, i = top, [], 0
1301 n, l, i = top, [], 0
1301 f = 1
1302 f = 1
1302
1303
1303 while n != bottom and n != nullid:
1304 while n != bottom and n != nullid:
1304 p = self.changelog.parents(n)[0]
1305 p = self.changelog.parents(n)[0]
1305 if i == f:
1306 if i == f:
1306 l.append(n)
1307 l.append(n)
1307 f = f * 2
1308 f = f * 2
1308 n = p
1309 n = p
1309 i += 1
1310 i += 1
1310
1311
1311 r.append(l)
1312 r.append(l)
1312
1313
1313 return r
1314 return r
1314
1315
    def pull(self, remote, heads=None, force=False):
        """Pull changes from a remote repository into this one.

        remote is a peer repository object; heads optionally limits the
        pull to ancestors of those nodes; force is passed through to
        discovery.  Returns 0 when there was nothing to fetch, otherwise
        the integer result of addchangegroup().  Afterwards, local
        bookmarks that the remote has moved strictly forward are
        fast-forwarded to the remote position.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and fetch == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    # a partial pull needs server-side subset support
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        # With the changegroup in, see whether the remote moved any
        # bookmarks we also carry, and fast-forward ours to match.
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        changed = False
        for k in rb.keys():
            if k in self._bookmarks:
                # remote position, local position of the shared bookmark
                nr, nl = rb[k], self._bookmarks[k]
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl.rev() >= cr.rev():
                        # local bookmark already at or past the remote one
                        continue
                    if cr in cl.descendants():
                        # fast-forward: remote position descends from ours
                        self._bookmarks[k] = cr.node()
                        changed = True
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        # histories diverged; leave the local bookmark alone
                        self.ui.warn(_("not updating divergent"
                                       " bookmark %s\n") % k)
        if changed:
            bookmarks.write(self)

        return result
1366
1367
1367 def checkpush(self, force, revs):
1368 def checkpush(self, force, revs):
1368 """Extensions can override this function if additional checks have
1369 """Extensions can override this function if additional checks have
1369 to be performed before pushing, or call it if they override push
1370 to be performed before pushing, or call it if they override push
1370 command.
1371 command.
1371 """
1372 """
1372 pass
1373 pass
1373
1374
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()

        force and newbranch are forwarded to discovery.prepush; after
        the changegroup transfer, shared bookmarks that moved strictly
        forward locally are pushed to the remote via pushkey.
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            # only the addchangegroup path requires us to hold the
            # remote lock ourselves
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            # prepush returns no changegroup when there is nothing to
            # push (or the push was refused); remote_heads then carries
            # the integer result described in the docstring
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        # advance any shared bookmarks the local repository has moved
        # strictly forward (fast-forward only)
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                # remote position (hex), local position (hex)
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        # local bookmark descends from the remote one:
                        # ask the remote to move it (old value nr must
                        # still match on the server side)
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1435
1436
1436 def changegroupinfo(self, nodes, source):
1437 def changegroupinfo(self, nodes, source):
1437 if self.ui.verbose or source == 'bundle':
1438 if self.ui.verbose or source == 'bundle':
1438 self.ui.status(_("%d changesets found\n") % len(nodes))
1439 self.ui.status(_("%d changesets found\n") % len(nodes))
1439 if self.ui.debugflag:
1440 if self.ui.debugflag:
1440 self.ui.debug("list of changesets:\n")
1441 self.ui.debug("list of changesets:\n")
1441 for node in nodes:
1442 for node in nodes:
1442 self.ui.debug("%s\n" % hex(node))
1443 self.ui.debug("%s\n" % hex(node))
1443
1444
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendents of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            # (a full-repo changegroup with no extra nodes can skip the
            # per-file bookkeeping below entirely)
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if mnfst.deltaparent(r) in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            # ...and everything a known node depends on is known too
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('changesets'))
            # NOTE(review): relies on the loop above running at least once
            # (cnt would be unbound otherwise) -- presumably cl.group always
            # yields at least a closing chunk; confirm against revlog.group.
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            efiles = {}
            for cnt, chnk in enumerate(group):
                if cnt % 3 == 1:
                    # second chunk of each node triple carries the node id
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                yield chnk
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
            self.ui.progress(_('bundling'), None)
            # from here on efiles is just the count, for progress totals
            efiles = len(efiles)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        # key 1 is the manifest, already handled above
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        # even though we print the same progress on
                        # most loop iterations, put the progress call
                        # here so that time estimates (if any) can be updated
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            unit=_('files'), total=efiles)
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1666
1667
1667 def changegroup(self, basenodes, source):
1668 def changegroup(self, basenodes, source):
1668 # to avoid a race we use changegroupsubset() (issue1320)
1669 # to avoid a race we use changegroupsubset() (issue1320)
1669 return self.changegroupsubset(basenodes, self.heads(), source)
1670 return self.changegroupsubset(basenodes, self.heads(), source)
1670
1671
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't.  Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # changelog revisions being transmitted; used below to pick the
        # manifest and file revisions linked to them
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # yield the nodes of `log` whose linkrev falls inside the outgoing
        # changeset set
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # build a lookup function mapping a node of `revlog` to the
        # changelog node it was introduced by
        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                # revlog.group yields three entries per node, so
                # dividing by 3 gives an approximation of how many
                # nodes have been processed.
                self.ui.progress(_('bundling'), cnt / 3, unit=_('changesets'))
                yield chnk
            # NOTE(review): cnt would be unbound if the loop above never ran
            # -- presumably cl.group always yields at least one chunk; confirm.
            changecount = cnt / 3
            self.ui.progress(_('bundling'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            efiles = {}
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                if cnt % 3 == 1:
                    # second chunk of each node triple carries the node id
                    mnode = chnk[:20]
                    efiles.update(mnfst.readdelta(mnode))
                # see above comment for why we divide by 3
                self.ui.progress(_('bundling'), cnt / 3,
                                 unit=_('manifests'), total=changecount)
                yield chnk
            # from here on efiles is just the file count, for progress totals
            efiles = len(efiles)
            self.ui.progress(_('bundling'), None)

            for idx, fname in enumerate(sorted(changedfiles)):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    # per-file sub-group: name header, then the node chunks
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling'), idx, item=fname,
                            total=efiles, unit=_('files'))
                        yield chnk
            self.ui.progress(_('bundling'), None)

            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1754
1755
1755 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1756 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1756 """Add the changegroup returned by source.read() to this repo.
1757 """Add the changegroup returned by source.read() to this repo.
1757 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1758 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1758 the URL of the repo where this changegroup is coming from.
1759 the URL of the repo where this changegroup is coming from.
1759 If lock is not None, the function takes ownership of the lock
1760 If lock is not None, the function takes ownership of the lock
1760 and releases it after the changegroup is added.
1761 and releases it after the changegroup is added.
1761
1762
1762 Return an integer summarizing the change to this repo:
1763 Return an integer summarizing the change to this repo:
1763 - nothing changed or no source: 0
1764 - nothing changed or no source: 0
1764 - more heads than before: 1+added heads (2..n)
1765 - more heads than before: 1+added heads (2..n)
1765 - fewer heads than before: -1-removed heads (-2..-n)
1766 - fewer heads than before: -1-removed heads (-2..-n)
1766 - number of heads stays the same: 1
1767 - number of heads stays the same: 1
1767 """
1768 """
1768 def csmap(x):
1769 def csmap(x):
1769 self.ui.debug("add changeset %s\n" % short(x))
1770 self.ui.debug("add changeset %s\n" % short(x))
1770 return len(cl)
1771 return len(cl)
1771
1772
1772 def revmap(x):
1773 def revmap(x):
1773 return cl.rev(x)
1774 return cl.rev(x)
1774
1775
1775 if not source:
1776 if not source:
1776 return 0
1777 return 0
1777
1778
1778 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1779 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1779
1780
1780 changesets = files = revisions = 0
1781 changesets = files = revisions = 0
1781 efiles = set()
1782 efiles = set()
1782
1783
1783 # write changelog data to temp files so concurrent readers will not see
1784 # write changelog data to temp files so concurrent readers will not see
1784 # inconsistent view
1785 # inconsistent view
1785 cl = self.changelog
1786 cl = self.changelog
1786 cl.delayupdate()
1787 cl.delayupdate()
1787 oldheads = len(cl.heads())
1788 oldheads = len(cl.heads())
1788
1789
1789 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1790 tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
1790 try:
1791 try:
1791 trp = weakref.proxy(tr)
1792 trp = weakref.proxy(tr)
1792 # pull off the changeset group
1793 # pull off the changeset group
1793 self.ui.status(_("adding changesets\n"))
1794 self.ui.status(_("adding changesets\n"))
1794 clstart = len(cl)
1795 clstart = len(cl)
1795 class prog(object):
1796 class prog(object):
1796 step = _('changesets')
1797 step = _('changesets')
1797 count = 1
1798 count = 1
1798 ui = self.ui
1799 ui = self.ui
1799 total = None
1800 total = None
1800 def __call__(self):
1801 def __call__(self):
1801 self.ui.progress(self.step, self.count, unit=_('chunks'),
1802 self.ui.progress(self.step, self.count, unit=_('chunks'),
1802 total=self.total)
1803 total=self.total)
1803 self.count += 1
1804 self.count += 1
1804 pr = prog()
1805 pr = prog()
1805 source.callback = pr
1806 source.callback = pr
1806
1807
1807 if (cl.addgroup(source, csmap, trp) is None
1808 if (cl.addgroup(source, csmap, trp) is None
1808 and not emptyok):
1809 and not emptyok):
1809 raise util.Abort(_("received changelog group is empty"))
1810 raise util.Abort(_("received changelog group is empty"))
1810 clend = len(cl)
1811 clend = len(cl)
1811 changesets = clend - clstart
1812 changesets = clend - clstart
1812 for c in xrange(clstart, clend):
1813 for c in xrange(clstart, clend):
1813 efiles.update(self[c].files())
1814 efiles.update(self[c].files())
1814 efiles = len(efiles)
1815 efiles = len(efiles)
1815 self.ui.progress(_('changesets'), None)
1816 self.ui.progress(_('changesets'), None)
1816
1817
1817 # pull off the manifest group
1818 # pull off the manifest group
1818 self.ui.status(_("adding manifests\n"))
1819 self.ui.status(_("adding manifests\n"))
1819 pr.step = _('manifests')
1820 pr.step = _('manifests')
1820 pr.count = 1
1821 pr.count = 1
1821 pr.total = changesets # manifests <= changesets
1822 pr.total = changesets # manifests <= changesets
1822 # no need to check for empty manifest group here:
1823 # no need to check for empty manifest group here:
1823 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1824 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1824 # no new manifest will be created and the manifest group will
1825 # no new manifest will be created and the manifest group will
1825 # be empty during the pull
1826 # be empty during the pull
1826 self.manifest.addgroup(source, revmap, trp)
1827 self.manifest.addgroup(source, revmap, trp)
1827 self.ui.progress(_('manifests'), None)
1828 self.ui.progress(_('manifests'), None)
1828
1829
1829 needfiles = {}
1830 needfiles = {}
1830 if self.ui.configbool('server', 'validate', default=False):
1831 if self.ui.configbool('server', 'validate', default=False):
1831 # validate incoming csets have their manifests
1832 # validate incoming csets have their manifests
1832 for cset in xrange(clstart, clend):
1833 for cset in xrange(clstart, clend):
1833 mfest = self.changelog.read(self.changelog.node(cset))[0]
1834 mfest = self.changelog.read(self.changelog.node(cset))[0]
1834 mfest = self.manifest.readdelta(mfest)
1835 mfest = self.manifest.readdelta(mfest)
1835 # store file nodes we must see
1836 # store file nodes we must see
1836 for f, n in mfest.iteritems():
1837 for f, n in mfest.iteritems():
1837 needfiles.setdefault(f, set()).add(n)
1838 needfiles.setdefault(f, set()).add(n)
1838
1839
1839 # process the files
1840 # process the files
1840 self.ui.status(_("adding file changes\n"))
1841 self.ui.status(_("adding file changes\n"))
1841 pr.step = 'files'
1842 pr.step = 'files'
1842 pr.count = 1
1843 pr.count = 1
1843 pr.total = efiles
1844 pr.total = efiles
1844 source.callback = None
1845 source.callback = None
1845
1846
1846 while 1:
1847 while 1:
1847 f = source.chunk()
1848 f = source.chunk()
1848 if not f:
1849 if not f:
1849 break
1850 break
1850 self.ui.debug("adding %s revisions\n" % f)
1851 self.ui.debug("adding %s revisions\n" % f)
1851 pr()
1852 pr()
1852 fl = self.file(f)
1853 fl = self.file(f)
1853 o = len(fl)
1854 o = len(fl)
1854 if fl.addgroup(source, revmap, trp) is None:
1855 if fl.addgroup(source, revmap, trp) is None:
1855 raise util.Abort(_("received file revlog group is empty"))
1856 raise util.Abort(_("received file revlog group is empty"))
1856 revisions += len(fl) - o
1857 revisions += len(fl) - o
1857 files += 1
1858 files += 1
1858 if f in needfiles:
1859 if f in needfiles:
1859 needs = needfiles[f]
1860 needs = needfiles[f]
1860 for new in xrange(o, len(fl)):
1861 for new in xrange(o, len(fl)):
1861 n = fl.node(new)
1862 n = fl.node(new)
1862 if n in needs:
1863 if n in needs:
1863 needs.remove(n)
1864 needs.remove(n)
1864 if not needs:
1865 if not needs:
1865 del needfiles[f]
1866 del needfiles[f]
1866 self.ui.progress(_('files'), None)
1867 self.ui.progress(_('files'), None)
1867
1868
1868 for f, needs in needfiles.iteritems():
1869 for f, needs in needfiles.iteritems():
1869 fl = self.file(f)
1870 fl = self.file(f)
1870 for n in needs:
1871 for n in needs:
1871 try:
1872 try:
1872 fl.rev(n)
1873 fl.rev(n)
1873 except error.LookupError:
1874 except error.LookupError:
1874 raise util.Abort(
1875 raise util.Abort(
1875 _('missing file data for %s:%s - run hg verify') %
1876 _('missing file data for %s:%s - run hg verify') %
1876 (f, hex(n)))
1877 (f, hex(n)))
1877
1878
1878 newheads = len(cl.heads())
1879 newheads = len(cl.heads())
1879 heads = ""
1880 heads = ""
1880 if oldheads and newheads != oldheads:
1881 if oldheads and newheads != oldheads:
1881 heads = _(" (%+d heads)") % (newheads - oldheads)
1882 heads = _(" (%+d heads)") % (newheads - oldheads)
1882
1883
1883 self.ui.status(_("added %d changesets"
1884 self.ui.status(_("added %d changesets"
1884 " with %d changes to %d files%s\n")
1885 " with %d changes to %d files%s\n")
1885 % (changesets, revisions, files, heads))
1886 % (changesets, revisions, files, heads))
1886
1887
1887 if changesets > 0:
1888 if changesets > 0:
1888 p = lambda: cl.writepending() and self.root or ""
1889 p = lambda: cl.writepending() and self.root or ""
1889 self.hook('pretxnchangegroup', throw=True,
1890 self.hook('pretxnchangegroup', throw=True,
1890 node=hex(cl.node(clstart)), source=srctype,
1891 node=hex(cl.node(clstart)), source=srctype,
1891 url=url, pending=p)
1892 url=url, pending=p)
1892
1893
1893 # make changelog see real files again
1894 # make changelog see real files again
1894 cl.finalize(trp)
1895 cl.finalize(trp)
1895
1896
1896 tr.close()
1897 tr.close()
1897 finally:
1898 finally:
1898 tr.release()
1899 tr.release()
1899 if lock:
1900 if lock:
1900 lock.release()
1901 lock.release()
1901
1902
1902 if changesets > 0:
1903 if changesets > 0:
1903 # forcefully update the on-disk branch cache
1904 # forcefully update the on-disk branch cache
1904 self.ui.debug("updating the branch cache\n")
1905 self.ui.debug("updating the branch cache\n")
1905 self.updatebranchcache()
1906 self.updatebranchcache()
1906 self.hook("changegroup", node=hex(cl.node(clstart)),
1907 self.hook("changegroup", node=hex(cl.node(clstart)),
1907 source=srctype, url=url)
1908 source=srctype, url=url)
1908
1909
1909 for i in xrange(clstart, clend):
1910 for i in xrange(clstart, clend):
1910 self.hook("incoming", node=hex(cl.node(i)),
1911 self.hook("incoming", node=hex(cl.node(i)),
1911 source=srctype, url=url)
1912 source=srctype, url=url)
1912
1913
1913 # FIXME - why does this care about tip?
1914 # FIXME - why does this care about tip?
1914 if newheads == oldheads:
1915 if newheads == oldheads:
1915 bookmarks.update(self, self.dirstate.parents(), self['tip'].node())
1916 bookmarks.update(self, self.dirstate.parents(), self['tip'].node())
1916
1917
1917 # never return 0 here:
1918 # never return 0 here:
1918 if newheads < oldheads:
1919 if newheads < oldheads:
1919 return newheads - oldheads - 1
1920 return newheads - oldheads - 1
1920 else:
1921 else:
1921 return newheads - oldheads + 1
1922 return newheads - oldheads + 1
1922
1923
1923
1924
1924 def stream_in(self, remote, requirements):
1925 def stream_in(self, remote, requirements):
1925 lock = self.lock()
1926 lock = self.lock()
1926 try:
1927 try:
1927 fp = remote.stream_out()
1928 fp = remote.stream_out()
1928 l = fp.readline()
1929 l = fp.readline()
1929 try:
1930 try:
1930 resp = int(l)
1931 resp = int(l)
1931 except ValueError:
1932 except ValueError:
1932 raise error.ResponseError(
1933 raise error.ResponseError(
1933 _('Unexpected response from remote server:'), l)
1934 _('Unexpected response from remote server:'), l)
1934 if resp == 1:
1935 if resp == 1:
1935 raise util.Abort(_('operation forbidden by server'))
1936 raise util.Abort(_('operation forbidden by server'))
1936 elif resp == 2:
1937 elif resp == 2:
1937 raise util.Abort(_('locking the remote repository failed'))
1938 raise util.Abort(_('locking the remote repository failed'))
1938 elif resp != 0:
1939 elif resp != 0:
1939 raise util.Abort(_('the server sent an unknown error code'))
1940 raise util.Abort(_('the server sent an unknown error code'))
1940 self.ui.status(_('streaming all changes\n'))
1941 self.ui.status(_('streaming all changes\n'))
1941 l = fp.readline()
1942 l = fp.readline()
1942 try:
1943 try:
1943 total_files, total_bytes = map(int, l.split(' ', 1))
1944 total_files, total_bytes = map(int, l.split(' ', 1))
1944 except (ValueError, TypeError):
1945 except (ValueError, TypeError):
1945 raise error.ResponseError(
1946 raise error.ResponseError(
1946 _('Unexpected response from remote server:'), l)
1947 _('Unexpected response from remote server:'), l)
1947 self.ui.status(_('%d files to transfer, %s of data\n') %
1948 self.ui.status(_('%d files to transfer, %s of data\n') %
1948 (total_files, util.bytecount(total_bytes)))
1949 (total_files, util.bytecount(total_bytes)))
1949 start = time.time()
1950 start = time.time()
1950 for i in xrange(total_files):
1951 for i in xrange(total_files):
1951 # XXX doesn't support '\n' or '\r' in filenames
1952 # XXX doesn't support '\n' or '\r' in filenames
1952 l = fp.readline()
1953 l = fp.readline()
1953 try:
1954 try:
1954 name, size = l.split('\0', 1)
1955 name, size = l.split('\0', 1)
1955 size = int(size)
1956 size = int(size)
1956 except (ValueError, TypeError):
1957 except (ValueError, TypeError):
1957 raise error.ResponseError(
1958 raise error.ResponseError(
1958 _('Unexpected response from remote server:'), l)
1959 _('Unexpected response from remote server:'), l)
1959 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1960 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1960 # for backwards compat, name was partially encoded
1961 # for backwards compat, name was partially encoded
1961 ofp = self.sopener(store.decodedir(name), 'w')
1962 ofp = self.sopener(store.decodedir(name), 'w')
1962 for chunk in util.filechunkiter(fp, limit=size):
1963 for chunk in util.filechunkiter(fp, limit=size):
1963 ofp.write(chunk)
1964 ofp.write(chunk)
1964 ofp.close()
1965 ofp.close()
1965 elapsed = time.time() - start
1966 elapsed = time.time() - start
1966 if elapsed <= 0:
1967 if elapsed <= 0:
1967 elapsed = 0.001
1968 elapsed = 0.001
1968 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1969 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1969 (util.bytecount(total_bytes), elapsed,
1970 (util.bytecount(total_bytes), elapsed,
1970 util.bytecount(total_bytes / elapsed)))
1971 util.bytecount(total_bytes / elapsed)))
1971
1972
1972 # new requirements = old non-format requirements + new format-related
1973 # new requirements = old non-format requirements + new format-related
1973 # requirements from the streamed-in repository
1974 # requirements from the streamed-in repository
1974 requirements.update(set(self.requirements) - self.supportedformats)
1975 requirements.update(set(self.requirements) - self.supportedformats)
1975 self._applyrequirements(requirements)
1976 self._applyrequirements(requirements)
1976 self._writerequirements()
1977 self._writerequirements()
1977
1978
1978 self.invalidate()
1979 self.invalidate()
1979 return len(self.heads()) + 1
1980 return len(self.heads()) + 1
1980 finally:
1981 finally:
1981 lock.release()
1982 lock.release()
1982
1983
1983 def clone(self, remote, heads=[], stream=False):
1984 def clone(self, remote, heads=[], stream=False):
1984 '''clone remote repository.
1985 '''clone remote repository.
1985
1986
1986 keyword arguments:
1987 keyword arguments:
1987 heads: list of revs to clone (forces use of pull)
1988 heads: list of revs to clone (forces use of pull)
1988 stream: use streaming clone if possible'''
1989 stream: use streaming clone if possible'''
1989
1990
1990 # now, all clients that can request uncompressed clones can
1991 # now, all clients that can request uncompressed clones can
1991 # read repo formats supported by all servers that can serve
1992 # read repo formats supported by all servers that can serve
1992 # them.
1993 # them.
1993
1994
1994 # if revlog format changes, client will have to check version
1995 # if revlog format changes, client will have to check version
1995 # and format flags on "stream" capability, and use
1996 # and format flags on "stream" capability, and use
1996 # uncompressed only if compatible.
1997 # uncompressed only if compatible.
1997
1998
1998 if stream and not heads:
1999 if stream and not heads:
1999 # 'stream' means remote revlog format is revlogv1 only
2000 # 'stream' means remote revlog format is revlogv1 only
2000 if remote.capable('stream'):
2001 if remote.capable('stream'):
2001 return self.stream_in(remote, set(('revlogv1',)))
2002 return self.stream_in(remote, set(('revlogv1',)))
2002 # otherwise, 'streamreqs' contains the remote revlog format
2003 # otherwise, 'streamreqs' contains the remote revlog format
2003 streamreqs = remote.capable('streamreqs')
2004 streamreqs = remote.capable('streamreqs')
2004 if streamreqs:
2005 if streamreqs:
2005 streamreqs = set(streamreqs.split(','))
2006 streamreqs = set(streamreqs.split(','))
2006 # if we support it, stream in and adjust our requirements
2007 # if we support it, stream in and adjust our requirements
2007 if not streamreqs - self.supportedformats:
2008 if not streamreqs - self.supportedformats:
2008 return self.stream_in(remote, streamreqs)
2009 return self.stream_in(remote, streamreqs)
2009 return self.pull(remote, heads)
2010 return self.pull(remote, heads)
2010
2011
2011 def pushkey(self, namespace, key, old, new):
2012 def pushkey(self, namespace, key, old, new):
2012 return pushkey.push(self, namespace, key, old, new)
2013 return pushkey.push(self, namespace, key, old, new)
2013
2014
2014 def listkeys(self, namespace):
2015 def listkeys(self, namespace):
2015 return pushkey.list(self, namespace)
2016 return pushkey.list(self, namespace)
2016
2017
2017 # used to avoid circular references so destructors work
2018 # used to avoid circular references so destructors work
2018 def aftertrans(files):
2019 def aftertrans(files):
2019 renamefiles = [tuple(t) for t in files]
2020 renamefiles = [tuple(t) for t in files]
2020 def a():
2021 def a():
2021 for src, dest in renamefiles:
2022 for src, dest in renamefiles:
2022 util.rename(src, dest)
2023 util.rename(src, dest)
2023 return a
2024 return a
2024
2025
2025 def instance(ui, path, create):
2026 def instance(ui, path, create):
2026 return localrepository(ui, util.drop_scheme('file', path), create)
2027 return localrepository(ui, util.drop_scheme('file', path), create)
2027
2028
2028 def islocal(path):
2029 def islocal(path):
2029 return True
2030 return True
@@ -1,145 +1,146 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from i18n import _
10 from i18n import _
11 import changelog, byterange, url, error
11 import changelog, byterange, url, error
12 import localrepo, manifest, util, store
12 import localrepo, manifest, util, store
13 import urllib, urllib2, errno
13 import urllib, urllib2, errno
14
14
15 class httprangereader(object):
15 class httprangereader(object):
16 def __init__(self, url, opener):
16 def __init__(self, url, opener):
17 # we assume opener has HTTPRangeHandler
17 # we assume opener has HTTPRangeHandler
18 self.url = url
18 self.url = url
19 self.pos = 0
19 self.pos = 0
20 self.opener = opener
20 self.opener = opener
21 self.name = url
21 self.name = url
22 def seek(self, pos):
22 def seek(self, pos):
23 self.pos = pos
23 self.pos = pos
24 def read(self, bytes=None):
24 def read(self, bytes=None):
25 req = urllib2.Request(self.url)
25 req = urllib2.Request(self.url)
26 end = ''
26 end = ''
27 if bytes:
27 if bytes:
28 end = self.pos + bytes - 1
28 end = self.pos + bytes - 1
29 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
29 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
30
30
31 try:
31 try:
32 f = self.opener.open(req)
32 f = self.opener.open(req)
33 data = f.read()
33 data = f.read()
34 if hasattr(f, 'getcode'):
34 if hasattr(f, 'getcode'):
35 # python 2.6+
35 # python 2.6+
36 code = f.getcode()
36 code = f.getcode()
37 elif hasattr(f, 'code'):
37 elif hasattr(f, 'code'):
38 # undocumented attribute, seems to be set in 2.4 and 2.5
38 # undocumented attribute, seems to be set in 2.4 and 2.5
39 code = f.code
39 code = f.code
40 else:
40 else:
41 # Don't know how to check, hope for the best.
41 # Don't know how to check, hope for the best.
42 code = 206
42 code = 206
43 except urllib2.HTTPError, inst:
43 except urllib2.HTTPError, inst:
44 num = inst.code == 404 and errno.ENOENT or None
44 num = inst.code == 404 and errno.ENOENT or None
45 raise IOError(num, inst)
45 raise IOError(num, inst)
46 except urllib2.URLError, inst:
46 except urllib2.URLError, inst:
47 raise IOError(None, inst.reason[1])
47 raise IOError(None, inst.reason[1])
48
48
49 if code == 200:
49 if code == 200:
50 # HTTPRangeHandler does nothing if remote does not support
50 # HTTPRangeHandler does nothing if remote does not support
51 # Range headers and returns the full entity. Let's slice it.
51 # Range headers and returns the full entity. Let's slice it.
52 if bytes:
52 if bytes:
53 data = data[self.pos:self.pos + bytes]
53 data = data[self.pos:self.pos + bytes]
54 else:
54 else:
55 data = data[self.pos:]
55 data = data[self.pos:]
56 elif bytes:
56 elif bytes:
57 data = data[:bytes]
57 data = data[:bytes]
58 self.pos += len(data)
58 self.pos += len(data)
59 return data
59 return data
60 def __iter__(self):
60 def __iter__(self):
61 return iter(self.read().splitlines(1))
61 return iter(self.read().splitlines(1))
62 def close(self):
62 def close(self):
63 pass
63 pass
64
64
65 def build_opener(ui, authinfo):
65 def build_opener(ui, authinfo):
66 # urllib cannot handle URLs with embedded user or passwd
66 # urllib cannot handle URLs with embedded user or passwd
67 urlopener = url.opener(ui, authinfo)
67 urlopener = url.opener(ui, authinfo)
68 urlopener.add_handler(byterange.HTTPRangeHandler())
68 urlopener.add_handler(byterange.HTTPRangeHandler())
69
69
70 def opener(base):
70 def opener(base):
71 """return a function that opens files over http"""
71 """return a function that opens files over http"""
72 p = base
72 p = base
73 def o(path, mode="r", atomictemp=None):
73 def o(path, mode="r", atomictemp=None):
74 if 'a' in mode or 'w' in mode:
74 if 'a' in mode or 'w' in mode:
75 raise IOError('Permission denied')
75 raise IOError('Permission denied')
76 f = "/".join((p, urllib.quote(path)))
76 f = "/".join((p, urllib.quote(path)))
77 return httprangereader(f, urlopener)
77 return httprangereader(f, urlopener)
78 return o
78 return o
79
79
80 return opener
80 return opener
81
81
82 class statichttprepository(localrepo.localrepository):
82 class statichttprepository(localrepo.localrepository):
83 def __init__(self, ui, path):
83 def __init__(self, ui, path):
84 self._url = path
84 self._url = path
85 self.ui = ui
85 self.ui = ui
86
86
87 self.root = path
87 self.root = path
88 self.path, authinfo = url.getauthinfo(path.rstrip('/') + "/.hg")
88 self.path, authinfo = url.getauthinfo(path.rstrip('/') + "/.hg")
89
89
90 opener = build_opener(ui, authinfo)
90 opener = build_opener(ui, authinfo)
91 self.opener = opener(self.path)
91 self.opener = opener(self.path)
92
92
93 # find requirements
93 # find requirements
94 try:
94 try:
95 requirements = self.opener("requires").read().splitlines()
95 requirements = self.opener("requires").read().splitlines()
96 except IOError, inst:
96 except IOError, inst:
97 if inst.errno != errno.ENOENT:
97 if inst.errno != errno.ENOENT:
98 raise
98 raise
99 # check if it is a non-empty old-style repository
99 # check if it is a non-empty old-style repository
100 try:
100 try:
101 fp = self.opener("00changelog.i")
101 fp = self.opener("00changelog.i")
102 fp.read(1)
102 fp.read(1)
103 fp.close()
103 fp.close()
104 except IOError, inst:
104 except IOError, inst:
105 if inst.errno != errno.ENOENT:
105 if inst.errno != errno.ENOENT:
106 raise
106 raise
107 # we do not care about empty old-style repositories here
107 # we do not care about empty old-style repositories here
108 msg = _("'%s' does not appear to be an hg repository") % path
108 msg = _("'%s' does not appear to be an hg repository") % path
109 raise error.RepoError(msg)
109 raise error.RepoError(msg)
110 requirements = []
110 requirements = []
111
111
112 # check them
112 # check them
113 for r in requirements:
113 for r in requirements:
114 if r not in self.supported:
114 if r not in self.supported:
115 raise error.RepoError(_("requirement '%s' not supported") % r)
115 raise error.RequirementError(
116 _("requirement '%s' not supported") % r)
116
117
117 # setup store
118 # setup store
118 self.store = store.store(requirements, self.path, opener)
119 self.store = store.store(requirements, self.path, opener)
119 self.spath = self.store.path
120 self.spath = self.store.path
120 self.sopener = self.store.opener
121 self.sopener = self.store.opener
121 self.sjoin = self.store.join
122 self.sjoin = self.store.join
122
123
123 self.manifest = manifest.manifest(self.sopener)
124 self.manifest = manifest.manifest(self.sopener)
124 self.changelog = changelog.changelog(self.sopener)
125 self.changelog = changelog.changelog(self.sopener)
125 self._tags = None
126 self._tags = None
126 self.nodetagscache = None
127 self.nodetagscache = None
127 self._branchcache = None
128 self._branchcache = None
128 self._branchcachetip = None
129 self._branchcachetip = None
129 self.encodepats = None
130 self.encodepats = None
130 self.decodepats = None
131 self.decodepats = None
131 self.capabilities = self.capabilities.difference(["pushkey"])
132 self.capabilities = self.capabilities.difference(["pushkey"])
132
133
133 def url(self):
134 def url(self):
134 return self._url
135 return self._url
135
136
136 def local(self):
137 def local(self):
137 return False
138 return False
138
139
139 def lock(self, wait=True):
140 def lock(self, wait=True):
140 raise util.Abort(_('cannot lock static-http repository'))
141 raise util.Abort(_('cannot lock static-http repository'))
141
142
142 def instance(ui, path, create):
143 def instance(ui, path, create):
143 if create:
144 if create:
144 raise util.Abort(_('cannot create new static-http repository'))
145 raise util.Abort(_('cannot create new static-http repository'))
145 return statichttprepository(ui, path[7:])
146 return statichttprepository(ui, path[7:])
@@ -1,272 +1,281 b''
1 commit date test
1 commit date test
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo foo > foo
5 $ echo foo > foo
6 $ hg add foo
6 $ hg add foo
7 $ HGEDITOR=true hg commit -m ""
7 $ HGEDITOR=true hg commit -m ""
8 abort: empty commit message
8 abort: empty commit message
9 [255]
9 [255]
10 $ hg commit -d '0 0' -m commit-1
10 $ hg commit -d '0 0' -m commit-1
11 $ echo foo >> foo
11 $ echo foo >> foo
12 $ hg commit -d '1 4444444' -m commit-3
12 $ hg commit -d '1 4444444' -m commit-3
13 abort: impossible time zone offset: 4444444
13 abort: impossible time zone offset: 4444444
14 [255]
14 [255]
15 $ hg commit -d '1 15.1' -m commit-4
15 $ hg commit -d '1 15.1' -m commit-4
16 abort: invalid date: '1\t15.1'
16 abort: invalid date: '1\t15.1'
17 [255]
17 [255]
18 $ hg commit -d 'foo bar' -m commit-5
18 $ hg commit -d 'foo bar' -m commit-5
19 abort: invalid date: 'foo bar'
19 abort: invalid date: 'foo bar'
20 [255]
20 [255]
21 $ hg commit -d ' 1 4444' -m commit-6
21 $ hg commit -d ' 1 4444' -m commit-6
22 $ hg commit -d '111111111111 0' -m commit-7
22 $ hg commit -d '111111111111 0' -m commit-7
23 abort: date exceeds 32 bits: 111111111111
23 abort: date exceeds 32 bits: 111111111111
24 [255]
24 [255]
25 $ hg commit -d '-7654321 3600' -m commit-7
25 $ hg commit -d '-7654321 3600' -m commit-7
26 abort: negative date value: -7654321
26 abort: negative date value: -7654321
27 [255]
27 [255]
28
28
29 commit added file that has been deleted
29 commit added file that has been deleted
30
30
31 $ echo bar > bar
31 $ echo bar > bar
32 $ hg add bar
32 $ hg add bar
33 $ rm bar
33 $ rm bar
34 $ hg commit -m commit-8
34 $ hg commit -m commit-8
35 nothing changed
35 nothing changed
36 [1]
36 [1]
37 $ hg commit -m commit-8-2 bar
37 $ hg commit -m commit-8-2 bar
38 abort: bar: file not found!
38 abort: bar: file not found!
39 [255]
39 [255]
40
40
41 $ hg -q revert -a --no-backup
41 $ hg -q revert -a --no-backup
42
42
43 $ mkdir dir
43 $ mkdir dir
44 $ echo boo > dir/file
44 $ echo boo > dir/file
45 $ hg add
45 $ hg add
46 adding dir/file
46 adding dir/file
47 $ hg -v commit -m commit-9 dir
47 $ hg -v commit -m commit-9 dir
48 dir/file
48 dir/file
49 committed changeset 2:d2a76177cb42
49 committed changeset 2:d2a76177cb42
50
50
51 $ echo > dir.file
51 $ echo > dir.file
52 $ hg add
52 $ hg add
53 adding dir.file
53 adding dir.file
54 $ hg commit -m commit-10 dir dir.file
54 $ hg commit -m commit-10 dir dir.file
55 abort: dir: no match under directory!
55 abort: dir: no match under directory!
56 [255]
56 [255]
57
57
58 $ echo >> dir/file
58 $ echo >> dir/file
59 $ mkdir bleh
59 $ mkdir bleh
60 $ mkdir dir2
60 $ mkdir dir2
61 $ cd bleh
61 $ cd bleh
62 $ hg commit -m commit-11 .
62 $ hg commit -m commit-11 .
63 abort: bleh: no match under directory!
63 abort: bleh: no match under directory!
64 [255]
64 [255]
65 $ hg commit -m commit-12 ../dir ../dir2
65 $ hg commit -m commit-12 ../dir ../dir2
66 abort: dir2: no match under directory!
66 abort: dir2: no match under directory!
67 [255]
67 [255]
68 $ hg -v commit -m commit-13 ../dir
68 $ hg -v commit -m commit-13 ../dir
69 dir/file
69 dir/file
70 committed changeset 3:1cd62a2d8db5
70 committed changeset 3:1cd62a2d8db5
71 $ cd ..
71 $ cd ..
72
72
73 $ hg commit -m commit-14 does-not-exist
73 $ hg commit -m commit-14 does-not-exist
74 abort: does-not-exist: No such file or directory
74 abort: does-not-exist: No such file or directory
75 [255]
75 [255]
76 $ ln -s foo baz
76 $ ln -s foo baz
77 $ hg commit -m commit-15 baz
77 $ hg commit -m commit-15 baz
78 abort: baz: file not tracked!
78 abort: baz: file not tracked!
79 [255]
79 [255]
80 $ touch quux
80 $ touch quux
81 $ hg commit -m commit-16 quux
81 $ hg commit -m commit-16 quux
82 abort: quux: file not tracked!
82 abort: quux: file not tracked!
83 [255]
83 [255]
84 $ echo >> dir/file
84 $ echo >> dir/file
85 $ hg -v commit -m commit-17 dir/file
85 $ hg -v commit -m commit-17 dir/file
86 dir/file
86 dir/file
87 committed changeset 4:49176991390e
87 committed changeset 4:49176991390e
88
88
89 An empty date was interpreted as epoch origin
89 An empty date was interpreted as epoch origin
90
90
91 $ echo foo >> foo
91 $ echo foo >> foo
92 $ hg commit -d '' -m commit-no-date
92 $ hg commit -d '' -m commit-no-date
93 $ hg tip --template '{date|isodate}\n' | grep '1970'
93 $ hg tip --template '{date|isodate}\n' | grep '1970'
94 [1]
94 [1]
95
96 Make sure we do not obscure unknown requires file entries (issue2649)
97
98 $ echo foo >> foo
99 $ echo fake >> .hg/requires
100 $ hg commit -m bla
101 abort: requirement 'fake' not supported!
102 [255]
103
95 $ cd ..
104 $ cd ..
96
105
97
106
98 partial subdir commit test
107 partial subdir commit test
99
108
100 $ hg init test2
109 $ hg init test2
101 $ cd test2
110 $ cd test2
102 $ mkdir foo
111 $ mkdir foo
103 $ echo foo > foo/foo
112 $ echo foo > foo/foo
104 $ mkdir bar
113 $ mkdir bar
105 $ echo bar > bar/bar
114 $ echo bar > bar/bar
106 $ hg add
115 $ hg add
107 adding bar/bar
116 adding bar/bar
108 adding foo/foo
117 adding foo/foo
109 $ hg ci -m commit-subdir-1 foo
118 $ hg ci -m commit-subdir-1 foo
110 $ hg ci -m commit-subdir-2 bar
119 $ hg ci -m commit-subdir-2 bar
111
120
112 subdir log 1
121 subdir log 1
113
122
114 $ hg log -v foo
123 $ hg log -v foo
115 changeset: 0:f97e73a25882
124 changeset: 0:f97e73a25882
116 user: test
125 user: test
117 date: Thu Jan 01 00:00:00 1970 +0000
126 date: Thu Jan 01 00:00:00 1970 +0000
118 files: foo/foo
127 files: foo/foo
119 description:
128 description:
120 commit-subdir-1
129 commit-subdir-1
121
130
122
131
123
132
124 subdir log 2
133 subdir log 2
125
134
126 $ hg log -v bar
135 $ hg log -v bar
127 changeset: 1:aa809156d50d
136 changeset: 1:aa809156d50d
128 tag: tip
137 tag: tip
129 user: test
138 user: test
130 date: Thu Jan 01 00:00:00 1970 +0000
139 date: Thu Jan 01 00:00:00 1970 +0000
131 files: bar/bar
140 files: bar/bar
132 description:
141 description:
133 commit-subdir-2
142 commit-subdir-2
134
143
135
144
136
145
137 full log
146 full log
138
147
139 $ hg log -v
148 $ hg log -v
140 changeset: 1:aa809156d50d
149 changeset: 1:aa809156d50d
141 tag: tip
150 tag: tip
142 user: test
151 user: test
143 date: Thu Jan 01 00:00:00 1970 +0000
152 date: Thu Jan 01 00:00:00 1970 +0000
144 files: bar/bar
153 files: bar/bar
145 description:
154 description:
146 commit-subdir-2
155 commit-subdir-2
147
156
148
157
149 changeset: 0:f97e73a25882
158 changeset: 0:f97e73a25882
150 user: test
159 user: test
151 date: Thu Jan 01 00:00:00 1970 +0000
160 date: Thu Jan 01 00:00:00 1970 +0000
152 files: foo/foo
161 files: foo/foo
153 description:
162 description:
154 commit-subdir-1
163 commit-subdir-1
155
164
156
165
157 $ cd ..
166 $ cd ..
158
167
159
168
160 dot and subdir commit test
169 dot and subdir commit test
161
170
162 $ hg init test3
171 $ hg init test3
163 $ cd test3
172 $ cd test3
164 $ mkdir foo
173 $ mkdir foo
165 $ echo foo content > foo/plain-file
174 $ echo foo content > foo/plain-file
166 $ hg add foo/plain-file
175 $ hg add foo/plain-file
167 $ hg ci -m commit-foo-subdir foo
176 $ hg ci -m commit-foo-subdir foo
168 $ echo modified foo content > foo/plain-file
177 $ echo modified foo content > foo/plain-file
169 $ hg ci -m commit-foo-dot .
178 $ hg ci -m commit-foo-dot .
170
179
171 full log
180 full log
172
181
173 $ hg log -v
182 $ hg log -v
174 changeset: 1:95b38e3a5b2e
183 changeset: 1:95b38e3a5b2e
175 tag: tip
184 tag: tip
176 user: test
185 user: test
177 date: Thu Jan 01 00:00:00 1970 +0000
186 date: Thu Jan 01 00:00:00 1970 +0000
178 files: foo/plain-file
187 files: foo/plain-file
179 description:
188 description:
180 commit-foo-dot
189 commit-foo-dot
181
190
182
191
183 changeset: 0:65d4e9386227
192 changeset: 0:65d4e9386227
184 user: test
193 user: test
185 date: Thu Jan 01 00:00:00 1970 +0000
194 date: Thu Jan 01 00:00:00 1970 +0000
186 files: foo/plain-file
195 files: foo/plain-file
187 description:
196 description:
188 commit-foo-subdir
197 commit-foo-subdir
189
198
190
199
191
200
192 subdir log
201 subdir log
193
202
194 $ cd foo
203 $ cd foo
195 $ hg log .
204 $ hg log .
196 changeset: 1:95b38e3a5b2e
205 changeset: 1:95b38e3a5b2e
197 tag: tip
206 tag: tip
198 user: test
207 user: test
199 date: Thu Jan 01 00:00:00 1970 +0000
208 date: Thu Jan 01 00:00:00 1970 +0000
200 summary: commit-foo-dot
209 summary: commit-foo-dot
201
210
202 changeset: 0:65d4e9386227
211 changeset: 0:65d4e9386227
203 user: test
212 user: test
204 date: Thu Jan 01 00:00:00 1970 +0000
213 date: Thu Jan 01 00:00:00 1970 +0000
205 summary: commit-foo-subdir
214 summary: commit-foo-subdir
206
215
207 $ cd ..
216 $ cd ..
208 $ cd ..
217 $ cd ..
209
218
210 Issue1049: Hg permits partial commit of merge without warning
219 Issue1049: Hg permits partial commit of merge without warning
211
220
212 $ cd ..
221 $ cd ..
213 $ hg init issue1049
222 $ hg init issue1049
214 $ cd issue1049
223 $ cd issue1049
215 $ echo a > a
224 $ echo a > a
216 $ hg ci -Ama
225 $ hg ci -Ama
217 adding a
226 adding a
218 $ echo a >> a
227 $ echo a >> a
219 $ hg ci -mb
228 $ hg ci -mb
220 $ hg up 0
229 $ hg up 0
221 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
230 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
222 $ echo b >> a
231 $ echo b >> a
223 $ hg ci -mc
232 $ hg ci -mc
224 created new head
233 created new head
225 $ HGMERGE=true hg merge
234 $ HGMERGE=true hg merge
226 merging a
235 merging a
227 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
236 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
228 (branch merge, don't forget to commit)
237 (branch merge, don't forget to commit)
229
238
230 should fail because we are specifying a file name
239 should fail because we are specifying a file name
231
240
232 $ hg ci -mmerge a
241 $ hg ci -mmerge a
233 abort: cannot partially commit a merge (do not specify files or patterns)
242 abort: cannot partially commit a merge (do not specify files or patterns)
234 [255]
243 [255]
235
244
236 should fail because we are specifying a pattern
245 should fail because we are specifying a pattern
237
246
238 $ hg ci -mmerge -I a
247 $ hg ci -mmerge -I a
239 abort: cannot partially commit a merge (do not specify files or patterns)
248 abort: cannot partially commit a merge (do not specify files or patterns)
240 [255]
249 [255]
241
250
242 should succeed
251 should succeed
243
252
244 $ hg ci -mmerge
253 $ hg ci -mmerge
245 $ cd ..
254 $ cd ..
246
255
247
256
248 test commit message content
257 test commit message content
249
258
250 $ hg init commitmsg
259 $ hg init commitmsg
251 $ cd commitmsg
260 $ cd commitmsg
252 $ echo changed > changed
261 $ echo changed > changed
253 $ echo removed > removed
262 $ echo removed > removed
254 $ hg ci -qAm init
263 $ hg ci -qAm init
255
264
256 $ hg rm removed
265 $ hg rm removed
257 $ echo changed >> changed
266 $ echo changed >> changed
258 $ echo added > added
267 $ echo added > added
259 $ hg add added
268 $ hg add added
260 $ HGEDITOR=cat hg ci -A
269 $ HGEDITOR=cat hg ci -A
261
270
262
271
263 HG: Enter commit message. Lines beginning with 'HG:' are removed.
272 HG: Enter commit message. Lines beginning with 'HG:' are removed.
264 HG: Leave message empty to abort commit.
273 HG: Leave message empty to abort commit.
265 HG: --
274 HG: --
266 HG: user: test
275 HG: user: test
267 HG: branch 'default'
276 HG: branch 'default'
268 HG: added added
277 HG: added added
269 HG: changed changed
278 HG: changed changed
270 HG: removed removed
279 HG: removed removed
271 abort: empty commit message
280 abort: empty commit message
272 [255]
281 [255]
@@ -1,69 +1,82 b''
1 $ "$TESTDIR/hghave" no-outer-repo || exit 80
1 $ "$TESTDIR/hghave" no-outer-repo || exit 80
2
2
3 no repo
3 no repo
4
4
5 $ hg id
5 $ hg id
6 abort: there is no Mercurial repository here (.hg not found)
6 abort: there is no Mercurial repository here (.hg not found)
7 [255]
7 [255]
8
8
9 create repo
9 create repo
10
10
11 $ hg init test
11 $ hg init test
12 $ cd test
12 $ cd test
13 $ echo a > a
13 $ echo a > a
14 $ hg ci -Ama
14 $ hg ci -Ama
15 adding a
15 adding a
16
16
17 basic id usage
17 basic id usage
18
18
19 $ hg id
19 $ hg id
20 cb9a9f314b8b tip
20 cb9a9f314b8b tip
21 $ hg id --debug
21 $ hg id --debug
22 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b tip
22 cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b tip
23 $ hg id -q
23 $ hg id -q
24 cb9a9f314b8b
24 cb9a9f314b8b
25 $ hg id -v
25 $ hg id -v
26 cb9a9f314b8b tip
26 cb9a9f314b8b tip
27
27
28 with options
28 with options
29
29
30 $ hg id -r.
30 $ hg id -r.
31 cb9a9f314b8b tip
31 cb9a9f314b8b tip
32 $ hg id -n
32 $ hg id -n
33 0
33 0
34 $ hg id -t
34 $ hg id -t
35 tip
35 tip
36 $ hg id -b
36 $ hg id -b
37 default
37 default
38 $ hg id -i
38 $ hg id -i
39 cb9a9f314b8b
39 cb9a9f314b8b
40 $ hg id -n -t -b -i
40 $ hg id -n -t -b -i
41 cb9a9f314b8b 0 default tip
41 cb9a9f314b8b 0 default tip
42
42
43 with modifications
43 with modifications
44
44
45 $ echo b > a
45 $ echo b > a
46 $ hg id -n -t -b -i
46 $ hg id -n -t -b -i
47 cb9a9f314b8b+ 0+ default tip
47 cb9a9f314b8b+ 0+ default tip
48
48
49 other local repo
49 other local repo
50
50
51 $ cd ..
51 $ cd ..
52 $ hg -R test id
52 $ hg -R test id
53 cb9a9f314b8b+ tip
53 cb9a9f314b8b+ tip
54 $ hg id test
54 $ hg id test
55 cb9a9f314b8b+ tip
55 cb9a9f314b8b+ tip
56
56
57 with remote http repo
57 with remote http repo
58
58
59 $ cd test
59 $ cd test
60 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid
60 $ hg serve -p $HGPORT1 -d --pid-file=hg.pid
61 $ cat hg.pid >> $DAEMON_PIDS
61 $ cat hg.pid >> $DAEMON_PIDS
62 $ hg id http://localhost:$HGPORT1/
62 $ hg id http://localhost:$HGPORT1/
63 cb9a9f314b8b
63 cb9a9f314b8b
64
64
65 remote with tags?
65 remote with tags?
66
66
67 $ hg id -t http://localhost:$HGPORT1/
67 $ hg id -t http://localhost:$HGPORT1/
68 abort: can't query remote revision number, branch, or tags
68 abort: can't query remote revision number, branch, or tags
69 [255]
69 [255]
70
71 Make sure we do not obscure unknown requires file entries (issue2649)
72
73 $ echo fake >> .hg/requires
74 $ hg id
75 abort: requirement 'fake' not supported!
76 [255]
77
78 $ cd ..
79 $ hg id test
80 abort: requirement 'fake' not supported!
81 [255]
82
General Comments 0
You need to be logged in to leave comments. Login now