##// END OF EJS Templates
revert: apply normallookup on reverted file if size isn't changed (issue4583)...
FUJIWARA Katsunori -
r24843:21b33f04 stable
parent child Browse files
Show More
@@ -1,3243 +1,3247 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile, cStringIO, shutil
10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import encoding
16 import encoding
17 import crecord as crecordmod
17 import crecord as crecordmod
18 import lock as lockmod
18 import lock as lockmod
19
19
20 def parsealiases(cmd):
20 def parsealiases(cmd):
21 return cmd.lstrip("^").split("|")
21 return cmd.lstrip("^").split("|")
22
22
23 def setupwrapcolorwrite(ui):
23 def setupwrapcolorwrite(ui):
24 # wrap ui.write so diff output can be labeled/colorized
24 # wrap ui.write so diff output can be labeled/colorized
25 def wrapwrite(orig, *args, **kw):
25 def wrapwrite(orig, *args, **kw):
26 label = kw.pop('label', '')
26 label = kw.pop('label', '')
27 for chunk, l in patch.difflabel(lambda: args):
27 for chunk, l in patch.difflabel(lambda: args):
28 orig(chunk, label=label + l)
28 orig(chunk, label=label + l)
29
29
30 oldwrite = ui.write
30 oldwrite = ui.write
31 def wrap(*args, **kwargs):
31 def wrap(*args, **kwargs):
32 return wrapwrite(oldwrite, *args, **kwargs)
32 return wrapwrite(oldwrite, *args, **kwargs)
33 setattr(ui, 'write', wrap)
33 setattr(ui, 'write', wrap)
34 return oldwrite
34 return oldwrite
35
35
36 def filterchunks(ui, originalhunks, usecurses, testfile):
36 def filterchunks(ui, originalhunks, usecurses, testfile):
37 if usecurses:
37 if usecurses:
38 if testfile:
38 if testfile:
39 recordfn = crecordmod.testdecorator(testfile,
39 recordfn = crecordmod.testdecorator(testfile,
40 crecordmod.testchunkselector)
40 crecordmod.testchunkselector)
41 else:
41 else:
42 recordfn = crecordmod.chunkselector
42 recordfn = crecordmod.chunkselector
43
43
44 return crecordmod.filterpatch(ui, originalhunks, recordfn)
44 return crecordmod.filterpatch(ui, originalhunks, recordfn)
45
45
46 else:
46 else:
47 return patch.filterpatch(ui, originalhunks)
47 return patch.filterpatch(ui, originalhunks)
48
48
49 def recordfilter(ui, originalhunks):
49 def recordfilter(ui, originalhunks):
50 usecurses = ui.configbool('experimental', 'crecord', False)
50 usecurses = ui.configbool('experimental', 'crecord', False)
51 testfile = ui.config('experimental', 'crecordtest', None)
51 testfile = ui.config('experimental', 'crecordtest', None)
52 oldwrite = setupwrapcolorwrite(ui)
52 oldwrite = setupwrapcolorwrite(ui)
53 try:
53 try:
54 newchunks = filterchunks(ui, originalhunks, usecurses, testfile)
54 newchunks = filterchunks(ui, originalhunks, usecurses, testfile)
55 finally:
55 finally:
56 ui.write = oldwrite
56 ui.write = oldwrite
57 return newchunks
57 return newchunks
58
58
59 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
59 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
60 filterfn, *pats, **opts):
60 filterfn, *pats, **opts):
61 import merge as mergemod
61 import merge as mergemod
62
62
63 if not ui.interactive():
63 if not ui.interactive():
64 raise util.Abort(_('running non-interactively, use %s instead') %
64 raise util.Abort(_('running non-interactively, use %s instead') %
65 cmdsuggest)
65 cmdsuggest)
66
66
67 # make sure username is set before going interactive
67 # make sure username is set before going interactive
68 if not opts.get('user'):
68 if not opts.get('user'):
69 ui.username() # raise exception, username not provided
69 ui.username() # raise exception, username not provided
70
70
71 def recordfunc(ui, repo, message, match, opts):
71 def recordfunc(ui, repo, message, match, opts):
72 """This is generic record driver.
72 """This is generic record driver.
73
73
74 Its job is to interactively filter local changes, and
74 Its job is to interactively filter local changes, and
75 accordingly prepare working directory into a state in which the
75 accordingly prepare working directory into a state in which the
76 job can be delegated to a non-interactive commit command such as
76 job can be delegated to a non-interactive commit command such as
77 'commit' or 'qrefresh'.
77 'commit' or 'qrefresh'.
78
78
79 After the actual job is done by non-interactive command, the
79 After the actual job is done by non-interactive command, the
80 working directory is restored to its original state.
80 working directory is restored to its original state.
81
81
82 In the end we'll record interesting changes, and everything else
82 In the end we'll record interesting changes, and everything else
83 will be left in place, so the user can continue working.
83 will be left in place, so the user can continue working.
84 """
84 """
85
85
86 checkunfinished(repo, commit=True)
86 checkunfinished(repo, commit=True)
87 merge = len(repo[None].parents()) > 1
87 merge = len(repo[None].parents()) > 1
88 if merge:
88 if merge:
89 raise util.Abort(_('cannot partially commit a merge '
89 raise util.Abort(_('cannot partially commit a merge '
90 '(use "hg commit" instead)'))
90 '(use "hg commit" instead)'))
91
91
92 status = repo.status(match=match)
92 status = repo.status(match=match)
93 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
93 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
94 diffopts.nodates = True
94 diffopts.nodates = True
95 diffopts.git = True
95 diffopts.git = True
96 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
96 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
97 originalchunks = patch.parsepatch(originaldiff)
97 originalchunks = patch.parsepatch(originaldiff)
98
98
99 # 1. filter patch, so we have intending-to apply subset of it
99 # 1. filter patch, so we have intending-to apply subset of it
100 try:
100 try:
101 chunks = filterfn(ui, originalchunks)
101 chunks = filterfn(ui, originalchunks)
102 except patch.PatchError, err:
102 except patch.PatchError, err:
103 raise util.Abort(_('error parsing patch: %s') % err)
103 raise util.Abort(_('error parsing patch: %s') % err)
104
104
105 contenders = set()
105 contenders = set()
106 for h in chunks:
106 for h in chunks:
107 try:
107 try:
108 contenders.update(set(h.files()))
108 contenders.update(set(h.files()))
109 except AttributeError:
109 except AttributeError:
110 pass
110 pass
111
111
112 changed = status.modified + status.added + status.removed
112 changed = status.modified + status.added + status.removed
113 newfiles = [f for f in changed if f in contenders]
113 newfiles = [f for f in changed if f in contenders]
114 if not newfiles:
114 if not newfiles:
115 ui.status(_('no changes to record\n'))
115 ui.status(_('no changes to record\n'))
116 return 0
116 return 0
117
117
118 modified = set(status.modified)
118 modified = set(status.modified)
119
119
120 # 2. backup changed files, so we can restore them in the end
120 # 2. backup changed files, so we can restore them in the end
121
121
122 if backupall:
122 if backupall:
123 tobackup = changed
123 tobackup = changed
124 else:
124 else:
125 tobackup = [f for f in newfiles if f in modified]
125 tobackup = [f for f in newfiles if f in modified]
126
126
127 backups = {}
127 backups = {}
128 if tobackup:
128 if tobackup:
129 backupdir = repo.join('record-backups')
129 backupdir = repo.join('record-backups')
130 try:
130 try:
131 os.mkdir(backupdir)
131 os.mkdir(backupdir)
132 except OSError, err:
132 except OSError, err:
133 if err.errno != errno.EEXIST:
133 if err.errno != errno.EEXIST:
134 raise
134 raise
135 try:
135 try:
136 # backup continues
136 # backup continues
137 for f in tobackup:
137 for f in tobackup:
138 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
138 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
139 dir=backupdir)
139 dir=backupdir)
140 os.close(fd)
140 os.close(fd)
141 ui.debug('backup %r as %r\n' % (f, tmpname))
141 ui.debug('backup %r as %r\n' % (f, tmpname))
142 util.copyfile(repo.wjoin(f), tmpname)
142 util.copyfile(repo.wjoin(f), tmpname)
143 shutil.copystat(repo.wjoin(f), tmpname)
143 shutil.copystat(repo.wjoin(f), tmpname)
144 backups[f] = tmpname
144 backups[f] = tmpname
145
145
146 fp = cStringIO.StringIO()
146 fp = cStringIO.StringIO()
147 for c in chunks:
147 for c in chunks:
148 fname = c.filename()
148 fname = c.filename()
149 if fname in backups:
149 if fname in backups:
150 c.write(fp)
150 c.write(fp)
151 dopatch = fp.tell()
151 dopatch = fp.tell()
152 fp.seek(0)
152 fp.seek(0)
153
153
154 # 3a. apply filtered patch to clean repo (clean)
154 # 3a. apply filtered patch to clean repo (clean)
155 if backups:
155 if backups:
156 # Equivalent to hg.revert
156 # Equivalent to hg.revert
157 choices = lambda key: key in backups
157 choices = lambda key: key in backups
158 mergemod.update(repo, repo.dirstate.p1(),
158 mergemod.update(repo, repo.dirstate.p1(),
159 False, True, choices)
159 False, True, choices)
160
160
161 # 3b. (apply)
161 # 3b. (apply)
162 if dopatch:
162 if dopatch:
163 try:
163 try:
164 ui.debug('applying patch\n')
164 ui.debug('applying patch\n')
165 ui.debug(fp.getvalue())
165 ui.debug(fp.getvalue())
166 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
166 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
167 except patch.PatchError, err:
167 except patch.PatchError, err:
168 raise util.Abort(str(err))
168 raise util.Abort(str(err))
169 del fp
169 del fp
170
170
171 # 4. We prepared working directory according to filtered
171 # 4. We prepared working directory according to filtered
172 # patch. Now is the time to delegate the job to
172 # patch. Now is the time to delegate the job to
173 # commit/qrefresh or the like!
173 # commit/qrefresh or the like!
174
174
175 # Make all of the pathnames absolute.
175 # Make all of the pathnames absolute.
176 newfiles = [repo.wjoin(nf) for nf in newfiles]
176 newfiles = [repo.wjoin(nf) for nf in newfiles]
177 return commitfunc(ui, repo, *newfiles, **opts)
177 return commitfunc(ui, repo, *newfiles, **opts)
178 finally:
178 finally:
179 # 5. finally restore backed-up files
179 # 5. finally restore backed-up files
180 try:
180 try:
181 for realname, tmpname in backups.iteritems():
181 for realname, tmpname in backups.iteritems():
182 ui.debug('restoring %r to %r\n' % (tmpname, realname))
182 ui.debug('restoring %r to %r\n' % (tmpname, realname))
183 util.copyfile(tmpname, repo.wjoin(realname))
183 util.copyfile(tmpname, repo.wjoin(realname))
184 # Our calls to copystat() here and above are a
184 # Our calls to copystat() here and above are a
185 # hack to trick any editors that have f open that
185 # hack to trick any editors that have f open that
186 # we haven't modified them.
186 # we haven't modified them.
187 #
187 #
188 # Also note that this racy as an editor could
188 # Also note that this racy as an editor could
189 # notice the file's mtime before we've finished
189 # notice the file's mtime before we've finished
190 # writing it.
190 # writing it.
191 shutil.copystat(tmpname, repo.wjoin(realname))
191 shutil.copystat(tmpname, repo.wjoin(realname))
192 os.unlink(tmpname)
192 os.unlink(tmpname)
193 if tobackup:
193 if tobackup:
194 os.rmdir(backupdir)
194 os.rmdir(backupdir)
195 except OSError:
195 except OSError:
196 pass
196 pass
197
197
198 return commit(ui, repo, recordfunc, pats, opts)
198 return commit(ui, repo, recordfunc, pats, opts)
199
199
200 def findpossible(cmd, table, strict=False):
200 def findpossible(cmd, table, strict=False):
201 """
201 """
202 Return cmd -> (aliases, command table entry)
202 Return cmd -> (aliases, command table entry)
203 for each matching command.
203 for each matching command.
204 Return debug commands (or their aliases) only if no normal command matches.
204 Return debug commands (or their aliases) only if no normal command matches.
205 """
205 """
206 choice = {}
206 choice = {}
207 debugchoice = {}
207 debugchoice = {}
208
208
209 if cmd in table:
209 if cmd in table:
210 # short-circuit exact matches, "log" alias beats "^log|history"
210 # short-circuit exact matches, "log" alias beats "^log|history"
211 keys = [cmd]
211 keys = [cmd]
212 else:
212 else:
213 keys = table.keys()
213 keys = table.keys()
214
214
215 allcmds = []
215 allcmds = []
216 for e in keys:
216 for e in keys:
217 aliases = parsealiases(e)
217 aliases = parsealiases(e)
218 allcmds.extend(aliases)
218 allcmds.extend(aliases)
219 found = None
219 found = None
220 if cmd in aliases:
220 if cmd in aliases:
221 found = cmd
221 found = cmd
222 elif not strict:
222 elif not strict:
223 for a in aliases:
223 for a in aliases:
224 if a.startswith(cmd):
224 if a.startswith(cmd):
225 found = a
225 found = a
226 break
226 break
227 if found is not None:
227 if found is not None:
228 if aliases[0].startswith("debug") or found.startswith("debug"):
228 if aliases[0].startswith("debug") or found.startswith("debug"):
229 debugchoice[found] = (aliases, table[e])
229 debugchoice[found] = (aliases, table[e])
230 else:
230 else:
231 choice[found] = (aliases, table[e])
231 choice[found] = (aliases, table[e])
232
232
233 if not choice and debugchoice:
233 if not choice and debugchoice:
234 choice = debugchoice
234 choice = debugchoice
235
235
236 return choice, allcmds
236 return choice, allcmds
237
237
238 def findcmd(cmd, table, strict=True):
238 def findcmd(cmd, table, strict=True):
239 """Return (aliases, command table entry) for command string."""
239 """Return (aliases, command table entry) for command string."""
240 choice, allcmds = findpossible(cmd, table, strict)
240 choice, allcmds = findpossible(cmd, table, strict)
241
241
242 if cmd in choice:
242 if cmd in choice:
243 return choice[cmd]
243 return choice[cmd]
244
244
245 if len(choice) > 1:
245 if len(choice) > 1:
246 clist = choice.keys()
246 clist = choice.keys()
247 clist.sort()
247 clist.sort()
248 raise error.AmbiguousCommand(cmd, clist)
248 raise error.AmbiguousCommand(cmd, clist)
249
249
250 if choice:
250 if choice:
251 return choice.values()[0]
251 return choice.values()[0]
252
252
253 raise error.UnknownCommand(cmd, allcmds)
253 raise error.UnknownCommand(cmd, allcmds)
254
254
255 def findrepo(p):
255 def findrepo(p):
256 while not os.path.isdir(os.path.join(p, ".hg")):
256 while not os.path.isdir(os.path.join(p, ".hg")):
257 oldp, p = p, os.path.dirname(p)
257 oldp, p = p, os.path.dirname(p)
258 if p == oldp:
258 if p == oldp:
259 return None
259 return None
260
260
261 return p
261 return p
262
262
263 def bailifchanged(repo, merge=True):
263 def bailifchanged(repo, merge=True):
264 if merge and repo.dirstate.p2() != nullid:
264 if merge and repo.dirstate.p2() != nullid:
265 raise util.Abort(_('outstanding uncommitted merge'))
265 raise util.Abort(_('outstanding uncommitted merge'))
266 modified, added, removed, deleted = repo.status()[:4]
266 modified, added, removed, deleted = repo.status()[:4]
267 if modified or added or removed or deleted:
267 if modified or added or removed or deleted:
268 raise util.Abort(_('uncommitted changes'))
268 raise util.Abort(_('uncommitted changes'))
269 ctx = repo[None]
269 ctx = repo[None]
270 for s in sorted(ctx.substate):
270 for s in sorted(ctx.substate):
271 ctx.sub(s).bailifchanged()
271 ctx.sub(s).bailifchanged()
272
272
273 def logmessage(ui, opts):
273 def logmessage(ui, opts):
274 """ get the log message according to -m and -l option """
274 """ get the log message according to -m and -l option """
275 message = opts.get('message')
275 message = opts.get('message')
276 logfile = opts.get('logfile')
276 logfile = opts.get('logfile')
277
277
278 if message and logfile:
278 if message and logfile:
279 raise util.Abort(_('options --message and --logfile are mutually '
279 raise util.Abort(_('options --message and --logfile are mutually '
280 'exclusive'))
280 'exclusive'))
281 if not message and logfile:
281 if not message and logfile:
282 try:
282 try:
283 if logfile == '-':
283 if logfile == '-':
284 message = ui.fin.read()
284 message = ui.fin.read()
285 else:
285 else:
286 message = '\n'.join(util.readfile(logfile).splitlines())
286 message = '\n'.join(util.readfile(logfile).splitlines())
287 except IOError, inst:
287 except IOError, inst:
288 raise util.Abort(_("can't read commit message '%s': %s") %
288 raise util.Abort(_("can't read commit message '%s': %s") %
289 (logfile, inst.strerror))
289 (logfile, inst.strerror))
290 return message
290 return message
291
291
292 def mergeeditform(ctxorbool, baseformname):
292 def mergeeditform(ctxorbool, baseformname):
293 """return appropriate editform name (referencing a committemplate)
293 """return appropriate editform name (referencing a committemplate)
294
294
295 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
295 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
296 merging is committed.
296 merging is committed.
297
297
298 This returns baseformname with '.merge' appended if it is a merge,
298 This returns baseformname with '.merge' appended if it is a merge,
299 otherwise '.normal' is appended.
299 otherwise '.normal' is appended.
300 """
300 """
301 if isinstance(ctxorbool, bool):
301 if isinstance(ctxorbool, bool):
302 if ctxorbool:
302 if ctxorbool:
303 return baseformname + ".merge"
303 return baseformname + ".merge"
304 elif 1 < len(ctxorbool.parents()):
304 elif 1 < len(ctxorbool.parents()):
305 return baseformname + ".merge"
305 return baseformname + ".merge"
306
306
307 return baseformname + ".normal"
307 return baseformname + ".normal"
308
308
309 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
309 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
310 editform='', **opts):
310 editform='', **opts):
311 """get appropriate commit message editor according to '--edit' option
311 """get appropriate commit message editor according to '--edit' option
312
312
313 'finishdesc' is a function to be called with edited commit message
313 'finishdesc' is a function to be called with edited commit message
314 (= 'description' of the new changeset) just after editing, but
314 (= 'description' of the new changeset) just after editing, but
315 before checking empty-ness. It should return actual text to be
315 before checking empty-ness. It should return actual text to be
316 stored into history. This allows to change description before
316 stored into history. This allows to change description before
317 storing.
317 storing.
318
318
319 'extramsg' is a extra message to be shown in the editor instead of
319 'extramsg' is a extra message to be shown in the editor instead of
320 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
320 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
321 is automatically added.
321 is automatically added.
322
322
323 'editform' is a dot-separated list of names, to distinguish
323 'editform' is a dot-separated list of names, to distinguish
324 the purpose of commit text editing.
324 the purpose of commit text editing.
325
325
326 'getcommiteditor' returns 'commitforceeditor' regardless of
326 'getcommiteditor' returns 'commitforceeditor' regardless of
327 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
327 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
328 they are specific for usage in MQ.
328 they are specific for usage in MQ.
329 """
329 """
330 if edit or finishdesc or extramsg:
330 if edit or finishdesc or extramsg:
331 return lambda r, c, s: commitforceeditor(r, c, s,
331 return lambda r, c, s: commitforceeditor(r, c, s,
332 finishdesc=finishdesc,
332 finishdesc=finishdesc,
333 extramsg=extramsg,
333 extramsg=extramsg,
334 editform=editform)
334 editform=editform)
335 elif editform:
335 elif editform:
336 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
336 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
337 else:
337 else:
338 return commiteditor
338 return commiteditor
339
339
340 def loglimit(opts):
340 def loglimit(opts):
341 """get the log limit according to option -l/--limit"""
341 """get the log limit according to option -l/--limit"""
342 limit = opts.get('limit')
342 limit = opts.get('limit')
343 if limit:
343 if limit:
344 try:
344 try:
345 limit = int(limit)
345 limit = int(limit)
346 except ValueError:
346 except ValueError:
347 raise util.Abort(_('limit must be a positive integer'))
347 raise util.Abort(_('limit must be a positive integer'))
348 if limit <= 0:
348 if limit <= 0:
349 raise util.Abort(_('limit must be positive'))
349 raise util.Abort(_('limit must be positive'))
350 else:
350 else:
351 limit = None
351 limit = None
352 return limit
352 return limit
353
353
354 def makefilename(repo, pat, node, desc=None,
354 def makefilename(repo, pat, node, desc=None,
355 total=None, seqno=None, revwidth=None, pathname=None):
355 total=None, seqno=None, revwidth=None, pathname=None):
356 node_expander = {
356 node_expander = {
357 'H': lambda: hex(node),
357 'H': lambda: hex(node),
358 'R': lambda: str(repo.changelog.rev(node)),
358 'R': lambda: str(repo.changelog.rev(node)),
359 'h': lambda: short(node),
359 'h': lambda: short(node),
360 'm': lambda: re.sub('[^\w]', '_', str(desc))
360 'm': lambda: re.sub('[^\w]', '_', str(desc))
361 }
361 }
362 expander = {
362 expander = {
363 '%': lambda: '%',
363 '%': lambda: '%',
364 'b': lambda: os.path.basename(repo.root),
364 'b': lambda: os.path.basename(repo.root),
365 }
365 }
366
366
367 try:
367 try:
368 if node:
368 if node:
369 expander.update(node_expander)
369 expander.update(node_expander)
370 if node:
370 if node:
371 expander['r'] = (lambda:
371 expander['r'] = (lambda:
372 str(repo.changelog.rev(node)).zfill(revwidth or 0))
372 str(repo.changelog.rev(node)).zfill(revwidth or 0))
373 if total is not None:
373 if total is not None:
374 expander['N'] = lambda: str(total)
374 expander['N'] = lambda: str(total)
375 if seqno is not None:
375 if seqno is not None:
376 expander['n'] = lambda: str(seqno)
376 expander['n'] = lambda: str(seqno)
377 if total is not None and seqno is not None:
377 if total is not None and seqno is not None:
378 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
378 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
379 if pathname is not None:
379 if pathname is not None:
380 expander['s'] = lambda: os.path.basename(pathname)
380 expander['s'] = lambda: os.path.basename(pathname)
381 expander['d'] = lambda: os.path.dirname(pathname) or '.'
381 expander['d'] = lambda: os.path.dirname(pathname) or '.'
382 expander['p'] = lambda: pathname
382 expander['p'] = lambda: pathname
383
383
384 newname = []
384 newname = []
385 patlen = len(pat)
385 patlen = len(pat)
386 i = 0
386 i = 0
387 while i < patlen:
387 while i < patlen:
388 c = pat[i]
388 c = pat[i]
389 if c == '%':
389 if c == '%':
390 i += 1
390 i += 1
391 c = pat[i]
391 c = pat[i]
392 c = expander[c]()
392 c = expander[c]()
393 newname.append(c)
393 newname.append(c)
394 i += 1
394 i += 1
395 return ''.join(newname)
395 return ''.join(newname)
396 except KeyError, inst:
396 except KeyError, inst:
397 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
397 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
398 inst.args[0])
398 inst.args[0])
399
399
400 def makefileobj(repo, pat, node=None, desc=None, total=None,
400 def makefileobj(repo, pat, node=None, desc=None, total=None,
401 seqno=None, revwidth=None, mode='wb', modemap=None,
401 seqno=None, revwidth=None, mode='wb', modemap=None,
402 pathname=None):
402 pathname=None):
403
403
404 writable = mode not in ('r', 'rb')
404 writable = mode not in ('r', 'rb')
405
405
406 if not pat or pat == '-':
406 if not pat or pat == '-':
407 if writable:
407 if writable:
408 fp = repo.ui.fout
408 fp = repo.ui.fout
409 else:
409 else:
410 fp = repo.ui.fin
410 fp = repo.ui.fin
411 if util.safehasattr(fp, 'fileno'):
411 if util.safehasattr(fp, 'fileno'):
412 return os.fdopen(os.dup(fp.fileno()), mode)
412 return os.fdopen(os.dup(fp.fileno()), mode)
413 else:
413 else:
414 # if this fp can't be duped properly, return
414 # if this fp can't be duped properly, return
415 # a dummy object that can be closed
415 # a dummy object that can be closed
416 class wrappedfileobj(object):
416 class wrappedfileobj(object):
417 noop = lambda x: None
417 noop = lambda x: None
418 def __init__(self, f):
418 def __init__(self, f):
419 self.f = f
419 self.f = f
420 def __getattr__(self, attr):
420 def __getattr__(self, attr):
421 if attr == 'close':
421 if attr == 'close':
422 return self.noop
422 return self.noop
423 else:
423 else:
424 return getattr(self.f, attr)
424 return getattr(self.f, attr)
425
425
426 return wrappedfileobj(fp)
426 return wrappedfileobj(fp)
427 if util.safehasattr(pat, 'write') and writable:
427 if util.safehasattr(pat, 'write') and writable:
428 return pat
428 return pat
429 if util.safehasattr(pat, 'read') and 'r' in mode:
429 if util.safehasattr(pat, 'read') and 'r' in mode:
430 return pat
430 return pat
431 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
431 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
432 if modemap is not None:
432 if modemap is not None:
433 mode = modemap.get(fn, mode)
433 mode = modemap.get(fn, mode)
434 if mode == 'wb':
434 if mode == 'wb':
435 modemap[fn] = 'ab'
435 modemap[fn] = 'ab'
436 return open(fn, mode)
436 return open(fn, mode)
437
437
438 def openrevlog(repo, cmd, file_, opts):
438 def openrevlog(repo, cmd, file_, opts):
439 """opens the changelog, manifest, a filelog or a given revlog"""
439 """opens the changelog, manifest, a filelog or a given revlog"""
440 cl = opts['changelog']
440 cl = opts['changelog']
441 mf = opts['manifest']
441 mf = opts['manifest']
442 msg = None
442 msg = None
443 if cl and mf:
443 if cl and mf:
444 msg = _('cannot specify --changelog and --manifest at the same time')
444 msg = _('cannot specify --changelog and --manifest at the same time')
445 elif cl or mf:
445 elif cl or mf:
446 if file_:
446 if file_:
447 msg = _('cannot specify filename with --changelog or --manifest')
447 msg = _('cannot specify filename with --changelog or --manifest')
448 elif not repo:
448 elif not repo:
449 msg = _('cannot specify --changelog or --manifest '
449 msg = _('cannot specify --changelog or --manifest '
450 'without a repository')
450 'without a repository')
451 if msg:
451 if msg:
452 raise util.Abort(msg)
452 raise util.Abort(msg)
453
453
454 r = None
454 r = None
455 if repo:
455 if repo:
456 if cl:
456 if cl:
457 r = repo.unfiltered().changelog
457 r = repo.unfiltered().changelog
458 elif mf:
458 elif mf:
459 r = repo.manifest
459 r = repo.manifest
460 elif file_:
460 elif file_:
461 filelog = repo.file(file_)
461 filelog = repo.file(file_)
462 if len(filelog):
462 if len(filelog):
463 r = filelog
463 r = filelog
464 if not r:
464 if not r:
465 if not file_:
465 if not file_:
466 raise error.CommandError(cmd, _('invalid arguments'))
466 raise error.CommandError(cmd, _('invalid arguments'))
467 if not os.path.isfile(file_):
467 if not os.path.isfile(file_):
468 raise util.Abort(_("revlog '%s' not found") % file_)
468 raise util.Abort(_("revlog '%s' not found") % file_)
469 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
469 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
470 file_[:-2] + ".i")
470 file_[:-2] + ".i")
471 return r
471 return r
472
472
473 def copy(ui, repo, pats, opts, rename=False):
473 def copy(ui, repo, pats, opts, rename=False):
474 # called with the repo lock held
474 # called with the repo lock held
475 #
475 #
476 # hgsep => pathname that uses "/" to separate directories
476 # hgsep => pathname that uses "/" to separate directories
477 # ossep => pathname that uses os.sep to separate directories
477 # ossep => pathname that uses os.sep to separate directories
478 cwd = repo.getcwd()
478 cwd = repo.getcwd()
479 targets = {}
479 targets = {}
480 after = opts.get("after")
480 after = opts.get("after")
481 dryrun = opts.get("dry_run")
481 dryrun = opts.get("dry_run")
482 wctx = repo[None]
482 wctx = repo[None]
483
483
484 def walkpat(pat):
484 def walkpat(pat):
485 srcs = []
485 srcs = []
486 if after:
486 if after:
487 badstates = '?'
487 badstates = '?'
488 else:
488 else:
489 badstates = '?r'
489 badstates = '?r'
490 m = scmutil.match(repo[None], [pat], opts, globbed=True)
490 m = scmutil.match(repo[None], [pat], opts, globbed=True)
491 for abs in repo.walk(m):
491 for abs in repo.walk(m):
492 state = repo.dirstate[abs]
492 state = repo.dirstate[abs]
493 rel = m.rel(abs)
493 rel = m.rel(abs)
494 exact = m.exact(abs)
494 exact = m.exact(abs)
495 if state in badstates:
495 if state in badstates:
496 if exact and state == '?':
496 if exact and state == '?':
497 ui.warn(_('%s: not copying - file is not managed\n') % rel)
497 ui.warn(_('%s: not copying - file is not managed\n') % rel)
498 if exact and state == 'r':
498 if exact and state == 'r':
499 ui.warn(_('%s: not copying - file has been marked for'
499 ui.warn(_('%s: not copying - file has been marked for'
500 ' remove\n') % rel)
500 ' remove\n') % rel)
501 continue
501 continue
502 # abs: hgsep
502 # abs: hgsep
503 # rel: ossep
503 # rel: ossep
504 srcs.append((abs, rel, exact))
504 srcs.append((abs, rel, exact))
505 return srcs
505 return srcs
506
506
507 # abssrc: hgsep
507 # abssrc: hgsep
508 # relsrc: ossep
508 # relsrc: ossep
509 # otarget: ossep
509 # otarget: ossep
510 def copyfile(abssrc, relsrc, otarget, exact):
510 def copyfile(abssrc, relsrc, otarget, exact):
511 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
511 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
512 if '/' in abstarget:
512 if '/' in abstarget:
513 # We cannot normalize abstarget itself, this would prevent
513 # We cannot normalize abstarget itself, this would prevent
514 # case only renames, like a => A.
514 # case only renames, like a => A.
515 abspath, absname = abstarget.rsplit('/', 1)
515 abspath, absname = abstarget.rsplit('/', 1)
516 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
516 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
517 reltarget = repo.pathto(abstarget, cwd)
517 reltarget = repo.pathto(abstarget, cwd)
518 target = repo.wjoin(abstarget)
518 target = repo.wjoin(abstarget)
519 src = repo.wjoin(abssrc)
519 src = repo.wjoin(abssrc)
520 state = repo.dirstate[abstarget]
520 state = repo.dirstate[abstarget]
521
521
522 scmutil.checkportable(ui, abstarget)
522 scmutil.checkportable(ui, abstarget)
523
523
524 # check for collisions
524 # check for collisions
525 prevsrc = targets.get(abstarget)
525 prevsrc = targets.get(abstarget)
526 if prevsrc is not None:
526 if prevsrc is not None:
527 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
527 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
528 (reltarget, repo.pathto(abssrc, cwd),
528 (reltarget, repo.pathto(abssrc, cwd),
529 repo.pathto(prevsrc, cwd)))
529 repo.pathto(prevsrc, cwd)))
530 return
530 return
531
531
532 # check for overwrites
532 # check for overwrites
533 exists = os.path.lexists(target)
533 exists = os.path.lexists(target)
534 samefile = False
534 samefile = False
535 if exists and abssrc != abstarget:
535 if exists and abssrc != abstarget:
536 if (repo.dirstate.normalize(abssrc) ==
536 if (repo.dirstate.normalize(abssrc) ==
537 repo.dirstate.normalize(abstarget)):
537 repo.dirstate.normalize(abstarget)):
538 if not rename:
538 if not rename:
539 ui.warn(_("%s: can't copy - same file\n") % reltarget)
539 ui.warn(_("%s: can't copy - same file\n") % reltarget)
540 return
540 return
541 exists = False
541 exists = False
542 samefile = True
542 samefile = True
543
543
544 if not after and exists or after and state in 'mn':
544 if not after and exists or after and state in 'mn':
545 if not opts['force']:
545 if not opts['force']:
546 ui.warn(_('%s: not overwriting - file exists\n') %
546 ui.warn(_('%s: not overwriting - file exists\n') %
547 reltarget)
547 reltarget)
548 return
548 return
549
549
550 if after:
550 if after:
551 if not exists:
551 if not exists:
552 if rename:
552 if rename:
553 ui.warn(_('%s: not recording move - %s does not exist\n') %
553 ui.warn(_('%s: not recording move - %s does not exist\n') %
554 (relsrc, reltarget))
554 (relsrc, reltarget))
555 else:
555 else:
556 ui.warn(_('%s: not recording copy - %s does not exist\n') %
556 ui.warn(_('%s: not recording copy - %s does not exist\n') %
557 (relsrc, reltarget))
557 (relsrc, reltarget))
558 return
558 return
559 elif not dryrun:
559 elif not dryrun:
560 try:
560 try:
561 if exists:
561 if exists:
562 os.unlink(target)
562 os.unlink(target)
563 targetdir = os.path.dirname(target) or '.'
563 targetdir = os.path.dirname(target) or '.'
564 if not os.path.isdir(targetdir):
564 if not os.path.isdir(targetdir):
565 os.makedirs(targetdir)
565 os.makedirs(targetdir)
566 if samefile:
566 if samefile:
567 tmp = target + "~hgrename"
567 tmp = target + "~hgrename"
568 os.rename(src, tmp)
568 os.rename(src, tmp)
569 os.rename(tmp, target)
569 os.rename(tmp, target)
570 else:
570 else:
571 util.copyfile(src, target)
571 util.copyfile(src, target)
572 srcexists = True
572 srcexists = True
573 except IOError, inst:
573 except IOError, inst:
574 if inst.errno == errno.ENOENT:
574 if inst.errno == errno.ENOENT:
575 ui.warn(_('%s: deleted in working directory\n') % relsrc)
575 ui.warn(_('%s: deleted in working directory\n') % relsrc)
576 srcexists = False
576 srcexists = False
577 else:
577 else:
578 ui.warn(_('%s: cannot copy - %s\n') %
578 ui.warn(_('%s: cannot copy - %s\n') %
579 (relsrc, inst.strerror))
579 (relsrc, inst.strerror))
580 return True # report a failure
580 return True # report a failure
581
581
582 if ui.verbose or not exact:
582 if ui.verbose or not exact:
583 if rename:
583 if rename:
584 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
584 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
585 else:
585 else:
586 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
586 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
587
587
588 targets[abstarget] = abssrc
588 targets[abstarget] = abssrc
589
589
590 # fix up dirstate
590 # fix up dirstate
591 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
591 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
592 dryrun=dryrun, cwd=cwd)
592 dryrun=dryrun, cwd=cwd)
593 if rename and not dryrun:
593 if rename and not dryrun:
594 if not after and srcexists and not samefile:
594 if not after and srcexists and not samefile:
595 util.unlinkpath(repo.wjoin(abssrc))
595 util.unlinkpath(repo.wjoin(abssrc))
596 wctx.forget([abssrc])
596 wctx.forget([abssrc])
597
597
598 # pat: ossep
598 # pat: ossep
599 # dest ossep
599 # dest ossep
600 # srcs: list of (hgsep, hgsep, ossep, bool)
600 # srcs: list of (hgsep, hgsep, ossep, bool)
601 # return: function that takes hgsep and returns ossep
601 # return: function that takes hgsep and returns ossep
602 def targetpathfn(pat, dest, srcs):
602 def targetpathfn(pat, dest, srcs):
603 if os.path.isdir(pat):
603 if os.path.isdir(pat):
604 abspfx = pathutil.canonpath(repo.root, cwd, pat)
604 abspfx = pathutil.canonpath(repo.root, cwd, pat)
605 abspfx = util.localpath(abspfx)
605 abspfx = util.localpath(abspfx)
606 if destdirexists:
606 if destdirexists:
607 striplen = len(os.path.split(abspfx)[0])
607 striplen = len(os.path.split(abspfx)[0])
608 else:
608 else:
609 striplen = len(abspfx)
609 striplen = len(abspfx)
610 if striplen:
610 if striplen:
611 striplen += len(os.sep)
611 striplen += len(os.sep)
612 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
612 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
613 elif destdirexists:
613 elif destdirexists:
614 res = lambda p: os.path.join(dest,
614 res = lambda p: os.path.join(dest,
615 os.path.basename(util.localpath(p)))
615 os.path.basename(util.localpath(p)))
616 else:
616 else:
617 res = lambda p: dest
617 res = lambda p: dest
618 return res
618 return res
619
619
620 # pat: ossep
620 # pat: ossep
621 # dest ossep
621 # dest ossep
622 # srcs: list of (hgsep, hgsep, ossep, bool)
622 # srcs: list of (hgsep, hgsep, ossep, bool)
623 # return: function that takes hgsep and returns ossep
623 # return: function that takes hgsep and returns ossep
624 def targetpathafterfn(pat, dest, srcs):
624 def targetpathafterfn(pat, dest, srcs):
625 if matchmod.patkind(pat):
625 if matchmod.patkind(pat):
626 # a mercurial pattern
626 # a mercurial pattern
627 res = lambda p: os.path.join(dest,
627 res = lambda p: os.path.join(dest,
628 os.path.basename(util.localpath(p)))
628 os.path.basename(util.localpath(p)))
629 else:
629 else:
630 abspfx = pathutil.canonpath(repo.root, cwd, pat)
630 abspfx = pathutil.canonpath(repo.root, cwd, pat)
631 if len(abspfx) < len(srcs[0][0]):
631 if len(abspfx) < len(srcs[0][0]):
632 # A directory. Either the target path contains the last
632 # A directory. Either the target path contains the last
633 # component of the source path or it does not.
633 # component of the source path or it does not.
634 def evalpath(striplen):
634 def evalpath(striplen):
635 score = 0
635 score = 0
636 for s in srcs:
636 for s in srcs:
637 t = os.path.join(dest, util.localpath(s[0])[striplen:])
637 t = os.path.join(dest, util.localpath(s[0])[striplen:])
638 if os.path.lexists(t):
638 if os.path.lexists(t):
639 score += 1
639 score += 1
640 return score
640 return score
641
641
642 abspfx = util.localpath(abspfx)
642 abspfx = util.localpath(abspfx)
643 striplen = len(abspfx)
643 striplen = len(abspfx)
644 if striplen:
644 if striplen:
645 striplen += len(os.sep)
645 striplen += len(os.sep)
646 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
646 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
647 score = evalpath(striplen)
647 score = evalpath(striplen)
648 striplen1 = len(os.path.split(abspfx)[0])
648 striplen1 = len(os.path.split(abspfx)[0])
649 if striplen1:
649 if striplen1:
650 striplen1 += len(os.sep)
650 striplen1 += len(os.sep)
651 if evalpath(striplen1) > score:
651 if evalpath(striplen1) > score:
652 striplen = striplen1
652 striplen = striplen1
653 res = lambda p: os.path.join(dest,
653 res = lambda p: os.path.join(dest,
654 util.localpath(p)[striplen:])
654 util.localpath(p)[striplen:])
655 else:
655 else:
656 # a file
656 # a file
657 if destdirexists:
657 if destdirexists:
658 res = lambda p: os.path.join(dest,
658 res = lambda p: os.path.join(dest,
659 os.path.basename(util.localpath(p)))
659 os.path.basename(util.localpath(p)))
660 else:
660 else:
661 res = lambda p: dest
661 res = lambda p: dest
662 return res
662 return res
663
663
664 pats = scmutil.expandpats(pats)
664 pats = scmutil.expandpats(pats)
665 if not pats:
665 if not pats:
666 raise util.Abort(_('no source or destination specified'))
666 raise util.Abort(_('no source or destination specified'))
667 if len(pats) == 1:
667 if len(pats) == 1:
668 raise util.Abort(_('no destination specified'))
668 raise util.Abort(_('no destination specified'))
669 dest = pats.pop()
669 dest = pats.pop()
670 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
670 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
671 if not destdirexists:
671 if not destdirexists:
672 if len(pats) > 1 or matchmod.patkind(pats[0]):
672 if len(pats) > 1 or matchmod.patkind(pats[0]):
673 raise util.Abort(_('with multiple sources, destination must be an '
673 raise util.Abort(_('with multiple sources, destination must be an '
674 'existing directory'))
674 'existing directory'))
675 if util.endswithsep(dest):
675 if util.endswithsep(dest):
676 raise util.Abort(_('destination %s is not a directory') % dest)
676 raise util.Abort(_('destination %s is not a directory') % dest)
677
677
678 tfn = targetpathfn
678 tfn = targetpathfn
679 if after:
679 if after:
680 tfn = targetpathafterfn
680 tfn = targetpathafterfn
681 copylist = []
681 copylist = []
682 for pat in pats:
682 for pat in pats:
683 srcs = walkpat(pat)
683 srcs = walkpat(pat)
684 if not srcs:
684 if not srcs:
685 continue
685 continue
686 copylist.append((tfn(pat, dest, srcs), srcs))
686 copylist.append((tfn(pat, dest, srcs), srcs))
687 if not copylist:
687 if not copylist:
688 raise util.Abort(_('no files to copy'))
688 raise util.Abort(_('no files to copy'))
689
689
690 errors = 0
690 errors = 0
691 for targetpath, srcs in copylist:
691 for targetpath, srcs in copylist:
692 for abssrc, relsrc, exact in srcs:
692 for abssrc, relsrc, exact in srcs:
693 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
693 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
694 errors += 1
694 errors += 1
695
695
696 if errors:
696 if errors:
697 ui.warn(_('(consider using --after)\n'))
697 ui.warn(_('(consider using --after)\n'))
698
698
699 return errors != 0
699 return errors != 0
700
700
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    opts must supply 'pid_file', 'daemon' and 'daemon_pipefds' entries.
    With 'daemon' set (and no 'daemon_pipefds'), re-execute ourselves
    detached and return parentfn(pid) in the parent.  Otherwise run
    initfn(), record the pid, optionally finish daemonization (setsid,
    redirect stdio to logfile or /dev/null) and return runfn().
    '''

    def writepid(pid):
        # Record the service pid; append when pooling several services
        # into one pid file.
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_pipefds']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(os.getpid())

    if opts['daemon_pipefds']:
        # We are the detached child: finish daemonizing.
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # platform without setsid (e.g. Windows)
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # Detach stdio from the terminal: stdin from /dev/null, stdout
        # and stderr to the logfile (or /dev/null).
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
780
780
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)
    """
    tmpname, message, user, date, branch, nodeid, p1, p2 = \
        patch.extract(ui, hunk)

    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # nothing to import
        return (None, None, False)
    msg = _('applied to working directory')

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise util.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            repo.dirstate.beginparentchange()
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise util.Abort(str(e))
                if partial:
                    # with --partial, hunks that fail to apply become
                    # .rej files instead of aborting the import
                    rejects = True

            files = list(files)
            if opts.get('no_commit'):
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                n = repo.commit(message, opts.get('user') or user,
                                opts.get('date') or date, match=m,
                                editor=editor, force=partial)
            repo.dirstate.endparentchange()
        else:
            # --bypass: build the commit in-memory without touching the
            # working directory
            if opts.get('exact') or opts.get('import_branch'):
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise util.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            opts.get('user') or user,
                                            opts.get('date') or date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and opts.get('no_commit'):
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise util.Abort(_('patch is damaged or loses information'))
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
930
930
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    :repo: repository to export from
    :revs: revisions to export; assumed non-empty (revwidth below would
           raise on an empty list — TODO confirm callers guarantee this)
    :template: filename template for output files; used only when fp is
           not given and the template is non-empty
    :fp: optional file object receiving all patches
    :switch_parent: diff against the second parent instead of the first
    :opts: diff options forwarded to patch.diffui
    '''

    total = len(revs)
    # width of the widest rev number, for padding in filename templates
    revwidth = max([len(str(rev)) for rev in revs])
    # makefileobj records each file's open mode here, so a file written
    # by several revisions is appended to rather than truncated
    filemode = {}

    def single(rev, seqno, fp):
        # Write one changeset as a patch to fp, opening a file from
        # 'template' when no fp was supplied.
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        # diff base: first (possibly switched) parent, or null revision
        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            if fp != template:
                # we opened a real file (not stdout); close it when done
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                # drop ui-only keyword args (e.g. label) for plain files
                fp.write(s)

        # patch header block
        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))
        write(ctx.description().rstrip())
        write("\n\n")

        # the diff body, possibly labeled for color output
        for chunk, label in patch.diffui(repo, prev, node, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    # seqno is 1-based for use in filename templates
    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
991
991
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.'''
    # write to the ui (labeled/colorized) or to a plain file object
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    # resolve the optional relative root the diff should be scoped to
    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat wants the full hunks: force zero lines of context
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1049
1049
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # rev -> rendered header text, filled only in buffered mode
        self.header = {}
        # rev -> rendered changeset text, filled only in buffered mode
        self.hunk = {}
        # last header written, used to suppress duplicate headers
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        # Emit any buffered output for *rev*.  Returns 1 if a changeset
        # hunk was written, 0 otherwise.
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # Write the footer (if any) accumulated during display.
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        # In buffered mode capture the rendered text keyed by rev so
        # flush() can emit it later (e.g. for graph log ordering).
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        # NOTE(review): this body was recovered from a rendered diff view;
        # runs of spaces inside the i18n column-alignment strings below may
        # have been collapsed by extraction -- verify against upstream.
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        if rev is None:
            # working-directory context: show the first parent with a '+'
            pctx = ctx.p1()
            revnode = (pctx.rev(), hexfunc(pctx.node()) + '+')
        else:
            revnode = (rev, hexfunc(changenode))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for name, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if name == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
                          label='log.phase')
        for pctx in self._meaningful_parentrevs(ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # debug mode lists modified/added/removed files separately
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            # copies is an iterable of (name, source) pairs
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # non-verbose mode: only the first line of the description
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        # Emit a diffstat and/or patch for *node* depending on diffopts.
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    # blank line between diffstat and the patch itself
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= scmutil.intrev(self.repo, ctx.rev()) - 1:
            return []
        return parents
1235
1235
class jsonchangeset(changeset_printer):
    '''format changeset information.'''
    # Emits a JSON array of changeset objects by hand (no json module);
    # close() terminates the array, so every _show() call appends an
    # object and the separators are managed via self._first.

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # True until the first changeset object has been emitted
        self._first = True

    def close(self):
        # Terminate the JSON array; emit "[]" if nothing was shown.
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            # working-directory context has no rev/node: emit JSON null
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            # quiet mode: only rev and node
            self.ui.write('\n "rev": %s' % jrev)
            self.ui.write(',\n "node": %s' % jnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n "rev": %s' % jrev)
        self.ui.write(',\n "node": %s' % jnode)
        self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            # debug mode adds manifest, extras and the per-status file lists
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write(',\n "manifest": %s' % jmanifestnode)

            self.ui.write(',\n "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            files = ctx.p1().status(ctx)
            self.ui.write(',\n "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            # capture diffstat/diff output in a buffer so it can be
            # embedded as a JSON-escaped string
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1333
1333
class changeset_templater(changeset_printer):
    '''format changeset information.'''
    # Renders changesets through the templater, falling back on the
    # defaults below for sub-templates the map file does not define.

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # in debug mode show full hashes, otherwise the 12-char short form
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        if tmpl:
            # a literal template overrides the map file's 'changeset' entry
            self.t.cache['changeset'] = tmpl

        self.cache = {}

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()),
                        ('node', p.hex()),
                        ('phase', p.phasestr())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        # each (flag, postfix) pair selects e.g. 'changeset_verbose' over
        # 'changeset' when the corresponding ui flag is set; later entries
        # win, so debug beats quiet beats verbose
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
            ]

        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                # footer is rendered once and emitted by close()
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                             **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1424
1424
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.

    Returns a ``(tmpl, mapfile)`` pair; at most one of the two is set.
    ``tmpl`` is a literal template string, ``mapfile`` the path to a
    style map file to be loaded by the templater.
    """

    # ui settings
    if not tmpl and not style: # template are stronger than style
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            try:
                tmpl = templater.parsestring(tmpl)
            except SyntaxError:
                # not a quoted string: take the value verbatim
                tmpl = templater.parsestring(tmpl, quoted=False)
            return tmpl, None
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        mapfile = style
        if not os.path.split(mapfile)[0]:
            # bare style name: resolve against the stock style map files
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname
        return None, mapfile

    if not tmpl:
        return None, None

    # looks like a literal template?
    if '{' in tmpl:
        return tmpl, None

    # perhaps a stock style?
    if not os.path.split(tmpl)[0]:
        mapname = (templater.templatepath('map-cmdline.' + tmpl)
                   or templater.templatepath(tmpl))
        if mapname and os.path.isfile(mapname):
            return None, mapname

    # perhaps it's a reference to [templates]
    t = ui.config('templates', tmpl)
    if t:
        try:
            tmpl = templater.parsestring(t)
        except SyntaxError:
            tmpl = templater.parsestring(t, quoted=False)
        return tmpl, None

    if tmpl == 'list':
        # special value: list the available styles and abort
        ui.write(_("available styles: %s\n") % templater.stylelist())
        raise util.Abort(_("specify a template"))

    # perhaps it's a path to a map or a template
    if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
        # is it a mapfile for a style?
        if os.path.basename(tmpl).startswith("map-"):
            return None, os.path.realpath(tmpl)
        tmpl = open(tmpl).read()
        return tmpl, None

    # constant string?
    return tmpl, None
1488
1488
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.

    Returns a printer instance (changeset_printer, jsonchangeset or
    changeset_templater) ready to have .show() called on contexts.
    """
    # options
    matchfn = None
    if opts.get('patch') or opts.get('stat'):
        # a match function is needed so the printer can emit diffs
        matchfn = scmutil.matchall(repo)

    if opts.get('template') == 'json':
        # JSON output bypasses the templating machinery entirely
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))

    if not tmpl and not mapfile:
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    try:
        t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                buffered)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])
    return t
1519
1519
def showmarker(ui, marker):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    ui.write(hex(marker.precnode()))
    for successor in marker.succnodes():
        ui.write(' ')
        ui.write(hex(successor))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    # show all metadata entries except the date, which was printed above
    metaitems = sorted(marker.metadata().items())
    rendered = ['%r: %r' % item for item in metaitems if item[0] != 'date']
    ui.write('{%s}' % ', '.join(rendered))
    ui.write('\n')
1537
1537
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    dateok = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    found = {}

    def prep(ctx, fns):
        # record every revision whose commit time satisfies the spec
        when = ctx.date()
        if dateok(when[0]):
            found[ctx.rev()] = when

    # walkchangerevs yields newest-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, matcher, {'rev': None}, prep):
        rev = ctx.rev()
        if rev not in found:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, util.datestr(found[rev])))
        return str(rev)

    raise util.Abort(_("revision matching date not found"))
1558
1558
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield an endless stream of window sizes, doubling until sizelimit.

    Once the size reaches (or exceeds) sizelimit it is repeated forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
1564
1564
class FileWalkError(Exception):
    """Raised when a file history walk cannot proceed via filelogs alone."""
1567
1567
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)

    def filerevgen(filelog, last):
        """
        Only files, no patterns.  Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        history = []
        for i in xrange(0, last + 1):
            linkrev = filelog.linkrev(i)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(i):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(i)
            history.append((linkrev, parentlinkrevs,
                            follow and filelog.renamed(n)))

        return reversed(history)

    def iterfiles():
        # yield (filename, filenode) for each explicitly matched file,
        # followed by any copy sources discovered while walking
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1664
1664
class _followfilter(object):
    """Incrementally decide whether revisions belong to a followed line.

    match() is expected to be fed revisions walking away from the first
    revision it sees (all larger, or all smaller); it tracks the frontier
    of the followed graph in self.roots.
    """

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # with --follow-first only the first parent counts
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            return [p for p in self.repo.changelog.parentrevs(rev)
                    if p != nullrev]

        if self.startrev == nullrev:
            # the first revision seen anchors the walk
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1702
1702
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # Step one: fill 'wanted', the set of revisions we want to yield.
    # When it does not induce extra cost, fncache is filled as well: it
    # caches, per revision, the changed filenames (ctx.files()) matching
    # the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs

    if not slowpath and match.files():
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of
            # them existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                if value not in self.revs:
                    return False
                self.revs.discard(value)
                ctx = change(value)
                matches = filter(match, ctx.files())
                if matches:
                    fncache[value] = matches
                    self.set.add(value)
                    return True
                return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # smartsets support subtraction by a plain list
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            # gather a window of candidate revisions
            nrevs = []
            for i in xrange(windowsize):
                try:
                    rev = it.next()
                    if want(rev):
                        nrevs.append(rev)
                except StopIteration:
                    stopiteration = True
                    break
            # call prepare() in forward (ascending) order...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ...then yield contexts in the caller's requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1841
1841
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    fcacheready = [False]  # one-element list so the closure can mutate it
    pctx = repo['.']

    def populate():
        # map each ancestor's linkrev to the path the file had there
        for fn in files:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
1868
1868
1869 def _makenofollowlogfilematcher(repo, pats, opts):
1869 def _makenofollowlogfilematcher(repo, pats, opts):
1870 '''hook for extensions to override the filematcher for non-follow cases'''
1870 '''hook for extensions to override the filematcher for non-follow cases'''
1871 return None
1871 return None
1872
1872
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # maps a log option to (revset template, join operator for list values)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    followfirst = 1 if opts.get('follow_first') else 0
    # --follow with FILE behaviour depends on revs...
    it = iter(revs)
    startrev = it.next()
    try:
        followdescendants = startrev < it.next()
    except StopIteration:
        followdescendants = False

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # translate the remaining options into one revset expression,
    # and-ing the individual clauses together
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2026
2026
def _logrevs(repo, opts):
    """Resolve the revisions a log-like command should visit."""
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if follow and repo.dirstate.p1() == nullid:
        # following from an unborn working directory parent: nothing to show
        return revset.baseset()
    if follow:
        return repo.revs('reverse(:.)')
    revs = revset.spanset(repo)
    revs.reverse()
    return revs
2041
2041
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # keep at most 'limit' revisions off the top
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2078
2078
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        if not opts.get('rev'):
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically
        # returns the revision matching A then the revision matching B.
        # Sort again to fix that.
        revs = matcher(repo, revs)
        if not opts.get('rev'):
            revs.sort(reverse=True)
    if limit is not None:
        # Consume at most 'limit' revisions lazily.  enumerate/break
        # matches the equivalent truncation in getgraphlogrevs and
        # avoids the Python 2-only explicit iterator .next() protocol.
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2117
2117
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    """Render the changesets yielded by 'dag' as an ASCII graph.

    'dag' yields (rev, type, ctx, parents) tuples (see graphmod).
    Nodes whose binary node id is in 'showparents' are drawn with '@'.
    'edgefn' (e.g. graphmod.asciiedges) maps each node plus its text
    lines to graph edge data.  'getrenamed', if given, is used to
    compute copy information per changeset; 'filematcher', if given,
    maps a rev to a match object restricting the files displayed.
    """
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        # choose the graph glyph for this changeset
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        # note: ctx.rev() is falsy for rev 0 too, so copies are not
        # computed for the first revision (it can have no rename source)
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # render the changeset into the (buffered) displayer, then pull
        # the text back out so it can be interleaved with graph edges
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        # drop the trailing empty line produced by the final '\n'
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2148
2148
def graphlog(ui, repo, *pats, **opts):
    """Run a log query and render the result as an ASCII DAG.

    Parameters are identical to the log command ones.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # bound copy tracing to the highest requested revision, if any
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    displayer = show_changeset(ui, repo, opts, buffered=True)
    wdirparents = [pctx.node() for pctx in repo[None].parents()]
    displaygraph(ui, revdag, displayer, wdirparents,
                 graphmod.asciiedges, getrenamed, filematcher)
2164
2164
def checkunsupportedgraphflags(pats, opts):
    """Abort when an option incompatible with -G/--graph was supplied."""
    for flagname in ["newest_first"]:
        if opts.get(flagname):
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % flagname.replace("_", "-"))
2170
2170
def graphrevs(repo, nodes, opts):
    """Reverse 'nodes' (in place), apply --limit, and wrap them for graphmod."""
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is not None:
        nodes = nodes[:maxcount]
    return graphmod.nodes(repo, nodes)
2177
2177
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matched by 'match' for addition to the dirstate.

    'prefix' is prepended to paths in messages when called from a
    subrepository.  With 'explicitonly', only files named exactly on
    the command line are added.  Recurses into subrepositories.
    Returns the list of paths that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # collect names rejected by the matcher while still invoking the
    # original bad callback
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit filenames for case collisions on case-insensitive
        # filesystems
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in wctx.walk(match):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    # recurse into subrepositories; without --subrepos, only files
    # explicitly matched inside a subrepo are added there
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2214
2214
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by 'match' without deleting them.

    'prefix' is prepended to paths in messages when called from a
    subrepository.  With 'explicitonly', only files named exactly on
    the command line are forgotten.  Recurses into subrepositories.
    Returns (bad, forgot): paths that could not be forgotten and
    paths that actually were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # collect names rejected by the matcher while still invoking the
    # original bad callback
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    # s[0] + s[1] + s[3] + s[6]: modified, added, deleted and clean --
    # i.e. every matched file currently known to the dirstate
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # warn about explicitly named files that are not tracked (and
        # were not already forgotten via a subrepo above)
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2262
2262
def files(ui, ctx, m, fm, fmt, subrepos):
    """List the files of 'ctx' matched by 'm' through formatter 'fm'.

    'fmt' is the format string used for each path.  Returns 0 when at
    least one file was listed (here or in a subrepository), 1 otherwise.
    """
    ctxrev = ctx.rev()
    dirstate = ctx.repo().dirstate
    exitcode = 1

    for path in ctx.matches(m):
        # in the working directory, skip files marked for removal
        if ctxrev is None and dirstate[path] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fctx = ctx[path]
            fm.write('size flags', '% 10d % 1s ', fctx.size(), fctx.flags())
        fm.data(abspath=path)
        fm.write('path', fmt, m.rel(path))
        exitcode = 0

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt) == 0:
                    exitcode = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return exitcode
2291
2291
def remove(ui, repo, m, prefix, after, force, subrepos):
    """Schedule removal of files matched by 'm' (the 'hg remove' core).

    'after' records deletions of files already missing from disk;
    'force' also removes modified/added/clean files; 'subrepos'
    recurses into all subrepositories.  'prefix' is prepended to paths
    in messages when called from a subrepository.  Returns 0 on
    success, 1 if anything was refused or missing.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    def matchessubrepo(matcher, subpath):
        # True when the matcher names the subrepo itself or any file
        # inside it.  (Hoisted out of the loop below: it does not
        # depend on loop state, so there is no need to redefine it per
        # iteration.)
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    for subpath in sorted(wctx.substate):
        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    def insubrepo(f):
        # True when f lives under some subrepository (also hoisted out
        # of the loop body below)
        for subpath in wctx.substate:
            if f.startswith(subpath):
                return True
        return False

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    for f in m.files():
        isdir = f in deleteddirs or f in wctx.dirs()
        if f in repo.dirstate or isdir or f == '.' or insubrepo(f):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

    # select the files to actually forget/unlink; 'toremove' was
    # previously named 'list', which shadowed the builtin
    if force:
        toremove = modified + deleted + clean + added
    elif after:
        toremove = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        toremove = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(toremove):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in toremove:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(toremove)
    finally:
        wlock.release()

    return ret
2376
2376
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write the data of files in 'ctx' matched by 'matcher'.

    Output goes to the file object produced by makefileobj (honouring
    --output and --decode).  Recurses into subrepositories.  Returns 0
    if at least one file was written, 1 otherwise.
    """
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        # renamed from 'file' to avoid shadowing the builtin
        filename = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, filename)[0]:
            write(filename)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    bad = matcher.bad

    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        bad(path, msg)

    matcher.bad = badfn

    # renamed from 'abs' to avoid shadowing the builtin
    for absname in ctx.walk(matcher):
        write(absname)
        err = 0

    matcher.bad = bad

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2429
2429
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    msg = logmessage(ui, opts)
    m = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a
    # command that doesn't support addremove
    if opts.get('addremove') and scmutil.addremove(repo, m, "", opts) != 0:
        raise util.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, msg, m, opts)
2446
2446
2447 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2447 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2448 # amend will reuse the existing user if not specified, but the obsolete
2448 # amend will reuse the existing user if not specified, but the obsolete
2449 # marker creation requires that the current user's name is specified.
2449 # marker creation requires that the current user's name is specified.
2450 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2450 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2451 ui.username() # raise exception if username not set
2451 ui.username() # raise exception if username not set
2452
2452
2453 ui.note(_('amending changeset %s\n') % old)
2453 ui.note(_('amending changeset %s\n') % old)
2454 base = old.p1()
2454 base = old.p1()
2455
2455
2456 wlock = lock = newid = None
2456 wlock = lock = newid = None
2457 try:
2457 try:
2458 wlock = repo.wlock()
2458 wlock = repo.wlock()
2459 lock = repo.lock()
2459 lock = repo.lock()
2460 tr = repo.transaction('amend')
2460 tr = repo.transaction('amend')
2461 try:
2461 try:
2462 # See if we got a message from -m or -l, if not, open the editor
2462 # See if we got a message from -m or -l, if not, open the editor
2463 # with the message of the changeset to amend
2463 # with the message of the changeset to amend
2464 message = logmessage(ui, opts)
2464 message = logmessage(ui, opts)
2465 # ensure logfile does not conflict with later enforcement of the
2465 # ensure logfile does not conflict with later enforcement of the
2466 # message. potential logfile content has been processed by
2466 # message. potential logfile content has been processed by
2467 # `logmessage` anyway.
2467 # `logmessage` anyway.
2468 opts.pop('logfile')
2468 opts.pop('logfile')
2469 # First, do a regular commit to record all changes in the working
2469 # First, do a regular commit to record all changes in the working
2470 # directory (if there are any)
2470 # directory (if there are any)
2471 ui.callhooks = False
2471 ui.callhooks = False
2472 currentbookmark = repo._bookmarkcurrent
2472 currentbookmark = repo._bookmarkcurrent
2473 try:
2473 try:
2474 repo._bookmarkcurrent = None
2474 repo._bookmarkcurrent = None
2475 opts['message'] = 'temporary amend commit for %s' % old
2475 opts['message'] = 'temporary amend commit for %s' % old
2476 node = commit(ui, repo, commitfunc, pats, opts)
2476 node = commit(ui, repo, commitfunc, pats, opts)
2477 finally:
2477 finally:
2478 repo._bookmarkcurrent = currentbookmark
2478 repo._bookmarkcurrent = currentbookmark
2479 ui.callhooks = True
2479 ui.callhooks = True
2480 ctx = repo[node]
2480 ctx = repo[node]
2481
2481
2482 # Participating changesets:
2482 # Participating changesets:
2483 #
2483 #
2484 # node/ctx o - new (intermediate) commit that contains changes
2484 # node/ctx o - new (intermediate) commit that contains changes
2485 # | from working dir to go into amending commit
2485 # | from working dir to go into amending commit
2486 # | (or a workingctx if there were no changes)
2486 # | (or a workingctx if there were no changes)
2487 # |
2487 # |
2488 # old o - changeset to amend
2488 # old o - changeset to amend
2489 # |
2489 # |
2490 # base o - parent of amending changeset
2490 # base o - parent of amending changeset
2491
2491
2492 # Update extra dict from amended commit (e.g. to preserve graft
2492 # Update extra dict from amended commit (e.g. to preserve graft
2493 # source)
2493 # source)
2494 extra.update(old.extra())
2494 extra.update(old.extra())
2495
2495
2496 # Also update it from the intermediate commit or from the wctx
2496 # Also update it from the intermediate commit or from the wctx
2497 extra.update(ctx.extra())
2497 extra.update(ctx.extra())
2498
2498
2499 if len(old.parents()) > 1:
2499 if len(old.parents()) > 1:
2500 # ctx.files() isn't reliable for merges, so fall back to the
2500 # ctx.files() isn't reliable for merges, so fall back to the
2501 # slower repo.status() method
2501 # slower repo.status() method
2502 files = set([fn for st in repo.status(base, old)[:3]
2502 files = set([fn for st in repo.status(base, old)[:3]
2503 for fn in st])
2503 for fn in st])
2504 else:
2504 else:
2505 files = set(old.files())
2505 files = set(old.files())
2506
2506
2507 # Second, we use either the commit we just did, or if there were no
2507 # Second, we use either the commit we just did, or if there were no
2508 # changes the parent of the working directory as the version of the
2508 # changes the parent of the working directory as the version of the
2509 # files in the final amend commit
2509 # files in the final amend commit
2510 if node:
2510 if node:
2511 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2511 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2512
2512
2513 user = ctx.user()
2513 user = ctx.user()
2514 date = ctx.date()
2514 date = ctx.date()
2515 # Recompute copies (avoid recording a -> b -> a)
2515 # Recompute copies (avoid recording a -> b -> a)
2516 copied = copies.pathcopies(base, ctx)
2516 copied = copies.pathcopies(base, ctx)
2517 if old.p2:
2517 if old.p2:
2518 copied.update(copies.pathcopies(old.p2(), ctx))
2518 copied.update(copies.pathcopies(old.p2(), ctx))
2519
2519
2520 # Prune files which were reverted by the updates: if old
2520 # Prune files which were reverted by the updates: if old
2521 # introduced file X and our intermediate commit, node,
2521 # introduced file X and our intermediate commit, node,
2522 # renamed that file, then those two files are the same and
2522 # renamed that file, then those two files are the same and
2523 # we can discard X from our list of files. Likewise if X
2523 # we can discard X from our list of files. Likewise if X
2524 # was deleted, it's no longer relevant
2524 # was deleted, it's no longer relevant
2525 files.update(ctx.files())
2525 files.update(ctx.files())
2526
2526
2527 def samefile(f):
2527 def samefile(f):
2528 if f in ctx.manifest():
2528 if f in ctx.manifest():
2529 a = ctx.filectx(f)
2529 a = ctx.filectx(f)
2530 if f in base.manifest():
2530 if f in base.manifest():
2531 b = base.filectx(f)
2531 b = base.filectx(f)
2532 return (not a.cmp(b)
2532 return (not a.cmp(b)
2533 and a.flags() == b.flags())
2533 and a.flags() == b.flags())
2534 else:
2534 else:
2535 return False
2535 return False
2536 else:
2536 else:
2537 return f not in base.manifest()
2537 return f not in base.manifest()
2538 files = [f for f in files if not samefile(f)]
2538 files = [f for f in files if not samefile(f)]
2539
2539
2540 def filectxfn(repo, ctx_, path):
2540 def filectxfn(repo, ctx_, path):
2541 try:
2541 try:
2542 fctx = ctx[path]
2542 fctx = ctx[path]
2543 flags = fctx.flags()
2543 flags = fctx.flags()
2544 mctx = context.memfilectx(repo,
2544 mctx = context.memfilectx(repo,
2545 fctx.path(), fctx.data(),
2545 fctx.path(), fctx.data(),
2546 islink='l' in flags,
2546 islink='l' in flags,
2547 isexec='x' in flags,
2547 isexec='x' in flags,
2548 copied=copied.get(path))
2548 copied=copied.get(path))
2549 return mctx
2549 return mctx
2550 except KeyError:
2550 except KeyError:
2551 return None
2551 return None
2552 else:
2552 else:
2553 ui.note(_('copying changeset %s to %s\n') % (old, base))
2553 ui.note(_('copying changeset %s to %s\n') % (old, base))
2554
2554
2555 # Use version of files as in the old cset
2555 # Use version of files as in the old cset
2556 def filectxfn(repo, ctx_, path):
2556 def filectxfn(repo, ctx_, path):
2557 try:
2557 try:
2558 return old.filectx(path)
2558 return old.filectx(path)
2559 except KeyError:
2559 except KeyError:
2560 return None
2560 return None
2561
2561
2562 user = opts.get('user') or old.user()
2562 user = opts.get('user') or old.user()
2563 date = opts.get('date') or old.date()
2563 date = opts.get('date') or old.date()
2564 editform = mergeeditform(old, 'commit.amend')
2564 editform = mergeeditform(old, 'commit.amend')
2565 editor = getcommiteditor(editform=editform, **opts)
2565 editor = getcommiteditor(editform=editform, **opts)
2566 if not message:
2566 if not message:
2567 editor = getcommiteditor(edit=True, editform=editform)
2567 editor = getcommiteditor(edit=True, editform=editform)
2568 message = old.description()
2568 message = old.description()
2569
2569
2570 pureextra = extra.copy()
2570 pureextra = extra.copy()
2571 extra['amend_source'] = old.hex()
2571 extra['amend_source'] = old.hex()
2572
2572
2573 new = context.memctx(repo,
2573 new = context.memctx(repo,
2574 parents=[base.node(), old.p2().node()],
2574 parents=[base.node(), old.p2().node()],
2575 text=message,
2575 text=message,
2576 files=files,
2576 files=files,
2577 filectxfn=filectxfn,
2577 filectxfn=filectxfn,
2578 user=user,
2578 user=user,
2579 date=date,
2579 date=date,
2580 extra=extra,
2580 extra=extra,
2581 editor=editor)
2581 editor=editor)
2582
2582
2583 newdesc = changelog.stripdesc(new.description())
2583 newdesc = changelog.stripdesc(new.description())
2584 if ((not node)
2584 if ((not node)
2585 and newdesc == old.description()
2585 and newdesc == old.description()
2586 and user == old.user()
2586 and user == old.user()
2587 and date == old.date()
2587 and date == old.date()
2588 and pureextra == old.extra()):
2588 and pureextra == old.extra()):
2589 # nothing changed. continuing here would create a new node
2589 # nothing changed. continuing here would create a new node
2590 # anyway because of the amend_source noise.
2590 # anyway because of the amend_source noise.
2591 #
2591 #
2592 # This not what we expect from amend.
2592 # This not what we expect from amend.
2593 return old.node()
2593 return old.node()
2594
2594
2595 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2595 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2596 try:
2596 try:
2597 if opts.get('secret'):
2597 if opts.get('secret'):
2598 commitphase = 'secret'
2598 commitphase = 'secret'
2599 else:
2599 else:
2600 commitphase = old.phase()
2600 commitphase = old.phase()
2601 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2601 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2602 newid = repo.commitctx(new)
2602 newid = repo.commitctx(new)
2603 finally:
2603 finally:
2604 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2604 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2605 if newid != old.node():
2605 if newid != old.node():
2606 # Reroute the working copy parent to the new changeset
2606 # Reroute the working copy parent to the new changeset
2607 repo.setparents(newid, nullid)
2607 repo.setparents(newid, nullid)
2608
2608
2609 # Move bookmarks from old parent to amend commit
2609 # Move bookmarks from old parent to amend commit
2610 bms = repo.nodebookmarks(old.node())
2610 bms = repo.nodebookmarks(old.node())
2611 if bms:
2611 if bms:
2612 marks = repo._bookmarks
2612 marks = repo._bookmarks
2613 for bm in bms:
2613 for bm in bms:
2614 marks[bm] = newid
2614 marks[bm] = newid
2615 marks.write()
2615 marks.write()
2616 #commit the whole amend process
2616 #commit the whole amend process
2617 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2617 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2618 if createmarkers and newid != old.node():
2618 if createmarkers and newid != old.node():
2619 # mark the new changeset as successor of the rewritten one
2619 # mark the new changeset as successor of the rewritten one
2620 new = repo[newid]
2620 new = repo[newid]
2621 obs = [(old, (new,))]
2621 obs = [(old, (new,))]
2622 if node:
2622 if node:
2623 obs.append((ctx, ()))
2623 obs.append((ctx, ()))
2624
2624
2625 obsolete.createmarkers(repo, obs)
2625 obsolete.createmarkers(repo, obs)
2626 tr.close()
2626 tr.close()
2627 finally:
2627 finally:
2628 tr.release()
2628 tr.release()
2629 if not createmarkers and newid != old.node():
2629 if not createmarkers and newid != old.node():
2630 # Strip the intermediate commit (if there was one) and the amended
2630 # Strip the intermediate commit (if there was one) and the amended
2631 # commit
2631 # commit
2632 if node:
2632 if node:
2633 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2633 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2634 ui.note(_('stripping amended changeset %s\n') % old)
2634 ui.note(_('stripping amended changeset %s\n') % old)
2635 repair.strip(ui, repo, old.node(), topic='amend-backup')
2635 repair.strip(ui, repo, old.node(), topic='amend-backup')
2636 finally:
2636 finally:
2637 if newid is None:
2637 if newid is None:
2638 repo.dirstate.invalidate()
2638 repo.dirstate.invalidate()
2639 lockmod.release(lock, wlock)
2639 lockmod.release(lock, wlock)
2640 return newid
2640 return newid
2641
2641
def commiteditor(repo, ctx, subs, editform=''):
    """Return the commit message for ctx, launching an editor if needed.

    If the changeset already carries a description, that text is used
    verbatim; otherwise the user is dropped into the commit editor via
    commitforceeditor().
    """
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform)
2646
2646
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform=''):
    """Unconditionally run the commit editor and return the entered message.

    The editor is pre-filled either from a '[committemplate]' template
    (looked up from the most specific ``editform`` dotted name down to
    plain 'changeset') or from the built-in default text. Lines starting
    with 'HG:' are stripped from the result. ``finishdesc``, if given,
    post-processes the text. Raises util.Abort on an empty message.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # probe committemplate config from most to least specific editform,
    # e.g. 'changeset.commit.amend' -> 'changeset.commit' -> 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root; restore the previous working
    # directory even if the editor (or post-processing) raises, so a
    # failed edit does not leave the process chdir'd into the repo
    olddir = os.getcwd()
    os.chdir(repo.root)
    try:
        text = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                            editform=editform)
        text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    finally:
        os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
2676
2676
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit editor text for ctx from template ``tmpl``.

    ``tmpl`` is resolved via gettemplate() (template string or map file),
    compiled into a changeset_templater, and shown for ``ctx`` with
    ``extramsg`` passed through as a template keyword. Returns the
    rendered text captured from the ui buffer. Raises util.Abort on a
    template syntax error.
    """
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    try:
        t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
    except SyntaxError, inst:
        # surface template compile errors as a user-facing abort
        raise util.Abort(inst.args[0])

    # make every other [committemplate] item available as a named
    # sub-template ('changeset' itself is the entry point, not a part)
    for k, v in repo.ui.configitems('committemplate'):
        if k != 'changeset':
            t.t.cache[k] = v

    if not extramsg:
        extramsg = '' # ensure that extramsg is a string, never None

    # render into a ui buffer so the result can be returned as text
    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2696
2696
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default (non-templated) commit editor text for ctx.

    Returns the existing description (if any) followed by 'HG:' comment
    lines summarizing user, branch/bookmark/merge state, subrepos, and
    the added/changed/removed file lists.
    """
    lines = []
    desc = ctx.description()
    if desc:
        lines.append(desc)
        lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(_("HG: Enter commit message."
                   " Lines beginning with 'HG:' are removed."))
    lines.append("HG: %s" % extramsg)
    lines.append("HG: --")
    lines.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        lines.append(_("HG: branch merge"))
    branch = ctx.branch()
    if branch:
        lines.append(_("HG: branch '%s'") % branch)
    if bookmarks.iscurrent(repo):
        lines.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
    for s in subs:
        lines.append(_("HG: subrepo %s") % s)

    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    for f in added:
        lines.append(_("HG: added %s") % f)
    for f in modified:
        lines.append(_("HG: changed %s") % f)
    for f in removed:
        lines.append(_("HG: removed %s") % f)
    if not (added or modified or removed):
        lines.append(_("HG: no files changed"))
    lines.append("")

    return "\n".join(lines)
2724
2724
def commitstatus(repo, node, branch, bheads=None, opts={}):
    """Emit ui status messages describing the commit just made as ``node``.

    Prints 'created new head' when the commit adds a topological or
    branch head, 'reopening closed branch head' when it reopens a closed
    head of its branch, and (in verbose/debug mode) the committed
    changeset id. ``bheads`` are the branch heads before the commit.
    """
    ctx = repo[node]
    parents = ctx.parents()

    # A new head is announced unless some parent was already a head of
    # this branch. Summary of when the message fires (given pre-existing
    # branch heads; parents classified as N=null, B=other branch,
    # C=non-head changeset, H=branch head):
    #   N/N, B/N, C/N -> yes (new topo/branch root or topo head)
    #   H/N, H/B, C/H, H/H -> no (usual commit, named-branch or head merge)
    #   B/B, C/B, C/C -> yes (weird branch root, branch merge, merge head)
    # Nothing is printed for initial roots (bheads empty) or for amends.
    parentishead = any(p.node() in bheads and p.branch() == branch
                       for p in parents) if bheads else False
    if (bheads and not opts.get('amend')
            and node not in bheads and not parentishead):
        repo.ui.status(_('created new head\n'))

    if not opts.get('close_branch'):
        for p in parents:
            # committing on top of a closed head of the same branch
            # implicitly reopens it
            if p.closesbranch() and p.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % p)

    ui = repo.ui
    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2770
2770
2771 def revert(ui, repo, ctx, parents, *pats, **opts):
2771 def revert(ui, repo, ctx, parents, *pats, **opts):
2772 parent, p2 = parents
2772 parent, p2 = parents
2773 node = ctx.node()
2773 node = ctx.node()
2774
2774
2775 mf = ctx.manifest()
2775 mf = ctx.manifest()
2776 if node == p2:
2776 if node == p2:
2777 parent = p2
2777 parent = p2
2778 if node == parent:
2778 if node == parent:
2779 pmf = mf
2779 pmf = mf
2780 else:
2780 else:
2781 pmf = None
2781 pmf = None
2782
2782
2783 # need all matching names in dirstate and manifest of target rev,
2783 # need all matching names in dirstate and manifest of target rev,
2784 # so have to walk both. do not print errors if files exist in one
2784 # so have to walk both. do not print errors if files exist in one
2785 # but not other. in both cases, filesets should be evaluated against
2785 # but not other. in both cases, filesets should be evaluated against
2786 # workingctx to get consistent result (issue4497). this means 'set:**'
2786 # workingctx to get consistent result (issue4497). this means 'set:**'
2787 # cannot be used to select missing files from target rev.
2787 # cannot be used to select missing files from target rev.
2788
2788
2789 # `names` is a mapping for all elements in working copy and target revision
2789 # `names` is a mapping for all elements in working copy and target revision
2790 # The mapping is in the form:
2790 # The mapping is in the form:
2791 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2791 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2792 names = {}
2792 names = {}
2793
2793
2794 wlock = repo.wlock()
2794 wlock = repo.wlock()
2795 try:
2795 try:
2796 ## filling of the `names` mapping
2796 ## filling of the `names` mapping
2797 # walk dirstate to fill `names`
2797 # walk dirstate to fill `names`
2798
2798
2799 interactive = opts.get('interactive', False)
2799 interactive = opts.get('interactive', False)
2800 wctx = repo[None]
2800 wctx = repo[None]
2801 m = scmutil.match(wctx, pats, opts)
2801 m = scmutil.match(wctx, pats, opts)
2802
2802
2803 # we'll need this later
2803 # we'll need this later
2804 targetsubs = sorted(s for s in wctx.substate if m(s))
2804 targetsubs = sorted(s for s in wctx.substate if m(s))
2805
2805
2806 if not m.always():
2806 if not m.always():
2807 m.bad = lambda x, y: False
2807 m.bad = lambda x, y: False
2808 for abs in repo.walk(m):
2808 for abs in repo.walk(m):
2809 names[abs] = m.rel(abs), m.exact(abs)
2809 names[abs] = m.rel(abs), m.exact(abs)
2810
2810
2811 # walk target manifest to fill `names`
2811 # walk target manifest to fill `names`
2812
2812
2813 def badfn(path, msg):
2813 def badfn(path, msg):
2814 if path in names:
2814 if path in names:
2815 return
2815 return
2816 if path in ctx.substate:
2816 if path in ctx.substate:
2817 return
2817 return
2818 path_ = path + '/'
2818 path_ = path + '/'
2819 for f in names:
2819 for f in names:
2820 if f.startswith(path_):
2820 if f.startswith(path_):
2821 return
2821 return
2822 ui.warn("%s: %s\n" % (m.rel(path), msg))
2822 ui.warn("%s: %s\n" % (m.rel(path), msg))
2823
2823
2824 m.bad = badfn
2824 m.bad = badfn
2825 for abs in ctx.walk(m):
2825 for abs in ctx.walk(m):
2826 if abs not in names:
2826 if abs not in names:
2827 names[abs] = m.rel(abs), m.exact(abs)
2827 names[abs] = m.rel(abs), m.exact(abs)
2828
2828
2829 # Find status of all file in `names`.
2829 # Find status of all file in `names`.
2830 m = scmutil.matchfiles(repo, names)
2830 m = scmutil.matchfiles(repo, names)
2831
2831
2832 changes = repo.status(node1=node, match=m,
2832 changes = repo.status(node1=node, match=m,
2833 unknown=True, ignored=True, clean=True)
2833 unknown=True, ignored=True, clean=True)
2834 else:
2834 else:
2835 changes = repo.status(node1=node, match=m)
2835 changes = repo.status(node1=node, match=m)
2836 for kind in changes:
2836 for kind in changes:
2837 for abs in kind:
2837 for abs in kind:
2838 names[abs] = m.rel(abs), m.exact(abs)
2838 names[abs] = m.rel(abs), m.exact(abs)
2839
2839
2840 m = scmutil.matchfiles(repo, names)
2840 m = scmutil.matchfiles(repo, names)
2841
2841
2842 modified = set(changes.modified)
2842 modified = set(changes.modified)
2843 added = set(changes.added)
2843 added = set(changes.added)
2844 removed = set(changes.removed)
2844 removed = set(changes.removed)
2845 _deleted = set(changes.deleted)
2845 _deleted = set(changes.deleted)
2846 unknown = set(changes.unknown)
2846 unknown = set(changes.unknown)
2847 unknown.update(changes.ignored)
2847 unknown.update(changes.ignored)
2848 clean = set(changes.clean)
2848 clean = set(changes.clean)
2849 modadded = set()
2849 modadded = set()
2850
2850
2851 # split between files known in target manifest and the others
2851 # split between files known in target manifest and the others
2852 smf = set(mf)
2852 smf = set(mf)
2853
2853
2854 # determine the exact nature of the deleted changesets
2854 # determine the exact nature of the deleted changesets
2855 deladded = _deleted - smf
2855 deladded = _deleted - smf
2856 deleted = _deleted - deladded
2856 deleted = _deleted - deladded
2857
2857
2858 # We need to account for the state of the file in the dirstate,
2858 # We need to account for the state of the file in the dirstate,
2859 # even when we revert against something else than parent. This will
2859 # even when we revert against something else than parent. This will
2860 # slightly alter the behavior of revert (doing back up or not, delete
2860 # slightly alter the behavior of revert (doing back up or not, delete
2861 # or just forget etc).
2861 # or just forget etc).
2862 if parent == node:
2862 if parent == node:
2863 dsmodified = modified
2863 dsmodified = modified
2864 dsadded = added
2864 dsadded = added
2865 dsremoved = removed
2865 dsremoved = removed
2866 # store all local modifications, useful later for rename detection
2866 # store all local modifications, useful later for rename detection
2867 localchanges = dsmodified | dsadded
2867 localchanges = dsmodified | dsadded
2868 modified, added, removed = set(), set(), set()
2868 modified, added, removed = set(), set(), set()
2869 else:
2869 else:
2870 changes = repo.status(node1=parent, match=m)
2870 changes = repo.status(node1=parent, match=m)
2871 dsmodified = set(changes.modified)
2871 dsmodified = set(changes.modified)
2872 dsadded = set(changes.added)
2872 dsadded = set(changes.added)
2873 dsremoved = set(changes.removed)
2873 dsremoved = set(changes.removed)
2874 # store all local modifications, useful later for rename detection
2874 # store all local modifications, useful later for rename detection
2875 localchanges = dsmodified | dsadded
2875 localchanges = dsmodified | dsadded
2876
2876
2877 # only take into account for removes between wc and target
2877 # only take into account for removes between wc and target
2878 clean |= dsremoved - removed
2878 clean |= dsremoved - removed
2879 dsremoved &= removed
2879 dsremoved &= removed
2880 # distinct between dirstate remove and other
2880 # distinct between dirstate remove and other
2881 removed -= dsremoved
2881 removed -= dsremoved
2882
2882
2883 modadded = added & dsmodified
2883 modadded = added & dsmodified
2884 added -= modadded
2884 added -= modadded
2885
2885
2886 # tell newly modified apart.
2886 # tell newly modified apart.
2887 dsmodified &= modified
2887 dsmodified &= modified
2888 dsmodified |= modified & dsadded # dirstate added may needs backup
2888 dsmodified |= modified & dsadded # dirstate added may needs backup
2889 modified -= dsmodified
2889 modified -= dsmodified
2890
2890
2891 # We need to wait for some post-processing to update this set
2891 # We need to wait for some post-processing to update this set
2892 # before making the distinction. The dirstate will be used for
2892 # before making the distinction. The dirstate will be used for
2893 # that purpose.
2893 # that purpose.
2894 dsadded = added
2894 dsadded = added
2895
2895
2896 # in case of merge, files that are actually added can be reported as
2896 # in case of merge, files that are actually added can be reported as
2897 # modified, we need to post process the result
2897 # modified, we need to post process the result
2898 if p2 != nullid:
2898 if p2 != nullid:
2899 if pmf is None:
2899 if pmf is None:
2900 # only need parent manifest in the merge case,
2900 # only need parent manifest in the merge case,
2901 # so do not read by default
2901 # so do not read by default
2902 pmf = repo[parent].manifest()
2902 pmf = repo[parent].manifest()
2903 mergeadd = dsmodified - set(pmf)
2903 mergeadd = dsmodified - set(pmf)
2904 dsadded |= mergeadd
2904 dsadded |= mergeadd
2905 dsmodified -= mergeadd
2905 dsmodified -= mergeadd
2906
2906
2907 # if f is a rename, update `names` to also revert the source
2907 # if f is a rename, update `names` to also revert the source
2908 cwd = repo.getcwd()
2908 cwd = repo.getcwd()
2909 for f in localchanges:
2909 for f in localchanges:
2910 src = repo.dirstate.copied(f)
2910 src = repo.dirstate.copied(f)
2911 # XXX should we check for rename down to target node?
2911 # XXX should we check for rename down to target node?
2912 if src and src not in names and repo.dirstate[src] == 'r':
2912 if src and src not in names and repo.dirstate[src] == 'r':
2913 dsremoved.add(src)
2913 dsremoved.add(src)
2914 names[src] = (repo.pathto(src, cwd), True)
2914 names[src] = (repo.pathto(src, cwd), True)
2915
2915
2916 # distinguish between file to forget and the other
2916 # distinguish between file to forget and the other
2917 added = set()
2917 added = set()
2918 for abs in dsadded:
2918 for abs in dsadded:
2919 if repo.dirstate[abs] != 'a':
2919 if repo.dirstate[abs] != 'a':
2920 added.add(abs)
2920 added.add(abs)
2921 dsadded -= added
2921 dsadded -= added
2922
2922
2923 for abs in deladded:
2923 for abs in deladded:
2924 if repo.dirstate[abs] == 'a':
2924 if repo.dirstate[abs] == 'a':
2925 dsadded.add(abs)
2925 dsadded.add(abs)
2926 deladded -= dsadded
2926 deladded -= dsadded
2927
2927
2928 # For files marked as removed, we check if an unknown file is present at
2928 # For files marked as removed, we check if an unknown file is present at
2929 # the same path. If a such file exists it may need to be backed up.
2929 # the same path. If a such file exists it may need to be backed up.
2930 # Making the distinction at this stage helps have simpler backup
2930 # Making the distinction at this stage helps have simpler backup
2931 # logic.
2931 # logic.
2932 removunk = set()
2932 removunk = set()
2933 for abs in removed:
2933 for abs in removed:
2934 target = repo.wjoin(abs)
2934 target = repo.wjoin(abs)
2935 if os.path.lexists(target):
2935 if os.path.lexists(target):
2936 removunk.add(abs)
2936 removunk.add(abs)
2937 removed -= removunk
2937 removed -= removunk
2938
2938
2939 dsremovunk = set()
2939 dsremovunk = set()
2940 for abs in dsremoved:
2940 for abs in dsremoved:
2941 target = repo.wjoin(abs)
2941 target = repo.wjoin(abs)
2942 if os.path.lexists(target):
2942 if os.path.lexists(target):
2943 dsremovunk.add(abs)
2943 dsremovunk.add(abs)
2944 dsremoved -= dsremovunk
2944 dsremoved -= dsremovunk
2945
2945
2946 # action to be actually performed by revert
2946 # action to be actually performed by revert
2947 # (<list of file>, message>) tuple
2947 # (<list of file>, message>) tuple
2948 actions = {'revert': ([], _('reverting %s\n')),
2948 actions = {'revert': ([], _('reverting %s\n')),
2949 'add': ([], _('adding %s\n')),
2949 'add': ([], _('adding %s\n')),
2950 'remove': ([], _('removing %s\n')),
2950 'remove': ([], _('removing %s\n')),
2951 'drop': ([], _('removing %s\n')),
2951 'drop': ([], _('removing %s\n')),
2952 'forget': ([], _('forgetting %s\n')),
2952 'forget': ([], _('forgetting %s\n')),
2953 'undelete': ([], _('undeleting %s\n')),
2953 'undelete': ([], _('undeleting %s\n')),
2954 'noop': (None, _('no changes needed to %s\n')),
2954 'noop': (None, _('no changes needed to %s\n')),
2955 'unknown': (None, _('file not managed: %s\n')),
2955 'unknown': (None, _('file not managed: %s\n')),
2956 }
2956 }
2957
2957
2958 # "constant" that convey the backup strategy.
2958 # "constant" that convey the backup strategy.
2959 # All set to `discard` if `no-backup` is set do avoid checking
2959 # All set to `discard` if `no-backup` is set do avoid checking
2960 # no_backup lower in the code.
2960 # no_backup lower in the code.
2961 # These values are ordered for comparison purposes
2961 # These values are ordered for comparison purposes
2962 backup = 2 # unconditionally do backup
2962 backup = 2 # unconditionally do backup
2963 check = 1 # check if the existing file differs from target
2963 check = 1 # check if the existing file differs from target
2964 discard = 0 # never do backup
2964 discard = 0 # never do backup
2965 if opts.get('no_backup'):
2965 if opts.get('no_backup'):
2966 backup = check = discard
2966 backup = check = discard
2967
2967
2968 backupanddel = actions['remove']
2968 backupanddel = actions['remove']
2969 if not opts.get('no_backup'):
2969 if not opts.get('no_backup'):
2970 backupanddel = actions['drop']
2970 backupanddel = actions['drop']
2971
2971
2972 disptable = (
2972 disptable = (
2973 # dispatch table:
2973 # dispatch table:
2974 # file state
2974 # file state
2975 # action
2975 # action
2976 # make backup
2976 # make backup
2977
2977
2978 ## Sets that results that will change file on disk
2978 ## Sets that results that will change file on disk
2979 # Modified compared to target, no local change
2979 # Modified compared to target, no local change
2980 (modified, actions['revert'], discard),
2980 (modified, actions['revert'], discard),
2981 # Modified compared to target, but local file is deleted
2981 # Modified compared to target, but local file is deleted
2982 (deleted, actions['revert'], discard),
2982 (deleted, actions['revert'], discard),
2983 # Modified compared to target, local change
2983 # Modified compared to target, local change
2984 (dsmodified, actions['revert'], backup),
2984 (dsmodified, actions['revert'], backup),
2985 # Added since target
2985 # Added since target
2986 (added, actions['remove'], discard),
2986 (added, actions['remove'], discard),
2987 # Added in working directory
2987 # Added in working directory
2988 (dsadded, actions['forget'], discard),
2988 (dsadded, actions['forget'], discard),
2989 # Added since target, have local modification
2989 # Added since target, have local modification
2990 (modadded, backupanddel, backup),
2990 (modadded, backupanddel, backup),
2991 # Added since target but file is missing in working directory
2991 # Added since target but file is missing in working directory
2992 (deladded, actions['drop'], discard),
2992 (deladded, actions['drop'], discard),
2993 # Removed since target, before working copy parent
2993 # Removed since target, before working copy parent
2994 (removed, actions['add'], discard),
2994 (removed, actions['add'], discard),
2995 # Same as `removed` but an unknown file exists at the same path
2995 # Same as `removed` but an unknown file exists at the same path
2996 (removunk, actions['add'], check),
2996 (removunk, actions['add'], check),
2997 # Removed since targe, marked as such in working copy parent
2997 # Removed since targe, marked as such in working copy parent
2998 (dsremoved, actions['undelete'], discard),
2998 (dsremoved, actions['undelete'], discard),
2999 # Same as `dsremoved` but an unknown file exists at the same path
2999 # Same as `dsremoved` but an unknown file exists at the same path
3000 (dsremovunk, actions['undelete'], check),
3000 (dsremovunk, actions['undelete'], check),
3001 ## the following sets does not result in any file changes
3001 ## the following sets does not result in any file changes
3002 # File with no modification
3002 # File with no modification
3003 (clean, actions['noop'], discard),
3003 (clean, actions['noop'], discard),
3004 # Existing file, not tracked anywhere
3004 # Existing file, not tracked anywhere
3005 (unknown, actions['unknown'], discard),
3005 (unknown, actions['unknown'], discard),
3006 )
3006 )
3007
3007
3008 for abs, (rel, exact) in sorted(names.items()):
3008 for abs, (rel, exact) in sorted(names.items()):
3009 # target file to be touch on disk (relative to cwd)
3009 # target file to be touch on disk (relative to cwd)
3010 target = repo.wjoin(abs)
3010 target = repo.wjoin(abs)
3011 # search the entry in the dispatch table.
3011 # search the entry in the dispatch table.
3012 # if the file is in any of these sets, it was touched in the working
3012 # if the file is in any of these sets, it was touched in the working
3013 # directory parent and we are sure it needs to be reverted.
3013 # directory parent and we are sure it needs to be reverted.
3014 for table, (xlist, msg), dobackup in disptable:
3014 for table, (xlist, msg), dobackup in disptable:
3015 if abs not in table:
3015 if abs not in table:
3016 continue
3016 continue
3017 if xlist is not None:
3017 if xlist is not None:
3018 xlist.append(abs)
3018 xlist.append(abs)
3019 if dobackup and (backup <= dobackup
3019 if dobackup and (backup <= dobackup
3020 or wctx[abs].cmp(ctx[abs])):
3020 or wctx[abs].cmp(ctx[abs])):
3021 bakname = "%s.orig" % rel
3021 bakname = "%s.orig" % rel
3022 ui.note(_('saving current version of %s as %s\n') %
3022 ui.note(_('saving current version of %s as %s\n') %
3023 (rel, bakname))
3023 (rel, bakname))
3024 if not opts.get('dry_run'):
3024 if not opts.get('dry_run'):
3025 if interactive:
3025 if interactive:
3026 util.copyfile(target, bakname)
3026 util.copyfile(target, bakname)
3027 else:
3027 else:
3028 util.rename(target, bakname)
3028 util.rename(target, bakname)
3029 if ui.verbose or not exact:
3029 if ui.verbose or not exact:
3030 if not isinstance(msg, basestring):
3030 if not isinstance(msg, basestring):
3031 msg = msg(abs)
3031 msg = msg(abs)
3032 ui.status(msg % rel)
3032 ui.status(msg % rel)
3033 elif exact:
3033 elif exact:
3034 ui.warn(msg % rel)
3034 ui.warn(msg % rel)
3035 break
3035 break
3036
3036
3037 if not opts.get('dry_run'):
3037 if not opts.get('dry_run'):
3038 needdata = ('revert', 'add', 'undelete')
3038 needdata = ('revert', 'add', 'undelete')
3039 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3039 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3040 _performrevert(repo, parents, ctx, actions, interactive)
3040 _performrevert(repo, parents, ctx, actions, interactive)
3041
3041
3042 if targetsubs:
3042 if targetsubs:
3043 # Revert the subrepos on the revert list
3043 # Revert the subrepos on the revert list
3044 for sub in targetsubs:
3044 for sub in targetsubs:
3045 try:
3045 try:
3046 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3046 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3047 except KeyError:
3047 except KeyError:
3048 raise util.Abort("subrepository '%s' does not exist in %s!"
3048 raise util.Abort("subrepository '%s' does not exist in %s!"
3049 % (sub, short(ctx.node())))
3049 % (sub, short(ctx.node())))
3050 finally:
3050 finally:
3051 wlock.release()
3051 wlock.release()
3052
3052
3053 def _revertprefetch(repo, ctx, *files):
3053 def _revertprefetch(repo, ctx, *files):
3054 """Let extension changing the storage layer prefetch content"""
3054 """Let extension changing the storage layer prefetch content"""
3055 pass
3055 pass
3056
3056
3057 def _performrevert(repo, parents, ctx, actions, interactive=False):
3057 def _performrevert(repo, parents, ctx, actions, interactive=False):
3058 """function that actually perform all the actions computed for revert
3058 """function that actually perform all the actions computed for revert
3059
3059
3060 This is an independent function to let extension to plug in and react to
3060 This is an independent function to let extension to plug in and react to
3061 the imminent revert.
3061 the imminent revert.
3062
3062
3063 Make sure you have the working directory locked when calling this function.
3063 Make sure you have the working directory locked when calling this function.
3064 """
3064 """
3065 parent, p2 = parents
3065 parent, p2 = parents
3066 node = ctx.node()
3066 node = ctx.node()
3067 def checkout(f):
3067 def checkout(f):
3068 fc = ctx[f]
3068 fc = ctx[f]
3069 repo.wwrite(f, fc.data(), fc.flags())
3069 return repo.wwrite(f, fc.data(), fc.flags())
3070
3070
3071 audit_path = pathutil.pathauditor(repo.root)
3071 audit_path = pathutil.pathauditor(repo.root)
3072 for f in actions['forget'][0]:
3072 for f in actions['forget'][0]:
3073 repo.dirstate.drop(f)
3073 repo.dirstate.drop(f)
3074 for f in actions['remove'][0]:
3074 for f in actions['remove'][0]:
3075 audit_path(f)
3075 audit_path(f)
3076 util.unlinkpath(repo.wjoin(f))
3076 util.unlinkpath(repo.wjoin(f))
3077 repo.dirstate.remove(f)
3077 repo.dirstate.remove(f)
3078 for f in actions['drop'][0]:
3078 for f in actions['drop'][0]:
3079 audit_path(f)
3079 audit_path(f)
3080 repo.dirstate.remove(f)
3080 repo.dirstate.remove(f)
3081
3081
3082 normal = None
3082 normal = None
3083 if node == parent:
3083 if node == parent:
3084 # We're reverting to our parent. If possible, we'd like status
3084 # We're reverting to our parent. If possible, we'd like status
3085 # to report the file as clean. We have to use normallookup for
3085 # to report the file as clean. We have to use normallookup for
3086 # merges to avoid losing information about merged/dirty files.
3086 # merges to avoid losing information about merged/dirty files.
3087 if p2 != nullid:
3087 if p2 != nullid:
3088 normal = repo.dirstate.normallookup
3088 normal = repo.dirstate.normallookup
3089 else:
3089 else:
3090 normal = repo.dirstate.normal
3090 normal = repo.dirstate.normal
3091
3091
3092 if interactive:
3092 if interactive:
3093 # Prompt the user for changes to revert
3093 # Prompt the user for changes to revert
3094 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3094 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3095 m = scmutil.match(ctx, torevert, {})
3095 m = scmutil.match(ctx, torevert, {})
3096 diff = patch.diff(repo, None, ctx.node(), m)
3096 diff = patch.diff(repo, None, ctx.node(), m)
3097 originalchunks = patch.parsepatch(diff)
3097 originalchunks = patch.parsepatch(diff)
3098 try:
3098 try:
3099 chunks = recordfilter(repo.ui, originalchunks)
3099 chunks = recordfilter(repo.ui, originalchunks)
3100 except patch.PatchError, err:
3100 except patch.PatchError, err:
3101 raise util.Abort(_('error parsing patch: %s') % err)
3101 raise util.Abort(_('error parsing patch: %s') % err)
3102
3102
3103 # Apply changes
3103 # Apply changes
3104 fp = cStringIO.StringIO()
3104 fp = cStringIO.StringIO()
3105 for c in chunks:
3105 for c in chunks:
3106 c.write(fp)
3106 c.write(fp)
3107 dopatch = fp.tell()
3107 dopatch = fp.tell()
3108 fp.seek(0)
3108 fp.seek(0)
3109 if dopatch:
3109 if dopatch:
3110 try:
3110 try:
3111 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3111 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3112 except patch.PatchError, err:
3112 except patch.PatchError, err:
3113 raise util.Abort(str(err))
3113 raise util.Abort(str(err))
3114 del fp
3114 del fp
3115 else:
3115 else:
3116 for f in actions['revert'][0]:
3116 for f in actions['revert'][0]:
3117 checkout(f)
3117 wsize = checkout(f)
3118 if normal:
3118 if normal:
3119 normal(f)
3119 normal(f)
3120 elif wsize == repo.dirstate._map[f][2]:
3121 # changes may be overlooked without normallookup,
3122 # if size isn't changed at reverting
3123 repo.dirstate.normallookup(f)
3120
3124
3121 for f in actions['add'][0]:
3125 for f in actions['add'][0]:
3122 checkout(f)
3126 checkout(f)
3123 repo.dirstate.add(f)
3127 repo.dirstate.add(f)
3124
3128
3125 normal = repo.dirstate.normallookup
3129 normal = repo.dirstate.normallookup
3126 if node == parent and p2 == nullid:
3130 if node == parent and p2 == nullid:
3127 normal = repo.dirstate.normal
3131 normal = repo.dirstate.normal
3128 for f in actions['undelete'][0]:
3132 for f in actions['undelete'][0]:
3129 checkout(f)
3133 checkout(f)
3130 normal(f)
3134 normal(f)
3131
3135
3132 copied = copies.pathcopies(repo[parent], ctx)
3136 copied = copies.pathcopies(repo[parent], ctx)
3133
3137
3134 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3138 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3135 if f in copied:
3139 if f in copied:
3136 repo.dirstate.copy(copied[f], f)
3140 repo.dirstate.copy(copied[f], f)
3137
3141
3138 def command(table):
3142 def command(table):
3139 """Returns a function object to be used as a decorator for making commands.
3143 """Returns a function object to be used as a decorator for making commands.
3140
3144
3141 This function receives a command table as its argument. The table should
3145 This function receives a command table as its argument. The table should
3142 be a dict.
3146 be a dict.
3143
3147
3144 The returned function can be used as a decorator for adding commands
3148 The returned function can be used as a decorator for adding commands
3145 to that command table. This function accepts multiple arguments to define
3149 to that command table. This function accepts multiple arguments to define
3146 a command.
3150 a command.
3147
3151
3148 The first argument is the command name.
3152 The first argument is the command name.
3149
3153
3150 The options argument is an iterable of tuples defining command arguments.
3154 The options argument is an iterable of tuples defining command arguments.
3151 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3155 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3152
3156
3153 The synopsis argument defines a short, one line summary of how to use the
3157 The synopsis argument defines a short, one line summary of how to use the
3154 command. This shows up in the help output.
3158 command. This shows up in the help output.
3155
3159
3156 The norepo argument defines whether the command does not require a
3160 The norepo argument defines whether the command does not require a
3157 local repository. Most commands operate against a repository, thus the
3161 local repository. Most commands operate against a repository, thus the
3158 default is False.
3162 default is False.
3159
3163
3160 The optionalrepo argument defines whether the command optionally requires
3164 The optionalrepo argument defines whether the command optionally requires
3161 a local repository.
3165 a local repository.
3162
3166
3163 The inferrepo argument defines whether to try to find a repository from the
3167 The inferrepo argument defines whether to try to find a repository from the
3164 command line arguments. If True, arguments will be examined for potential
3168 command line arguments. If True, arguments will be examined for potential
3165 repository locations. See ``findrepo()``. If a repository is found, it
3169 repository locations. See ``findrepo()``. If a repository is found, it
3166 will be used.
3170 will be used.
3167 """
3171 """
3168 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3172 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3169 inferrepo=False):
3173 inferrepo=False):
3170 def decorator(func):
3174 def decorator(func):
3171 if synopsis:
3175 if synopsis:
3172 table[name] = func, list(options), synopsis
3176 table[name] = func, list(options), synopsis
3173 else:
3177 else:
3174 table[name] = func, list(options)
3178 table[name] = func, list(options)
3175
3179
3176 if norepo:
3180 if norepo:
3177 # Avoid import cycle.
3181 # Avoid import cycle.
3178 import commands
3182 import commands
3179 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3183 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3180
3184
3181 if optionalrepo:
3185 if optionalrepo:
3182 import commands
3186 import commands
3183 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3187 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3184
3188
3185 if inferrepo:
3189 if inferrepo:
3186 import commands
3190 import commands
3187 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3191 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3188
3192
3189 return func
3193 return func
3190 return decorator
3194 return decorator
3191
3195
3192 return cmd
3196 return cmd
3193
3197
3194 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3198 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3195 # commands.outgoing. "missing" is "missing" of the result of
3199 # commands.outgoing. "missing" is "missing" of the result of
3196 # "findcommonoutgoing()"
3200 # "findcommonoutgoing()"
3197 outgoinghooks = util.hooks()
3201 outgoinghooks = util.hooks()
3198
3202
3199 # a list of (ui, repo) functions called by commands.summary
3203 # a list of (ui, repo) functions called by commands.summary
3200 summaryhooks = util.hooks()
3204 summaryhooks = util.hooks()
3201
3205
3202 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3206 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3203 #
3207 #
3204 # functions should return tuple of booleans below, if 'changes' is None:
3208 # functions should return tuple of booleans below, if 'changes' is None:
3205 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3209 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3206 #
3210 #
3207 # otherwise, 'changes' is a tuple of tuples below:
3211 # otherwise, 'changes' is a tuple of tuples below:
3208 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3212 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3209 # - (desturl, destbranch, destpeer, outgoing)
3213 # - (desturl, destbranch, destpeer, outgoing)
3210 summaryremotehooks = util.hooks()
3214 summaryremotehooks = util.hooks()
3211
3215
3212 # A list of state files kept by multistep operations like graft.
3216 # A list of state files kept by multistep operations like graft.
3213 # Since graft cannot be aborted, it is considered 'clearable' by update.
3217 # Since graft cannot be aborted, it is considered 'clearable' by update.
3214 # note: bisect is intentionally excluded
3218 # note: bisect is intentionally excluded
3215 # (state file, clearable, allowcommit, error, hint)
3219 # (state file, clearable, allowcommit, error, hint)
3216 unfinishedstates = [
3220 unfinishedstates = [
3217 ('graftstate', True, False, _('graft in progress'),
3221 ('graftstate', True, False, _('graft in progress'),
3218 _("use 'hg graft --continue' or 'hg update' to abort")),
3222 _("use 'hg graft --continue' or 'hg update' to abort")),
3219 ('updatestate', True, False, _('last update was interrupted'),
3223 ('updatestate', True, False, _('last update was interrupted'),
3220 _("use 'hg update' to get a consistent checkout"))
3224 _("use 'hg update' to get a consistent checkout"))
3221 ]
3225 ]
3222
3226
3223 def checkunfinished(repo, commit=False):
3227 def checkunfinished(repo, commit=False):
3224 '''Look for an unfinished multistep operation, like graft, and abort
3228 '''Look for an unfinished multistep operation, like graft, and abort
3225 if found. It's probably good to check this right before
3229 if found. It's probably good to check this right before
3226 bailifchanged().
3230 bailifchanged().
3227 '''
3231 '''
3228 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3232 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3229 if commit and allowcommit:
3233 if commit and allowcommit:
3230 continue
3234 continue
3231 if repo.vfs.exists(f):
3235 if repo.vfs.exists(f):
3232 raise util.Abort(msg, hint=hint)
3236 raise util.Abort(msg, hint=hint)
3233
3237
3234 def clearunfinished(repo):
3238 def clearunfinished(repo):
3235 '''Check for unfinished operations (as above), and clear the ones
3239 '''Check for unfinished operations (as above), and clear the ones
3236 that are clearable.
3240 that are clearable.
3237 '''
3241 '''
3238 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3242 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3239 if not clearable and repo.vfs.exists(f):
3243 if not clearable and repo.vfs.exists(f):
3240 raise util.Abort(msg, hint=hint)
3244 raise util.Abort(msg, hint=hint)
3241 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3245 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3242 if clearable and repo.vfs.exists(f):
3246 if clearable and repo.vfs.exists(f):
3243 util.unlink(repo.join(f))
3247 util.unlink(repo.join(f))
@@ -1,1967 +1,1972 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 **kwargs):
110 **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG20' in bundlecaps:
113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.getunbundler(self.ui, cg)
117 cg = bundle2.getunbundler(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 try:
128 try:
129 cg = exchange.readbundle(self.ui, cg, None)
129 cg = exchange.readbundle(self.ui, cg, None)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 if util.safehasattr(ret, 'getchunks'):
131 if util.safehasattr(ret, 'getchunks'):
132 # This is a bundle20 object, turn it into an unbundler.
132 # This is a bundle20 object, turn it into an unbundler.
133 # This little dance should be dropped eventually when the
133 # This little dance should be dropped eventually when the
134 # API is finally improved.
134 # API is finally improved.
135 stream = util.chunkbuffer(ret.getchunks())
135 stream = util.chunkbuffer(ret.getchunks())
136 ret = bundle2.getunbundler(self.ui, stream)
136 ret = bundle2.getunbundler(self.ui, stream)
137 return ret
137 return ret
138 except Exception, exc:
138 except Exception, exc:
139 # If the exception contains output salvaged from a bundle2
139 # If the exception contains output salvaged from a bundle2
140 # reply, we need to make sure it is printed before continuing
140 # reply, we need to make sure it is printed before continuing
141 # to fail. So we build a bundle2 with such output and consume
141 # to fail. So we build a bundle2 with such output and consume
142 # it directly.
142 # it directly.
143 #
143 #
144 # This is not very elegant but allows a "simple" solution for
144 # This is not very elegant but allows a "simple" solution for
145 # issue4594
145 # issue4594
146 output = getattr(exc, '_bundle2salvagedoutput', ())
146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 if output:
147 if output:
148 bundler = bundle2.bundle20(self._repo.ui)
148 bundler = bundle2.bundle20(self._repo.ui)
149 for out in output:
149 for out in output:
150 bundler.addpart(out)
150 bundler.addpart(out)
151 stream = util.chunkbuffer(bundler.getchunks())
151 stream = util.chunkbuffer(bundler.getchunks())
152 b = bundle2.getunbundler(self.ui, stream)
152 b = bundle2.getunbundler(self.ui, stream)
153 bundle2.processbundle(self._repo, b)
153 bundle2.processbundle(self._repo, b)
154 raise
154 raise
155 except error.PushRaced, exc:
155 except error.PushRaced, exc:
156 raise error.ResponseError(_('push failed:'), str(exc))
156 raise error.ResponseError(_('push failed:'), str(exc))
157
157
158 def lock(self):
158 def lock(self):
159 return self._repo.lock()
159 return self._repo.lock()
160
160
161 def addchangegroup(self, cg, source, url):
161 def addchangegroup(self, cg, source, url):
162 return changegroup.addchangegroup(self._repo, cg, source, url)
162 return changegroup.addchangegroup(self._repo, cg, source, url)
163
163
164 def pushkey(self, namespace, key, old, new):
164 def pushkey(self, namespace, key, old, new):
165 return self._repo.pushkey(namespace, key, old, new)
165 return self._repo.pushkey(namespace, key, old, new)
166
166
167 def listkeys(self, namespace):
167 def listkeys(self, namespace):
168 return self._repo.listkeys(namespace)
168 return self._repo.listkeys(namespace)
169
169
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 '''used to test argument passing over the wire'''
171 '''used to test argument passing over the wire'''
172 return "%s %s %s %s %s" % (one, two, three, four, five)
172 return "%s %s %s %s %s" % (one, two, three, four, five)
173
173
174 class locallegacypeer(localpeer):
174 class locallegacypeer(localpeer):
175 '''peer extension which implements legacy methods too; used for tests with
175 '''peer extension which implements legacy methods too; used for tests with
176 restricted capabilities'''
176 restricted capabilities'''
177
177
178 def __init__(self, repo):
178 def __init__(self, repo):
179 localpeer.__init__(self, repo, caps=legacycaps)
179 localpeer.__init__(self, repo, caps=legacycaps)
180
180
181 def branches(self, nodes):
181 def branches(self, nodes):
182 return self._repo.branches(nodes)
182 return self._repo.branches(nodes)
183
183
184 def between(self, pairs):
184 def between(self, pairs):
185 return self._repo.between(pairs)
185 return self._repo.between(pairs)
186
186
187 def changegroup(self, basenodes, source):
187 def changegroup(self, basenodes, source):
188 return changegroup.changegroup(self._repo, basenodes, source)
188 return changegroup.changegroup(self._repo, basenodes, source)
189
189
190 def changegroupsubset(self, bases, heads, source):
190 def changegroupsubset(self, bases, heads, source):
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192
192
193 class localrepository(object):
193 class localrepository(object):
194
194
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
195 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
196 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
197 'dotencode'))
197 'dotencode'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
198 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
199 requirements = ['revlogv1']
199 requirements = ['revlogv1']
200 filtername = None
200 filtername = None
201
201
202 # a list of (ui, featureset) functions.
202 # a list of (ui, featureset) functions.
203 # only functions defined in module of enabled extensions are invoked
203 # only functions defined in module of enabled extensions are invoked
204 featuresetupfuncs = set()
204 featuresetupfuncs = set()
205
205
206 def _baserequirements(self, create):
206 def _baserequirements(self, create):
207 return self.requirements[:]
207 return self.requirements[:]
208
208
209 def __init__(self, baseui, path=None, create=False):
209 def __init__(self, baseui, path=None, create=False):
210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
210 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
211 self.wopener = self.wvfs
211 self.wopener = self.wvfs
212 self.root = self.wvfs.base
212 self.root = self.wvfs.base
213 self.path = self.wvfs.join(".hg")
213 self.path = self.wvfs.join(".hg")
214 self.origroot = path
214 self.origroot = path
215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
215 self.auditor = pathutil.pathauditor(self.root, self._checknested)
216 self.vfs = scmutil.vfs(self.path)
216 self.vfs = scmutil.vfs(self.path)
217 self.opener = self.vfs
217 self.opener = self.vfs
218 self.baseui = baseui
218 self.baseui = baseui
219 self.ui = baseui.copy()
219 self.ui = baseui.copy()
220 self.ui.copy = baseui.copy # prevent copying repo configuration
220 self.ui.copy = baseui.copy # prevent copying repo configuration
221 # A list of callback to shape the phase if no data were found.
221 # A list of callback to shape the phase if no data were found.
222 # Callback are in the form: func(repo, roots) --> processed root.
222 # Callback are in the form: func(repo, roots) --> processed root.
223 # This list it to be filled by extension during repo setup
223 # This list it to be filled by extension during repo setup
224 self._phasedefaults = []
224 self._phasedefaults = []
225 try:
225 try:
226 self.ui.readconfig(self.join("hgrc"), self.root)
226 self.ui.readconfig(self.join("hgrc"), self.root)
227 extensions.loadall(self.ui)
227 extensions.loadall(self.ui)
228 except IOError:
228 except IOError:
229 pass
229 pass
230
230
231 if self.featuresetupfuncs:
231 if self.featuresetupfuncs:
232 self.supported = set(self._basesupported) # use private copy
232 self.supported = set(self._basesupported) # use private copy
233 extmods = set(m.__name__ for n, m
233 extmods = set(m.__name__ for n, m
234 in extensions.extensions(self.ui))
234 in extensions.extensions(self.ui))
235 for setupfunc in self.featuresetupfuncs:
235 for setupfunc in self.featuresetupfuncs:
236 if setupfunc.__module__ in extmods:
236 if setupfunc.__module__ in extmods:
237 setupfunc(self.ui, self.supported)
237 setupfunc(self.ui, self.supported)
238 else:
238 else:
239 self.supported = self._basesupported
239 self.supported = self._basesupported
240
240
241 if not self.vfs.isdir():
241 if not self.vfs.isdir():
242 if create:
242 if create:
243 if not self.wvfs.exists():
243 if not self.wvfs.exists():
244 self.wvfs.makedirs()
244 self.wvfs.makedirs()
245 self.vfs.makedir(notindexed=True)
245 self.vfs.makedir(notindexed=True)
246 requirements = self._baserequirements(create)
246 requirements = self._baserequirements(create)
247 if self.ui.configbool('format', 'usestore', True):
247 if self.ui.configbool('format', 'usestore', True):
248 self.vfs.mkdir("store")
248 self.vfs.mkdir("store")
249 requirements.append("store")
249 requirements.append("store")
250 if self.ui.configbool('format', 'usefncache', True):
250 if self.ui.configbool('format', 'usefncache', True):
251 requirements.append("fncache")
251 requirements.append("fncache")
252 if self.ui.configbool('format', 'dotencode', True):
252 if self.ui.configbool('format', 'dotencode', True):
253 requirements.append('dotencode')
253 requirements.append('dotencode')
254 # create an invalid changelog
254 # create an invalid changelog
255 self.vfs.append(
255 self.vfs.append(
256 "00changelog.i",
256 "00changelog.i",
257 '\0\0\0\2' # represents revlogv2
257 '\0\0\0\2' # represents revlogv2
258 ' dummy changelog to prevent using the old repo layout'
258 ' dummy changelog to prevent using the old repo layout'
259 )
259 )
260 if self.ui.configbool('format', 'generaldelta', False):
260 if self.ui.configbool('format', 'generaldelta', False):
261 requirements.append("generaldelta")
261 requirements.append("generaldelta")
262 if self.ui.configbool('experimental', 'manifestv2', False):
262 if self.ui.configbool('experimental', 'manifestv2', False):
263 requirements.append("manifestv2")
263 requirements.append("manifestv2")
264 requirements = set(requirements)
264 requirements = set(requirements)
265 else:
265 else:
266 raise error.RepoError(_("repository %s not found") % path)
266 raise error.RepoError(_("repository %s not found") % path)
267 elif create:
267 elif create:
268 raise error.RepoError(_("repository %s already exists") % path)
268 raise error.RepoError(_("repository %s already exists") % path)
269 else:
269 else:
270 try:
270 try:
271 requirements = scmutil.readrequires(self.vfs, self.supported)
271 requirements = scmutil.readrequires(self.vfs, self.supported)
272 except IOError, inst:
272 except IOError, inst:
273 if inst.errno != errno.ENOENT:
273 if inst.errno != errno.ENOENT:
274 raise
274 raise
275 requirements = set()
275 requirements = set()
276
276
277 self.sharedpath = self.path
277 self.sharedpath = self.path
278 try:
278 try:
279 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
279 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
280 realpath=True)
280 realpath=True)
281 s = vfs.base
281 s = vfs.base
282 if not vfs.exists():
282 if not vfs.exists():
283 raise error.RepoError(
283 raise error.RepoError(
284 _('.hg/sharedpath points to nonexistent directory %s') % s)
284 _('.hg/sharedpath points to nonexistent directory %s') % s)
285 self.sharedpath = s
285 self.sharedpath = s
286 except IOError, inst:
286 except IOError, inst:
287 if inst.errno != errno.ENOENT:
287 if inst.errno != errno.ENOENT:
288 raise
288 raise
289
289
290 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
290 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
291 self.spath = self.store.path
291 self.spath = self.store.path
292 self.svfs = self.store.vfs
292 self.svfs = self.store.vfs
293 self.sopener = self.svfs
293 self.sopener = self.svfs
294 self.sjoin = self.store.join
294 self.sjoin = self.store.join
295 self.vfs.createmode = self.store.createmode
295 self.vfs.createmode = self.store.createmode
296 self._applyrequirements(requirements)
296 self._applyrequirements(requirements)
297 if create:
297 if create:
298 self._writerequirements()
298 self._writerequirements()
299
299
300
300
301 self._branchcaches = {}
301 self._branchcaches = {}
302 self._revbranchcache = None
302 self._revbranchcache = None
303 self.filterpats = {}
303 self.filterpats = {}
304 self._datafilters = {}
304 self._datafilters = {}
305 self._transref = self._lockref = self._wlockref = None
305 self._transref = self._lockref = self._wlockref = None
306
306
307 # A cache for various files under .hg/ that tracks file changes,
307 # A cache for various files under .hg/ that tracks file changes,
308 # (used by the filecache decorator)
308 # (used by the filecache decorator)
309 #
309 #
310 # Maps a property name to its util.filecacheentry
310 # Maps a property name to its util.filecacheentry
311 self._filecache = {}
311 self._filecache = {}
312
312
313 # hold sets of revision to be filtered
313 # hold sets of revision to be filtered
314 # should be cleared when something might have changed the filter value:
314 # should be cleared when something might have changed the filter value:
315 # - new changesets,
315 # - new changesets,
316 # - phase change,
316 # - phase change,
317 # - new obsolescence marker,
317 # - new obsolescence marker,
318 # - working directory parent change,
318 # - working directory parent change,
319 # - bookmark changes
319 # - bookmark changes
320 self.filteredrevcache = {}
320 self.filteredrevcache = {}
321
321
322 # generic mapping between names and nodes
322 # generic mapping between names and nodes
323 self.names = namespaces.namespaces()
323 self.names = namespaces.namespaces()
324
324
325 def close(self):
325 def close(self):
326 self._writecaches()
326 self._writecaches()
327
327
328 def _writecaches(self):
328 def _writecaches(self):
329 if self._revbranchcache:
329 if self._revbranchcache:
330 self._revbranchcache.write()
330 self._revbranchcache.write()
331
331
332 def _restrictcapabilities(self, caps):
332 def _restrictcapabilities(self, caps):
333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
333 if self.ui.configbool('experimental', 'bundle2-advertise', True):
334 caps = set(caps)
334 caps = set(caps)
335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
335 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
336 caps.add('bundle2=' + urllib.quote(capsblob))
336 caps.add('bundle2=' + urllib.quote(capsblob))
337 return caps
337 return caps
338
338
339 def _applyrequirements(self, requirements):
339 def _applyrequirements(self, requirements):
340 self.requirements = requirements
340 self.requirements = requirements
341 self.svfs.options = dict((r, 1) for r in requirements
341 self.svfs.options = dict((r, 1) for r in requirements
342 if r in self.openerreqs)
342 if r in self.openerreqs)
343 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
343 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
344 if chunkcachesize is not None:
344 if chunkcachesize is not None:
345 self.svfs.options['chunkcachesize'] = chunkcachesize
345 self.svfs.options['chunkcachesize'] = chunkcachesize
346 maxchainlen = self.ui.configint('format', 'maxchainlen')
346 maxchainlen = self.ui.configint('format', 'maxchainlen')
347 if maxchainlen is not None:
347 if maxchainlen is not None:
348 self.svfs.options['maxchainlen'] = maxchainlen
348 self.svfs.options['maxchainlen'] = maxchainlen
349 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
349 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
350 if manifestcachesize is not None:
350 if manifestcachesize is not None:
351 self.svfs.options['manifestcachesize'] = manifestcachesize
351 self.svfs.options['manifestcachesize'] = manifestcachesize
352 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
352 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
353 if usetreemanifest is not None:
353 if usetreemanifest is not None:
354 self.svfs.options['usetreemanifest'] = usetreemanifest
354 self.svfs.options['usetreemanifest'] = usetreemanifest
355
355
356 def _writerequirements(self):
356 def _writerequirements(self):
357 reqfile = self.vfs("requires", "w")
357 reqfile = self.vfs("requires", "w")
358 for r in sorted(self.requirements):
358 for r in sorted(self.requirements):
359 reqfile.write("%s\n" % r)
359 reqfile.write("%s\n" % r)
360 reqfile.close()
360 reqfile.close()
361
361
362 def _checknested(self, path):
362 def _checknested(self, path):
363 """Determine if path is a legal nested repository."""
363 """Determine if path is a legal nested repository."""
364 if not path.startswith(self.root):
364 if not path.startswith(self.root):
365 return False
365 return False
366 subpath = path[len(self.root) + 1:]
366 subpath = path[len(self.root) + 1:]
367 normsubpath = util.pconvert(subpath)
367 normsubpath = util.pconvert(subpath)
368
368
369 # XXX: Checking against the current working copy is wrong in
369 # XXX: Checking against the current working copy is wrong in
370 # the sense that it can reject things like
370 # the sense that it can reject things like
371 #
371 #
372 # $ hg cat -r 10 sub/x.txt
372 # $ hg cat -r 10 sub/x.txt
373 #
373 #
374 # if sub/ is no longer a subrepository in the working copy
374 # if sub/ is no longer a subrepository in the working copy
375 # parent revision.
375 # parent revision.
376 #
376 #
377 # However, it can of course also allow things that would have
377 # However, it can of course also allow things that would have
378 # been rejected before, such as the above cat command if sub/
378 # been rejected before, such as the above cat command if sub/
379 # is a subrepository now, but was a normal directory before.
379 # is a subrepository now, but was a normal directory before.
380 # The old path auditor would have rejected by mistake since it
380 # The old path auditor would have rejected by mistake since it
381 # panics when it sees sub/.hg/.
381 # panics when it sees sub/.hg/.
382 #
382 #
383 # All in all, checking against the working copy seems sensible
383 # All in all, checking against the working copy seems sensible
384 # since we want to prevent access to nested repositories on
384 # since we want to prevent access to nested repositories on
385 # the filesystem *now*.
385 # the filesystem *now*.
386 ctx = self[None]
386 ctx = self[None]
387 parts = util.splitpath(subpath)
387 parts = util.splitpath(subpath)
388 while parts:
388 while parts:
389 prefix = '/'.join(parts)
389 prefix = '/'.join(parts)
390 if prefix in ctx.substate:
390 if prefix in ctx.substate:
391 if prefix == normsubpath:
391 if prefix == normsubpath:
392 return True
392 return True
393 else:
393 else:
394 sub = ctx.sub(prefix)
394 sub = ctx.sub(prefix)
395 return sub.checknested(subpath[len(prefix) + 1:])
395 return sub.checknested(subpath[len(prefix) + 1:])
396 else:
396 else:
397 parts.pop()
397 parts.pop()
398 return False
398 return False
399
399
400 def peer(self):
400 def peer(self):
401 return localpeer(self) # not cached to avoid reference cycle
401 return localpeer(self) # not cached to avoid reference cycle
402
402
403 def unfiltered(self):
403 def unfiltered(self):
404 """Return unfiltered version of the repository
404 """Return unfiltered version of the repository
405
405
406 Intended to be overwritten by filtered repo."""
406 Intended to be overwritten by filtered repo."""
407 return self
407 return self
408
408
409 def filtered(self, name):
409 def filtered(self, name):
410 """Return a filtered version of a repository"""
410 """Return a filtered version of a repository"""
411 # build a new class with the mixin and the current class
411 # build a new class with the mixin and the current class
412 # (possibly subclass of the repo)
412 # (possibly subclass of the repo)
413 class proxycls(repoview.repoview, self.unfiltered().__class__):
413 class proxycls(repoview.repoview, self.unfiltered().__class__):
414 pass
414 pass
415 return proxycls(self, name)
415 return proxycls(self, name)
416
416
417 @repofilecache('bookmarks')
417 @repofilecache('bookmarks')
418 def _bookmarks(self):
418 def _bookmarks(self):
419 return bookmarks.bmstore(self)
419 return bookmarks.bmstore(self)
420
420
421 @repofilecache('bookmarks.current')
421 @repofilecache('bookmarks.current')
422 def _bookmarkcurrent(self):
422 def _bookmarkcurrent(self):
423 return bookmarks.readcurrent(self)
423 return bookmarks.readcurrent(self)
424
424
425 def bookmarkheads(self, bookmark):
425 def bookmarkheads(self, bookmark):
426 name = bookmark.split('@', 1)[0]
426 name = bookmark.split('@', 1)[0]
427 heads = []
427 heads = []
428 for mark, n in self._bookmarks.iteritems():
428 for mark, n in self._bookmarks.iteritems():
429 if mark.split('@', 1)[0] == name:
429 if mark.split('@', 1)[0] == name:
430 heads.append(n)
430 heads.append(n)
431 return heads
431 return heads
432
432
433 @storecache('phaseroots')
433 @storecache('phaseroots')
434 def _phasecache(self):
434 def _phasecache(self):
435 return phases.phasecache(self, self._phasedefaults)
435 return phases.phasecache(self, self._phasedefaults)
436
436
437 @storecache('obsstore')
437 @storecache('obsstore')
438 def obsstore(self):
438 def obsstore(self):
439 # read default format for new obsstore.
439 # read default format for new obsstore.
440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
440 defaultformat = self.ui.configint('format', 'obsstore-version', None)
441 # rely on obsstore class default when possible.
441 # rely on obsstore class default when possible.
442 kwargs = {}
442 kwargs = {}
443 if defaultformat is not None:
443 if defaultformat is not None:
444 kwargs['defaultformat'] = defaultformat
444 kwargs['defaultformat'] = defaultformat
445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
445 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
446 store = obsolete.obsstore(self.svfs, readonly=readonly,
446 store = obsolete.obsstore(self.svfs, readonly=readonly,
447 **kwargs)
447 **kwargs)
448 if store and readonly:
448 if store and readonly:
449 self.ui.warn(
449 self.ui.warn(
450 _('obsolete feature not enabled but %i markers found!\n')
450 _('obsolete feature not enabled but %i markers found!\n')
451 % len(list(store)))
451 % len(list(store)))
452 return store
452 return store
453
453
454 @storecache('00changelog.i')
454 @storecache('00changelog.i')
455 def changelog(self):
455 def changelog(self):
456 c = changelog.changelog(self.svfs)
456 c = changelog.changelog(self.svfs)
457 if 'HG_PENDING' in os.environ:
457 if 'HG_PENDING' in os.environ:
458 p = os.environ['HG_PENDING']
458 p = os.environ['HG_PENDING']
459 if p.startswith(self.root):
459 if p.startswith(self.root):
460 c.readpending('00changelog.i.a')
460 c.readpending('00changelog.i.a')
461 return c
461 return c
462
462
463 @storecache('00manifest.i')
463 @storecache('00manifest.i')
464 def manifest(self):
464 def manifest(self):
465 return manifest.manifest(self.svfs)
465 return manifest.manifest(self.svfs)
466
466
467 @repofilecache('dirstate')
467 @repofilecache('dirstate')
468 def dirstate(self):
468 def dirstate(self):
469 warned = [0]
469 warned = [0]
470 def validate(node):
470 def validate(node):
471 try:
471 try:
472 self.changelog.rev(node)
472 self.changelog.rev(node)
473 return node
473 return node
474 except error.LookupError:
474 except error.LookupError:
475 if not warned[0]:
475 if not warned[0]:
476 warned[0] = True
476 warned[0] = True
477 self.ui.warn(_("warning: ignoring unknown"
477 self.ui.warn(_("warning: ignoring unknown"
478 " working parent %s!\n") % short(node))
478 " working parent %s!\n") % short(node))
479 return nullid
479 return nullid
480
480
481 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
481 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
482
482
483 def __getitem__(self, changeid):
483 def __getitem__(self, changeid):
484 if changeid is None:
484 if changeid is None:
485 return context.workingctx(self)
485 return context.workingctx(self)
486 if isinstance(changeid, slice):
486 if isinstance(changeid, slice):
487 return [context.changectx(self, i)
487 return [context.changectx(self, i)
488 for i in xrange(*changeid.indices(len(self)))
488 for i in xrange(*changeid.indices(len(self)))
489 if i not in self.changelog.filteredrevs]
489 if i not in self.changelog.filteredrevs]
490 return context.changectx(self, changeid)
490 return context.changectx(self, changeid)
491
491
492 def __contains__(self, changeid):
492 def __contains__(self, changeid):
493 try:
493 try:
494 self[changeid]
494 self[changeid]
495 return True
495 return True
496 except error.RepoLookupError:
496 except error.RepoLookupError:
497 return False
497 return False
498
498
499 def __nonzero__(self):
499 def __nonzero__(self):
500 return True
500 return True
501
501
502 def __len__(self):
502 def __len__(self):
503 return len(self.changelog)
503 return len(self.changelog)
504
504
505 def __iter__(self):
505 def __iter__(self):
506 return iter(self.changelog)
506 return iter(self.changelog)
507
507
508 def revs(self, expr, *args):
508 def revs(self, expr, *args):
509 '''Return a list of revisions matching the given revset'''
509 '''Return a list of revisions matching the given revset'''
510 expr = revset.formatspec(expr, *args)
510 expr = revset.formatspec(expr, *args)
511 m = revset.match(None, expr)
511 m = revset.match(None, expr)
512 return m(self)
512 return m(self)
513
513
514 def set(self, expr, *args):
514 def set(self, expr, *args):
515 '''
515 '''
516 Yield a context for each matching revision, after doing arg
516 Yield a context for each matching revision, after doing arg
517 replacement via revset.formatspec
517 replacement via revset.formatspec
518 '''
518 '''
519 for r in self.revs(expr, *args):
519 for r in self.revs(expr, *args):
520 yield self[r]
520 yield self[r]
521
521
522 def url(self):
522 def url(self):
523 return 'file:' + self.root
523 return 'file:' + self.root
524
524
525 def hook(self, name, throw=False, **args):
525 def hook(self, name, throw=False, **args):
526 """Call a hook, passing this repo instance.
526 """Call a hook, passing this repo instance.
527
527
528 This a convenience method to aid invoking hooks. Extensions likely
528 This a convenience method to aid invoking hooks. Extensions likely
529 won't call this unless they have registered a custom hook or are
529 won't call this unless they have registered a custom hook or are
530 replacing code that is expected to call a hook.
530 replacing code that is expected to call a hook.
531 """
531 """
532 return hook.hook(self.ui, self, name, throw, **args)
532 return hook.hook(self.ui, self, name, throw, **args)
533
533
534 @unfilteredmethod
534 @unfilteredmethod
535 def _tag(self, names, node, message, local, user, date, extra={},
535 def _tag(self, names, node, message, local, user, date, extra={},
536 editor=False):
536 editor=False):
537 if isinstance(names, str):
537 if isinstance(names, str):
538 names = (names,)
538 names = (names,)
539
539
540 branches = self.branchmap()
540 branches = self.branchmap()
541 for name in names:
541 for name in names:
542 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 self.hook('pretag', throw=True, node=hex(node), tag=name,
543 local=local)
543 local=local)
544 if name in branches:
544 if name in branches:
545 self.ui.warn(_("warning: tag %s conflicts with existing"
545 self.ui.warn(_("warning: tag %s conflicts with existing"
546 " branch name\n") % name)
546 " branch name\n") % name)
547
547
548 def writetags(fp, names, munge, prevtags):
548 def writetags(fp, names, munge, prevtags):
549 fp.seek(0, 2)
549 fp.seek(0, 2)
550 if prevtags and prevtags[-1] != '\n':
550 if prevtags and prevtags[-1] != '\n':
551 fp.write('\n')
551 fp.write('\n')
552 for name in names:
552 for name in names:
553 if munge:
553 if munge:
554 m = munge(name)
554 m = munge(name)
555 else:
555 else:
556 m = name
556 m = name
557
557
558 if (self._tagscache.tagtypes and
558 if (self._tagscache.tagtypes and
559 name in self._tagscache.tagtypes):
559 name in self._tagscache.tagtypes):
560 old = self.tags().get(name, nullid)
560 old = self.tags().get(name, nullid)
561 fp.write('%s %s\n' % (hex(old), m))
561 fp.write('%s %s\n' % (hex(old), m))
562 fp.write('%s %s\n' % (hex(node), m))
562 fp.write('%s %s\n' % (hex(node), m))
563 fp.close()
563 fp.close()
564
564
565 prevtags = ''
565 prevtags = ''
566 if local:
566 if local:
567 try:
567 try:
568 fp = self.vfs('localtags', 'r+')
568 fp = self.vfs('localtags', 'r+')
569 except IOError:
569 except IOError:
570 fp = self.vfs('localtags', 'a')
570 fp = self.vfs('localtags', 'a')
571 else:
571 else:
572 prevtags = fp.read()
572 prevtags = fp.read()
573
573
574 # local tags are stored in the current charset
574 # local tags are stored in the current charset
575 writetags(fp, names, None, prevtags)
575 writetags(fp, names, None, prevtags)
576 for name in names:
576 for name in names:
577 self.hook('tag', node=hex(node), tag=name, local=local)
577 self.hook('tag', node=hex(node), tag=name, local=local)
578 return
578 return
579
579
580 try:
580 try:
581 fp = self.wfile('.hgtags', 'rb+')
581 fp = self.wfile('.hgtags', 'rb+')
582 except IOError, e:
582 except IOError, e:
583 if e.errno != errno.ENOENT:
583 if e.errno != errno.ENOENT:
584 raise
584 raise
585 fp = self.wfile('.hgtags', 'ab')
585 fp = self.wfile('.hgtags', 'ab')
586 else:
586 else:
587 prevtags = fp.read()
587 prevtags = fp.read()
588
588
589 # committed tags are stored in UTF-8
589 # committed tags are stored in UTF-8
590 writetags(fp, names, encoding.fromlocal, prevtags)
590 writetags(fp, names, encoding.fromlocal, prevtags)
591
591
592 fp.close()
592 fp.close()
593
593
594 self.invalidatecaches()
594 self.invalidatecaches()
595
595
596 if '.hgtags' not in self.dirstate:
596 if '.hgtags' not in self.dirstate:
597 self[None].add(['.hgtags'])
597 self[None].add(['.hgtags'])
598
598
599 m = matchmod.exact(self.root, '', ['.hgtags'])
599 m = matchmod.exact(self.root, '', ['.hgtags'])
600 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 tagnode = self.commit(message, user, date, extra=extra, match=m,
601 editor=editor)
601 editor=editor)
602
602
603 for name in names:
603 for name in names:
604 self.hook('tag', node=hex(node), tag=name, local=local)
604 self.hook('tag', node=hex(node), tag=name, local=local)
605
605
606 return tagnode
606 return tagnode
607
607
608 def tag(self, names, node, message, local, user, date, editor=False):
608 def tag(self, names, node, message, local, user, date, editor=False):
609 '''tag a revision with one or more symbolic names.
609 '''tag a revision with one or more symbolic names.
610
610
611 names is a list of strings or, when adding a single tag, names may be a
611 names is a list of strings or, when adding a single tag, names may be a
612 string.
612 string.
613
613
614 if local is True, the tags are stored in a per-repository file.
614 if local is True, the tags are stored in a per-repository file.
615 otherwise, they are stored in the .hgtags file, and a new
615 otherwise, they are stored in the .hgtags file, and a new
616 changeset is committed with the change.
616 changeset is committed with the change.
617
617
618 keyword arguments:
618 keyword arguments:
619
619
620 local: whether to store tags in non-version-controlled file
620 local: whether to store tags in non-version-controlled file
621 (default False)
621 (default False)
622
622
623 message: commit message to use if committing
623 message: commit message to use if committing
624
624
625 user: name of user to use if committing
625 user: name of user to use if committing
626
626
627 date: date tuple to use if committing'''
627 date: date tuple to use if committing'''
628
628
629 if not local:
629 if not local:
630 m = matchmod.exact(self.root, '', ['.hgtags'])
630 m = matchmod.exact(self.root, '', ['.hgtags'])
631 if util.any(self.status(match=m, unknown=True, ignored=True)):
631 if util.any(self.status(match=m, unknown=True, ignored=True)):
632 raise util.Abort(_('working copy of .hgtags is changed'),
632 raise util.Abort(_('working copy of .hgtags is changed'),
633 hint=_('please commit .hgtags manually'))
633 hint=_('please commit .hgtags manually'))
634
634
635 self.tags() # instantiate the cache
635 self.tags() # instantiate the cache
636 self._tag(names, node, message, local, user, date, editor=editor)
636 self._tag(names, node, message, local, user, date, editor=editor)
637
637
638 @filteredpropertycache
638 @filteredpropertycache
639 def _tagscache(self):
639 def _tagscache(self):
640 '''Returns a tagscache object that contains various tags related
640 '''Returns a tagscache object that contains various tags related
641 caches.'''
641 caches.'''
642
642
643 # This simplifies its cache management by having one decorated
643 # This simplifies its cache management by having one decorated
644 # function (this one) and the rest simply fetch things from it.
644 # function (this one) and the rest simply fetch things from it.
645 class tagscache(object):
645 class tagscache(object):
646 def __init__(self):
646 def __init__(self):
647 # These two define the set of tags for this repository. tags
647 # These two define the set of tags for this repository. tags
648 # maps tag name to node; tagtypes maps tag name to 'global' or
648 # maps tag name to node; tagtypes maps tag name to 'global' or
649 # 'local'. (Global tags are defined by .hgtags across all
649 # 'local'. (Global tags are defined by .hgtags across all
650 # heads, and local tags are defined in .hg/localtags.)
650 # heads, and local tags are defined in .hg/localtags.)
651 # They constitute the in-memory cache of tags.
651 # They constitute the in-memory cache of tags.
652 self.tags = self.tagtypes = None
652 self.tags = self.tagtypes = None
653
653
654 self.nodetagscache = self.tagslist = None
654 self.nodetagscache = self.tagslist = None
655
655
656 cache = tagscache()
656 cache = tagscache()
657 cache.tags, cache.tagtypes = self._findtags()
657 cache.tags, cache.tagtypes = self._findtags()
658
658
659 return cache
659 return cache
660
660
661 def tags(self):
661 def tags(self):
662 '''return a mapping of tag to node'''
662 '''return a mapping of tag to node'''
663 t = {}
663 t = {}
664 if self.changelog.filteredrevs:
664 if self.changelog.filteredrevs:
665 tags, tt = self._findtags()
665 tags, tt = self._findtags()
666 else:
666 else:
667 tags = self._tagscache.tags
667 tags = self._tagscache.tags
668 for k, v in tags.iteritems():
668 for k, v in tags.iteritems():
669 try:
669 try:
670 # ignore tags to unknown nodes
670 # ignore tags to unknown nodes
671 self.changelog.rev(v)
671 self.changelog.rev(v)
672 t[k] = v
672 t[k] = v
673 except (error.LookupError, ValueError):
673 except (error.LookupError, ValueError):
674 pass
674 pass
675 return t
675 return t
676
676
677 def _findtags(self):
677 def _findtags(self):
678 '''Do the hard work of finding tags. Return a pair of dicts
678 '''Do the hard work of finding tags. Return a pair of dicts
679 (tags, tagtypes) where tags maps tag name to node, and tagtypes
679 (tags, tagtypes) where tags maps tag name to node, and tagtypes
680 maps tag name to a string like \'global\' or \'local\'.
680 maps tag name to a string like \'global\' or \'local\'.
681 Subclasses or extensions are free to add their own tags, but
681 Subclasses or extensions are free to add their own tags, but
682 should be aware that the returned dicts will be retained for the
682 should be aware that the returned dicts will be retained for the
683 duration of the localrepo object.'''
683 duration of the localrepo object.'''
684
684
685 # XXX what tagtype should subclasses/extensions use? Currently
685 # XXX what tagtype should subclasses/extensions use? Currently
686 # mq and bookmarks add tags, but do not set the tagtype at all.
686 # mq and bookmarks add tags, but do not set the tagtype at all.
687 # Should each extension invent its own tag type? Should there
687 # Should each extension invent its own tag type? Should there
688 # be one tagtype for all such "virtual" tags? Or is the status
688 # be one tagtype for all such "virtual" tags? Or is the status
689 # quo fine?
689 # quo fine?
690
690
691 alltags = {} # map tag name to (node, hist)
691 alltags = {} # map tag name to (node, hist)
692 tagtypes = {}
692 tagtypes = {}
693
693
694 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
694 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
695 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
695 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
696
696
697 # Build the return dicts. Have to re-encode tag names because
697 # Build the return dicts. Have to re-encode tag names because
698 # the tags module always uses UTF-8 (in order not to lose info
698 # the tags module always uses UTF-8 (in order not to lose info
699 # writing to the cache), but the rest of Mercurial wants them in
699 # writing to the cache), but the rest of Mercurial wants them in
700 # local encoding.
700 # local encoding.
701 tags = {}
701 tags = {}
702 for (name, (node, hist)) in alltags.iteritems():
702 for (name, (node, hist)) in alltags.iteritems():
703 if node != nullid:
703 if node != nullid:
704 tags[encoding.tolocal(name)] = node
704 tags[encoding.tolocal(name)] = node
705 tags['tip'] = self.changelog.tip()
705 tags['tip'] = self.changelog.tip()
706 tagtypes = dict([(encoding.tolocal(name), value)
706 tagtypes = dict([(encoding.tolocal(name), value)
707 for (name, value) in tagtypes.iteritems()])
707 for (name, value) in tagtypes.iteritems()])
708 return (tags, tagtypes)
708 return (tags, tagtypes)
709
709
710 def tagtype(self, tagname):
710 def tagtype(self, tagname):
711 '''
711 '''
712 return the type of the given tag. result can be:
712 return the type of the given tag. result can be:
713
713
714 'local' : a local tag
714 'local' : a local tag
715 'global' : a global tag
715 'global' : a global tag
716 None : tag does not exist
716 None : tag does not exist
717 '''
717 '''
718
718
719 return self._tagscache.tagtypes.get(tagname)
719 return self._tagscache.tagtypes.get(tagname)
720
720
721 def tagslist(self):
721 def tagslist(self):
722 '''return a list of tags ordered by revision'''
722 '''return a list of tags ordered by revision'''
723 if not self._tagscache.tagslist:
723 if not self._tagscache.tagslist:
724 l = []
724 l = []
725 for t, n in self.tags().iteritems():
725 for t, n in self.tags().iteritems():
726 l.append((self.changelog.rev(n), t, n))
726 l.append((self.changelog.rev(n), t, n))
727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
728
728
729 return self._tagscache.tagslist
729 return self._tagscache.tagslist
730
730
731 def nodetags(self, node):
731 def nodetags(self, node):
732 '''return the tags associated with a node'''
732 '''return the tags associated with a node'''
733 if not self._tagscache.nodetagscache:
733 if not self._tagscache.nodetagscache:
734 nodetagscache = {}
734 nodetagscache = {}
735 for t, n in self._tagscache.tags.iteritems():
735 for t, n in self._tagscache.tags.iteritems():
736 nodetagscache.setdefault(n, []).append(t)
736 nodetagscache.setdefault(n, []).append(t)
737 for tags in nodetagscache.itervalues():
737 for tags in nodetagscache.itervalues():
738 tags.sort()
738 tags.sort()
739 self._tagscache.nodetagscache = nodetagscache
739 self._tagscache.nodetagscache = nodetagscache
740 return self._tagscache.nodetagscache.get(node, [])
740 return self._tagscache.nodetagscache.get(node, [])
741
741
742 def nodebookmarks(self, node):
742 def nodebookmarks(self, node):
743 marks = []
743 marks = []
744 for bookmark, n in self._bookmarks.iteritems():
744 for bookmark, n in self._bookmarks.iteritems():
745 if n == node:
745 if n == node:
746 marks.append(bookmark)
746 marks.append(bookmark)
747 return sorted(marks)
747 return sorted(marks)
748
748
749 def branchmap(self):
749 def branchmap(self):
750 '''returns a dictionary {branch: [branchheads]} with branchheads
750 '''returns a dictionary {branch: [branchheads]} with branchheads
751 ordered by increasing revision number'''
751 ordered by increasing revision number'''
752 branchmap.updatecache(self)
752 branchmap.updatecache(self)
753 return self._branchcaches[self.filtername]
753 return self._branchcaches[self.filtername]
754
754
755 @unfilteredmethod
755 @unfilteredmethod
756 def revbranchcache(self):
756 def revbranchcache(self):
757 if not self._revbranchcache:
757 if not self._revbranchcache:
758 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
758 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
759 return self._revbranchcache
759 return self._revbranchcache
760
760
761 def branchtip(self, branch, ignoremissing=False):
761 def branchtip(self, branch, ignoremissing=False):
762 '''return the tip node for a given branch
762 '''return the tip node for a given branch
763
763
764 If ignoremissing is True, then this method will not raise an error.
764 If ignoremissing is True, then this method will not raise an error.
765 This is helpful for callers that only expect None for a missing branch
765 This is helpful for callers that only expect None for a missing branch
766 (e.g. namespace).
766 (e.g. namespace).
767
767
768 '''
768 '''
769 try:
769 try:
770 return self.branchmap().branchtip(branch)
770 return self.branchmap().branchtip(branch)
771 except KeyError:
771 except KeyError:
772 if not ignoremissing:
772 if not ignoremissing:
773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
774 else:
774 else:
775 pass
775 pass
776
776
777 def lookup(self, key):
777 def lookup(self, key):
778 return self[key].node()
778 return self[key].node()
779
779
780 def lookupbranch(self, key, remote=None):
780 def lookupbranch(self, key, remote=None):
781 repo = remote or self
781 repo = remote or self
782 if key in repo.branchmap():
782 if key in repo.branchmap():
783 return key
783 return key
784
784
785 repo = (remote and remote.local()) and remote or self
785 repo = (remote and remote.local()) and remote or self
786 return repo[key].branch()
786 return repo[key].branch()
787
787
788 def known(self, nodes):
788 def known(self, nodes):
789 nm = self.changelog.nodemap
789 nm = self.changelog.nodemap
790 pc = self._phasecache
790 pc = self._phasecache
791 result = []
791 result = []
792 for n in nodes:
792 for n in nodes:
793 r = nm.get(n)
793 r = nm.get(n)
794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
795 result.append(resp)
795 result.append(resp)
796 return result
796 return result
797
797
798 def local(self):
798 def local(self):
799 return self
799 return self
800
800
801 def cancopy(self):
801 def cancopy(self):
802 # so statichttprepo's override of local() works
802 # so statichttprepo's override of local() works
803 if not self.local():
803 if not self.local():
804 return False
804 return False
805 if not self.ui.configbool('phases', 'publish', True):
805 if not self.ui.configbool('phases', 'publish', True):
806 return True
806 return True
807 # if publishing we can't copy if there is filtered content
807 # if publishing we can't copy if there is filtered content
808 return not self.filtered('visible').changelog.filteredrevs
808 return not self.filtered('visible').changelog.filteredrevs
809
809
810 def shared(self):
810 def shared(self):
811 '''the type of shared repository (None if not shared)'''
811 '''the type of shared repository (None if not shared)'''
812 if self.sharedpath != self.path:
812 if self.sharedpath != self.path:
813 return 'store'
813 return 'store'
814 return None
814 return None
815
815
816 def join(self, f, *insidef):
816 def join(self, f, *insidef):
817 return self.vfs.join(os.path.join(f, *insidef))
817 return self.vfs.join(os.path.join(f, *insidef))
818
818
819 def wjoin(self, f, *insidef):
819 def wjoin(self, f, *insidef):
820 return self.vfs.reljoin(self.root, f, *insidef)
820 return self.vfs.reljoin(self.root, f, *insidef)
821
821
822 def file(self, f):
822 def file(self, f):
823 if f[0] == '/':
823 if f[0] == '/':
824 f = f[1:]
824 f = f[1:]
825 return filelog.filelog(self.svfs, f)
825 return filelog.filelog(self.svfs, f)
826
826
827 def changectx(self, changeid):
827 def changectx(self, changeid):
828 return self[changeid]
828 return self[changeid]
829
829
830 def parents(self, changeid=None):
830 def parents(self, changeid=None):
831 '''get list of changectxs for parents of changeid'''
831 '''get list of changectxs for parents of changeid'''
832 return self[changeid].parents()
832 return self[changeid].parents()
833
833
834 def setparents(self, p1, p2=nullid):
834 def setparents(self, p1, p2=nullid):
835 self.dirstate.beginparentchange()
835 self.dirstate.beginparentchange()
836 copies = self.dirstate.setparents(p1, p2)
836 copies = self.dirstate.setparents(p1, p2)
837 pctx = self[p1]
837 pctx = self[p1]
838 if copies:
838 if copies:
839 # Adjust copy records, the dirstate cannot do it, it
839 # Adjust copy records, the dirstate cannot do it, it
840 # requires access to parents manifests. Preserve them
840 # requires access to parents manifests. Preserve them
841 # only for entries added to first parent.
841 # only for entries added to first parent.
842 for f in copies:
842 for f in copies:
843 if f not in pctx and copies[f] in pctx:
843 if f not in pctx and copies[f] in pctx:
844 self.dirstate.copy(copies[f], f)
844 self.dirstate.copy(copies[f], f)
845 if p2 == nullid:
845 if p2 == nullid:
846 for f, s in sorted(self.dirstate.copies().items()):
846 for f, s in sorted(self.dirstate.copies().items()):
847 if f not in pctx and s not in pctx:
847 if f not in pctx and s not in pctx:
848 self.dirstate.copy(None, f)
848 self.dirstate.copy(None, f)
849 self.dirstate.endparentchange()
849 self.dirstate.endparentchange()
850
850
851 def filectx(self, path, changeid=None, fileid=None):
851 def filectx(self, path, changeid=None, fileid=None):
852 """changeid can be a changeset revision, node, or tag.
852 """changeid can be a changeset revision, node, or tag.
853 fileid can be a file revision or node."""
853 fileid can be a file revision or node."""
854 return context.filectx(self, path, changeid, fileid)
854 return context.filectx(self, path, changeid, fileid)
855
855
856 def getcwd(self):
856 def getcwd(self):
857 return self.dirstate.getcwd()
857 return self.dirstate.getcwd()
858
858
859 def pathto(self, f, cwd=None):
859 def pathto(self, f, cwd=None):
860 return self.dirstate.pathto(f, cwd)
860 return self.dirstate.pathto(f, cwd)
861
861
862 def wfile(self, f, mode='r'):
862 def wfile(self, f, mode='r'):
863 return self.wvfs(f, mode)
863 return self.wvfs(f, mode)
864
864
865 def _link(self, f):
865 def _link(self, f):
866 return self.wvfs.islink(f)
866 return self.wvfs.islink(f)
867
867
868 def _loadfilter(self, filter):
868 def _loadfilter(self, filter):
869 if filter not in self.filterpats:
869 if filter not in self.filterpats:
870 l = []
870 l = []
871 for pat, cmd in self.ui.configitems(filter):
871 for pat, cmd in self.ui.configitems(filter):
872 if cmd == '!':
872 if cmd == '!':
873 continue
873 continue
874 mf = matchmod.match(self.root, '', [pat])
874 mf = matchmod.match(self.root, '', [pat])
875 fn = None
875 fn = None
876 params = cmd
876 params = cmd
877 for name, filterfn in self._datafilters.iteritems():
877 for name, filterfn in self._datafilters.iteritems():
878 if cmd.startswith(name):
878 if cmd.startswith(name):
879 fn = filterfn
879 fn = filterfn
880 params = cmd[len(name):].lstrip()
880 params = cmd[len(name):].lstrip()
881 break
881 break
882 if not fn:
882 if not fn:
883 fn = lambda s, c, **kwargs: util.filter(s, c)
883 fn = lambda s, c, **kwargs: util.filter(s, c)
884 # Wrap old filters not supporting keyword arguments
884 # Wrap old filters not supporting keyword arguments
885 if not inspect.getargspec(fn)[2]:
885 if not inspect.getargspec(fn)[2]:
886 oldfn = fn
886 oldfn = fn
887 fn = lambda s, c, **kwargs: oldfn(s, c)
887 fn = lambda s, c, **kwargs: oldfn(s, c)
888 l.append((mf, fn, params))
888 l.append((mf, fn, params))
889 self.filterpats[filter] = l
889 self.filterpats[filter] = l
890 return self.filterpats[filter]
890 return self.filterpats[filter]
891
891
892 def _filter(self, filterpats, filename, data):
892 def _filter(self, filterpats, filename, data):
893 for mf, fn, cmd in filterpats:
893 for mf, fn, cmd in filterpats:
894 if mf(filename):
894 if mf(filename):
895 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
895 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
896 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
896 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
897 break
897 break
898
898
899 return data
899 return data
900
900
901 @unfilteredpropertycache
901 @unfilteredpropertycache
902 def _encodefilterpats(self):
902 def _encodefilterpats(self):
903 return self._loadfilter('encode')
903 return self._loadfilter('encode')
904
904
905 @unfilteredpropertycache
905 @unfilteredpropertycache
906 def _decodefilterpats(self):
906 def _decodefilterpats(self):
907 return self._loadfilter('decode')
907 return self._loadfilter('decode')
908
908
909 def adddatafilter(self, name, filter):
909 def adddatafilter(self, name, filter):
910 self._datafilters[name] = filter
910 self._datafilters[name] = filter
911
911
912 def wread(self, filename):
912 def wread(self, filename):
913 if self._link(filename):
913 if self._link(filename):
914 data = self.wvfs.readlink(filename)
914 data = self.wvfs.readlink(filename)
915 else:
915 else:
916 data = self.wvfs.read(filename)
916 data = self.wvfs.read(filename)
917 return self._filter(self._encodefilterpats, filename, data)
917 return self._filter(self._encodefilterpats, filename, data)
918
918
919 def wwrite(self, filename, data, flags):
919 def wwrite(self, filename, data, flags):
920 """write ``data`` into ``filename`` in the working directory
921
922 This returns length of written (maybe decoded) data.
923 """
920 data = self._filter(self._decodefilterpats, filename, data)
924 data = self._filter(self._decodefilterpats, filename, data)
921 if 'l' in flags:
925 if 'l' in flags:
922 self.wvfs.symlink(data, filename)
926 self.wvfs.symlink(data, filename)
923 else:
927 else:
924 self.wvfs.write(filename, data)
928 self.wvfs.write(filename, data)
925 if 'x' in flags:
929 if 'x' in flags:
926 self.wvfs.setflags(filename, False, True)
930 self.wvfs.setflags(filename, False, True)
931 return len(data)
927
932
928 def wwritedata(self, filename, data):
933 def wwritedata(self, filename, data):
929 return self._filter(self._decodefilterpats, filename, data)
934 return self._filter(self._decodefilterpats, filename, data)
930
935
931 def currenttransaction(self):
936 def currenttransaction(self):
932 """return the current transaction or None if non exists"""
937 """return the current transaction or None if non exists"""
933 if self._transref:
938 if self._transref:
934 tr = self._transref()
939 tr = self._transref()
935 else:
940 else:
936 tr = None
941 tr = None
937
942
938 if tr and tr.running():
943 if tr and tr.running():
939 return tr
944 return tr
940 return None
945 return None
941
946
942 def transaction(self, desc, report=None):
947 def transaction(self, desc, report=None):
943 if (self.ui.configbool('devel', 'all')
948 if (self.ui.configbool('devel', 'all')
944 or self.ui.configbool('devel', 'check-locks')):
949 or self.ui.configbool('devel', 'check-locks')):
945 l = self._lockref and self._lockref()
950 l = self._lockref and self._lockref()
946 if l is None or not l.held:
951 if l is None or not l.held:
947 scmutil.develwarn(self.ui, 'transaction with no lock')
952 scmutil.develwarn(self.ui, 'transaction with no lock')
948 tr = self.currenttransaction()
953 tr = self.currenttransaction()
949 if tr is not None:
954 if tr is not None:
950 return tr.nest()
955 return tr.nest()
951
956
952 # abort here if the journal already exists
957 # abort here if the journal already exists
953 if self.svfs.exists("journal"):
958 if self.svfs.exists("journal"):
954 raise error.RepoError(
959 raise error.RepoError(
955 _("abandoned transaction found"),
960 _("abandoned transaction found"),
956 hint=_("run 'hg recover' to clean up transaction"))
961 hint=_("run 'hg recover' to clean up transaction"))
957
962
958 self.hook('pretxnopen', throw=True, txnname=desc)
963 self.hook('pretxnopen', throw=True, txnname=desc)
959
964
960 self._writejournal(desc)
965 self._writejournal(desc)
961 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
966 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
962 if report:
967 if report:
963 rp = report
968 rp = report
964 else:
969 else:
965 rp = self.ui.warn
970 rp = self.ui.warn
966 vfsmap = {'plain': self.vfs} # root of .hg/
971 vfsmap = {'plain': self.vfs} # root of .hg/
967 # we must avoid cyclic reference between repo and transaction.
972 # we must avoid cyclic reference between repo and transaction.
968 reporef = weakref.ref(self)
973 reporef = weakref.ref(self)
969 def validate(tr):
974 def validate(tr):
970 """will run pre-closing hooks"""
975 """will run pre-closing hooks"""
971 pending = lambda: tr.writepending() and self.root or ""
976 pending = lambda: tr.writepending() and self.root or ""
972 reporef().hook('pretxnclose', throw=True, pending=pending,
977 reporef().hook('pretxnclose', throw=True, pending=pending,
973 xnname=desc, **tr.hookargs)
978 xnname=desc, **tr.hookargs)
974
979
975 tr = transaction.transaction(rp, self.sopener, vfsmap,
980 tr = transaction.transaction(rp, self.sopener, vfsmap,
976 "journal",
981 "journal",
977 "undo",
982 "undo",
978 aftertrans(renames),
983 aftertrans(renames),
979 self.store.createmode,
984 self.store.createmode,
980 validator=validate)
985 validator=validate)
981
986
982 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
987 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
983 tr.hookargs['TXNID'] = trid
988 tr.hookargs['TXNID'] = trid
984 # note: writing the fncache only during finalize mean that the file is
989 # note: writing the fncache only during finalize mean that the file is
985 # outdated when running hooks. As fncache is used for streaming clone,
990 # outdated when running hooks. As fncache is used for streaming clone,
986 # this is not expected to break anything that happen during the hooks.
991 # this is not expected to break anything that happen during the hooks.
987 tr.addfinalize('flush-fncache', self.store.write)
992 tr.addfinalize('flush-fncache', self.store.write)
988 def txnclosehook(tr2):
993 def txnclosehook(tr2):
989 """To be run if transaction is successful, will schedule a hook run
994 """To be run if transaction is successful, will schedule a hook run
990 """
995 """
991 def hook():
996 def hook():
992 reporef().hook('txnclose', throw=False, txnname=desc,
997 reporef().hook('txnclose', throw=False, txnname=desc,
993 **tr2.hookargs)
998 **tr2.hookargs)
994 reporef()._afterlock(hook)
999 reporef()._afterlock(hook)
995 tr.addfinalize('txnclose-hook', txnclosehook)
1000 tr.addfinalize('txnclose-hook', txnclosehook)
996 def txnaborthook(tr2):
1001 def txnaborthook(tr2):
997 """To be run if transaction is aborted
1002 """To be run if transaction is aborted
998 """
1003 """
999 reporef().hook('txnabort', throw=False, txnname=desc,
1004 reporef().hook('txnabort', throw=False, txnname=desc,
1000 **tr2.hookargs)
1005 **tr2.hookargs)
1001 tr.addabort('txnabort-hook', txnaborthook)
1006 tr.addabort('txnabort-hook', txnaborthook)
1002 self._transref = weakref.ref(tr)
1007 self._transref = weakref.ref(tr)
1003 return tr
1008 return tr
1004
1009
1005 def _journalfiles(self):
1010 def _journalfiles(self):
1006 return ((self.svfs, 'journal'),
1011 return ((self.svfs, 'journal'),
1007 (self.vfs, 'journal.dirstate'),
1012 (self.vfs, 'journal.dirstate'),
1008 (self.vfs, 'journal.branch'),
1013 (self.vfs, 'journal.branch'),
1009 (self.vfs, 'journal.desc'),
1014 (self.vfs, 'journal.desc'),
1010 (self.vfs, 'journal.bookmarks'),
1015 (self.vfs, 'journal.bookmarks'),
1011 (self.svfs, 'journal.phaseroots'))
1016 (self.svfs, 'journal.phaseroots'))
1012
1017
1013 def undofiles(self):
1018 def undofiles(self):
1014 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1019 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1015
1020
1016 def _writejournal(self, desc):
1021 def _writejournal(self, desc):
1017 self.vfs.write("journal.dirstate",
1022 self.vfs.write("journal.dirstate",
1018 self.vfs.tryread("dirstate"))
1023 self.vfs.tryread("dirstate"))
1019 self.vfs.write("journal.branch",
1024 self.vfs.write("journal.branch",
1020 encoding.fromlocal(self.dirstate.branch()))
1025 encoding.fromlocal(self.dirstate.branch()))
1021 self.vfs.write("journal.desc",
1026 self.vfs.write("journal.desc",
1022 "%d\n%s\n" % (len(self), desc))
1027 "%d\n%s\n" % (len(self), desc))
1023 self.vfs.write("journal.bookmarks",
1028 self.vfs.write("journal.bookmarks",
1024 self.vfs.tryread("bookmarks"))
1029 self.vfs.tryread("bookmarks"))
1025 self.svfs.write("journal.phaseroots",
1030 self.svfs.write("journal.phaseroots",
1026 self.svfs.tryread("phaseroots"))
1031 self.svfs.tryread("phaseroots"))
1027
1032
1028 def recover(self):
1033 def recover(self):
1029 lock = self.lock()
1034 lock = self.lock()
1030 try:
1035 try:
1031 if self.svfs.exists("journal"):
1036 if self.svfs.exists("journal"):
1032 self.ui.status(_("rolling back interrupted transaction\n"))
1037 self.ui.status(_("rolling back interrupted transaction\n"))
1033 vfsmap = {'': self.svfs,
1038 vfsmap = {'': self.svfs,
1034 'plain': self.vfs,}
1039 'plain': self.vfs,}
1035 transaction.rollback(self.svfs, vfsmap, "journal",
1040 transaction.rollback(self.svfs, vfsmap, "journal",
1036 self.ui.warn)
1041 self.ui.warn)
1037 self.invalidate()
1042 self.invalidate()
1038 return True
1043 return True
1039 else:
1044 else:
1040 self.ui.warn(_("no interrupted transaction available\n"))
1045 self.ui.warn(_("no interrupted transaction available\n"))
1041 return False
1046 return False
1042 finally:
1047 finally:
1043 lock.release()
1048 lock.release()
1044
1049
1045 def rollback(self, dryrun=False, force=False):
1050 def rollback(self, dryrun=False, force=False):
1046 wlock = lock = None
1051 wlock = lock = None
1047 try:
1052 try:
1048 wlock = self.wlock()
1053 wlock = self.wlock()
1049 lock = self.lock()
1054 lock = self.lock()
1050 if self.svfs.exists("undo"):
1055 if self.svfs.exists("undo"):
1051 return self._rollback(dryrun, force)
1056 return self._rollback(dryrun, force)
1052 else:
1057 else:
1053 self.ui.warn(_("no rollback information available\n"))
1058 self.ui.warn(_("no rollback information available\n"))
1054 return 1
1059 return 1
1055 finally:
1060 finally:
1056 release(lock, wlock)
1061 release(lock, wlock)
1057
1062
1058 @unfilteredmethod # Until we get smarter cache management
1063 @unfilteredmethod # Until we get smarter cache management
1059 def _rollback(self, dryrun, force):
1064 def _rollback(self, dryrun, force):
1060 ui = self.ui
1065 ui = self.ui
1061 try:
1066 try:
1062 args = self.vfs.read('undo.desc').splitlines()
1067 args = self.vfs.read('undo.desc').splitlines()
1063 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1068 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1064 if len(args) >= 3:
1069 if len(args) >= 3:
1065 detail = args[2]
1070 detail = args[2]
1066 oldtip = oldlen - 1
1071 oldtip = oldlen - 1
1067
1072
1068 if detail and ui.verbose:
1073 if detail and ui.verbose:
1069 msg = (_('repository tip rolled back to revision %s'
1074 msg = (_('repository tip rolled back to revision %s'
1070 ' (undo %s: %s)\n')
1075 ' (undo %s: %s)\n')
1071 % (oldtip, desc, detail))
1076 % (oldtip, desc, detail))
1072 else:
1077 else:
1073 msg = (_('repository tip rolled back to revision %s'
1078 msg = (_('repository tip rolled back to revision %s'
1074 ' (undo %s)\n')
1079 ' (undo %s)\n')
1075 % (oldtip, desc))
1080 % (oldtip, desc))
1076 except IOError:
1081 except IOError:
1077 msg = _('rolling back unknown transaction\n')
1082 msg = _('rolling back unknown transaction\n')
1078 desc = None
1083 desc = None
1079
1084
1080 if not force and self['.'] != self['tip'] and desc == 'commit':
1085 if not force and self['.'] != self['tip'] and desc == 'commit':
1081 raise util.Abort(
1086 raise util.Abort(
1082 _('rollback of last commit while not checked out '
1087 _('rollback of last commit while not checked out '
1083 'may lose data'), hint=_('use -f to force'))
1088 'may lose data'), hint=_('use -f to force'))
1084
1089
1085 ui.status(msg)
1090 ui.status(msg)
1086 if dryrun:
1091 if dryrun:
1087 return 0
1092 return 0
1088
1093
1089 parents = self.dirstate.parents()
1094 parents = self.dirstate.parents()
1090 self.destroying()
1095 self.destroying()
1091 vfsmap = {'plain': self.vfs, '': self.svfs}
1096 vfsmap = {'plain': self.vfs, '': self.svfs}
1092 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1097 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1093 if self.vfs.exists('undo.bookmarks'):
1098 if self.vfs.exists('undo.bookmarks'):
1094 self.vfs.rename('undo.bookmarks', 'bookmarks')
1099 self.vfs.rename('undo.bookmarks', 'bookmarks')
1095 if self.svfs.exists('undo.phaseroots'):
1100 if self.svfs.exists('undo.phaseroots'):
1096 self.svfs.rename('undo.phaseroots', 'phaseroots')
1101 self.svfs.rename('undo.phaseroots', 'phaseroots')
1097 self.invalidate()
1102 self.invalidate()
1098
1103
1099 parentgone = (parents[0] not in self.changelog.nodemap or
1104 parentgone = (parents[0] not in self.changelog.nodemap or
1100 parents[1] not in self.changelog.nodemap)
1105 parents[1] not in self.changelog.nodemap)
1101 if parentgone:
1106 if parentgone:
1102 self.vfs.rename('undo.dirstate', 'dirstate')
1107 self.vfs.rename('undo.dirstate', 'dirstate')
1103 try:
1108 try:
1104 branch = self.vfs.read('undo.branch')
1109 branch = self.vfs.read('undo.branch')
1105 self.dirstate.setbranch(encoding.tolocal(branch))
1110 self.dirstate.setbranch(encoding.tolocal(branch))
1106 except IOError:
1111 except IOError:
1107 ui.warn(_('named branch could not be reset: '
1112 ui.warn(_('named branch could not be reset: '
1108 'current branch is still \'%s\'\n')
1113 'current branch is still \'%s\'\n')
1109 % self.dirstate.branch())
1114 % self.dirstate.branch())
1110
1115
1111 self.dirstate.invalidate()
1116 self.dirstate.invalidate()
1112 parents = tuple([p.rev() for p in self.parents()])
1117 parents = tuple([p.rev() for p in self.parents()])
1113 if len(parents) > 1:
1118 if len(parents) > 1:
1114 ui.status(_('working directory now based on '
1119 ui.status(_('working directory now based on '
1115 'revisions %d and %d\n') % parents)
1120 'revisions %d and %d\n') % parents)
1116 else:
1121 else:
1117 ui.status(_('working directory now based on '
1122 ui.status(_('working directory now based on '
1118 'revision %d\n') % parents)
1123 'revision %d\n') % parents)
1119 ms = mergemod.mergestate(self)
1124 ms = mergemod.mergestate(self)
1120 ms.reset(self['.'].node())
1125 ms.reset(self['.'].node())
1121
1126
1122 # TODO: if we know which new heads may result from this rollback, pass
1127 # TODO: if we know which new heads may result from this rollback, pass
1123 # them to destroy(), which will prevent the branchhead cache from being
1128 # them to destroy(), which will prevent the branchhead cache from being
1124 # invalidated.
1129 # invalidated.
1125 self.destroyed()
1130 self.destroyed()
1126 return 0
1131 return 0
1127
1132
1128 def invalidatecaches(self):
1133 def invalidatecaches(self):
1129
1134
1130 if '_tagscache' in vars(self):
1135 if '_tagscache' in vars(self):
1131 # can't use delattr on proxy
1136 # can't use delattr on proxy
1132 del self.__dict__['_tagscache']
1137 del self.__dict__['_tagscache']
1133
1138
1134 self.unfiltered()._branchcaches.clear()
1139 self.unfiltered()._branchcaches.clear()
1135 self.invalidatevolatilesets()
1140 self.invalidatevolatilesets()
1136
1141
1137 def invalidatevolatilesets(self):
1142 def invalidatevolatilesets(self):
1138 self.filteredrevcache.clear()
1143 self.filteredrevcache.clear()
1139 obsolete.clearobscaches(self)
1144 obsolete.clearobscaches(self)
1140
1145
1141 def invalidatedirstate(self):
1146 def invalidatedirstate(self):
1142 '''Invalidates the dirstate, causing the next call to dirstate
1147 '''Invalidates the dirstate, causing the next call to dirstate
1143 to check if it was modified since the last time it was read,
1148 to check if it was modified since the last time it was read,
1144 rereading it if it has.
1149 rereading it if it has.
1145
1150
1146 This is different to dirstate.invalidate() that it doesn't always
1151 This is different to dirstate.invalidate() that it doesn't always
1147 rereads the dirstate. Use dirstate.invalidate() if you want to
1152 rereads the dirstate. Use dirstate.invalidate() if you want to
1148 explicitly read the dirstate again (i.e. restoring it to a previous
1153 explicitly read the dirstate again (i.e. restoring it to a previous
1149 known good state).'''
1154 known good state).'''
1150 if hasunfilteredcache(self, 'dirstate'):
1155 if hasunfilteredcache(self, 'dirstate'):
1151 for k in self.dirstate._filecache:
1156 for k in self.dirstate._filecache:
1152 try:
1157 try:
1153 delattr(self.dirstate, k)
1158 delattr(self.dirstate, k)
1154 except AttributeError:
1159 except AttributeError:
1155 pass
1160 pass
1156 delattr(self.unfiltered(), 'dirstate')
1161 delattr(self.unfiltered(), 'dirstate')
1157
1162
1158 def invalidate(self):
1163 def invalidate(self):
1159 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1164 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1160 for k in self._filecache:
1165 for k in self._filecache:
1161 # dirstate is invalidated separately in invalidatedirstate()
1166 # dirstate is invalidated separately in invalidatedirstate()
1162 if k == 'dirstate':
1167 if k == 'dirstate':
1163 continue
1168 continue
1164
1169
1165 try:
1170 try:
1166 delattr(unfiltered, k)
1171 delattr(unfiltered, k)
1167 except AttributeError:
1172 except AttributeError:
1168 pass
1173 pass
1169 self.invalidatecaches()
1174 self.invalidatecaches()
1170 self.store.invalidatecaches()
1175 self.store.invalidatecaches()
1171
1176
1172 def invalidateall(self):
1177 def invalidateall(self):
1173 '''Fully invalidates both store and non-store parts, causing the
1178 '''Fully invalidates both store and non-store parts, causing the
1174 subsequent operation to reread any outside changes.'''
1179 subsequent operation to reread any outside changes.'''
1175 # extension should hook this to invalidate its caches
1180 # extension should hook this to invalidate its caches
1176 self.invalidate()
1181 self.invalidate()
1177 self.invalidatedirstate()
1182 self.invalidatedirstate()
1178
1183
1179 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1184 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1180 try:
1185 try:
1181 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1186 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1182 except error.LockHeld, inst:
1187 except error.LockHeld, inst:
1183 if not wait:
1188 if not wait:
1184 raise
1189 raise
1185 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1190 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1186 (desc, inst.locker))
1191 (desc, inst.locker))
1187 # default to 600 seconds timeout
1192 # default to 600 seconds timeout
1188 l = lockmod.lock(vfs, lockname,
1193 l = lockmod.lock(vfs, lockname,
1189 int(self.ui.config("ui", "timeout", "600")),
1194 int(self.ui.config("ui", "timeout", "600")),
1190 releasefn, desc=desc)
1195 releasefn, desc=desc)
1191 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1196 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1192 if acquirefn:
1197 if acquirefn:
1193 acquirefn()
1198 acquirefn()
1194 return l
1199 return l
1195
1200
1196 def _afterlock(self, callback):
1201 def _afterlock(self, callback):
1197 """add a callback to be run when the repository is fully unlocked
1202 """add a callback to be run when the repository is fully unlocked
1198
1203
1199 The callback will be executed when the outermost lock is released
1204 The callback will be executed when the outermost lock is released
1200 (with wlock being higher level than 'lock')."""
1205 (with wlock being higher level than 'lock')."""
1201 for ref in (self._wlockref, self._lockref):
1206 for ref in (self._wlockref, self._lockref):
1202 l = ref and ref()
1207 l = ref and ref()
1203 if l and l.held:
1208 if l and l.held:
1204 l.postrelease.append(callback)
1209 l.postrelease.append(callback)
1205 break
1210 break
1206 else: # no lock have been found.
1211 else: # no lock have been found.
1207 callback()
1212 callback()
1208
1213
1209 def lock(self, wait=True):
1214 def lock(self, wait=True):
1210 '''Lock the repository store (.hg/store) and return a weak reference
1215 '''Lock the repository store (.hg/store) and return a weak reference
1211 to the lock. Use this before modifying the store (e.g. committing or
1216 to the lock. Use this before modifying the store (e.g. committing or
1212 stripping). If you are opening a transaction, get a lock as well.)
1217 stripping). If you are opening a transaction, get a lock as well.)
1213
1218
1214 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1219 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1215 'wlock' first to avoid a dead-lock hazard.'''
1220 'wlock' first to avoid a dead-lock hazard.'''
1216 l = self._lockref and self._lockref()
1221 l = self._lockref and self._lockref()
1217 if l is not None and l.held:
1222 if l is not None and l.held:
1218 l.lock()
1223 l.lock()
1219 return l
1224 return l
1220
1225
1221 def unlock():
1226 def unlock():
1222 for k, ce in self._filecache.items():
1227 for k, ce in self._filecache.items():
1223 if k == 'dirstate' or k not in self.__dict__:
1228 if k == 'dirstate' or k not in self.__dict__:
1224 continue
1229 continue
1225 ce.refresh()
1230 ce.refresh()
1226
1231
1227 l = self._lock(self.svfs, "lock", wait, unlock,
1232 l = self._lock(self.svfs, "lock", wait, unlock,
1228 self.invalidate, _('repository %s') % self.origroot)
1233 self.invalidate, _('repository %s') % self.origroot)
1229 self._lockref = weakref.ref(l)
1234 self._lockref = weakref.ref(l)
1230 return l
1235 return l
1231
1236
1232 def wlock(self, wait=True):
1237 def wlock(self, wait=True):
1233 '''Lock the non-store parts of the repository (everything under
1238 '''Lock the non-store parts of the repository (everything under
1234 .hg except .hg/store) and return a weak reference to the lock.
1239 .hg except .hg/store) and return a weak reference to the lock.
1235
1240
1236 Use this before modifying files in .hg.
1241 Use this before modifying files in .hg.
1237
1242
1238 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1243 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1239 'wlock' first to avoid a dead-lock hazard.'''
1244 'wlock' first to avoid a dead-lock hazard.'''
1240 l = self._wlockref and self._wlockref()
1245 l = self._wlockref and self._wlockref()
1241 if l is not None and l.held:
1246 if l is not None and l.held:
1242 l.lock()
1247 l.lock()
1243 return l
1248 return l
1244
1249
1245 # We do not need to check for non-waiting lock aquisition. Such
1250 # We do not need to check for non-waiting lock aquisition. Such
1246 # acquisition would not cause dead-lock as they would just fail.
1251 # acquisition would not cause dead-lock as they would just fail.
1247 if wait and (self.ui.configbool('devel', 'all')
1252 if wait and (self.ui.configbool('devel', 'all')
1248 or self.ui.configbool('devel', 'check-locks')):
1253 or self.ui.configbool('devel', 'check-locks')):
1249 l = self._lockref and self._lockref()
1254 l = self._lockref and self._lockref()
1250 if l is not None and l.held:
1255 if l is not None and l.held:
1251 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1256 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1252
1257
1253 def unlock():
1258 def unlock():
1254 if self.dirstate.pendingparentchange():
1259 if self.dirstate.pendingparentchange():
1255 self.dirstate.invalidate()
1260 self.dirstate.invalidate()
1256 else:
1261 else:
1257 self.dirstate.write()
1262 self.dirstate.write()
1258
1263
1259 self._filecache['dirstate'].refresh()
1264 self._filecache['dirstate'].refresh()
1260
1265
1261 l = self._lock(self.vfs, "wlock", wait, unlock,
1266 l = self._lock(self.vfs, "wlock", wait, unlock,
1262 self.invalidatedirstate, _('working directory of %s') %
1267 self.invalidatedirstate, _('working directory of %s') %
1263 self.origroot)
1268 self.origroot)
1264 self._wlockref = weakref.ref(l)
1269 self._wlockref = weakref.ref(l)
1265 return l
1270 return l
1266
1271
1267 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1272 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1268 """
1273 """
1269 commit an individual file as part of a larger transaction
1274 commit an individual file as part of a larger transaction
1270 """
1275 """
1271
1276
1272 fname = fctx.path()
1277 fname = fctx.path()
1273 fparent1 = manifest1.get(fname, nullid)
1278 fparent1 = manifest1.get(fname, nullid)
1274 fparent2 = manifest2.get(fname, nullid)
1279 fparent2 = manifest2.get(fname, nullid)
1275 if isinstance(fctx, context.filectx):
1280 if isinstance(fctx, context.filectx):
1276 node = fctx.filenode()
1281 node = fctx.filenode()
1277 if node in [fparent1, fparent2]:
1282 if node in [fparent1, fparent2]:
1278 self.ui.debug('reusing %s filelog entry\n' % fname)
1283 self.ui.debug('reusing %s filelog entry\n' % fname)
1279 return node
1284 return node
1280
1285
1281 flog = self.file(fname)
1286 flog = self.file(fname)
1282 meta = {}
1287 meta = {}
1283 copy = fctx.renamed()
1288 copy = fctx.renamed()
1284 if copy and copy[0] != fname:
1289 if copy and copy[0] != fname:
1285 # Mark the new revision of this file as a copy of another
1290 # Mark the new revision of this file as a copy of another
1286 # file. This copy data will effectively act as a parent
1291 # file. This copy data will effectively act as a parent
1287 # of this new revision. If this is a merge, the first
1292 # of this new revision. If this is a merge, the first
1288 # parent will be the nullid (meaning "look up the copy data")
1293 # parent will be the nullid (meaning "look up the copy data")
1289 # and the second one will be the other parent. For example:
1294 # and the second one will be the other parent. For example:
1290 #
1295 #
1291 # 0 --- 1 --- 3 rev1 changes file foo
1296 # 0 --- 1 --- 3 rev1 changes file foo
1292 # \ / rev2 renames foo to bar and changes it
1297 # \ / rev2 renames foo to bar and changes it
1293 # \- 2 -/ rev3 should have bar with all changes and
1298 # \- 2 -/ rev3 should have bar with all changes and
1294 # should record that bar descends from
1299 # should record that bar descends from
1295 # bar in rev2 and foo in rev1
1300 # bar in rev2 and foo in rev1
1296 #
1301 #
1297 # this allows this merge to succeed:
1302 # this allows this merge to succeed:
1298 #
1303 #
1299 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1304 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1300 # \ / merging rev3 and rev4 should use bar@rev2
1305 # \ / merging rev3 and rev4 should use bar@rev2
1301 # \- 2 --- 4 as the merge base
1306 # \- 2 --- 4 as the merge base
1302 #
1307 #
1303
1308
1304 cfname = copy[0]
1309 cfname = copy[0]
1305 crev = manifest1.get(cfname)
1310 crev = manifest1.get(cfname)
1306 newfparent = fparent2
1311 newfparent = fparent2
1307
1312
1308 if manifest2: # branch merge
1313 if manifest2: # branch merge
1309 if fparent2 == nullid or crev is None: # copied on remote side
1314 if fparent2 == nullid or crev is None: # copied on remote side
1310 if cfname in manifest2:
1315 if cfname in manifest2:
1311 crev = manifest2[cfname]
1316 crev = manifest2[cfname]
1312 newfparent = fparent1
1317 newfparent = fparent1
1313
1318
1314 # Here, we used to search backwards through history to try to find
1319 # Here, we used to search backwards through history to try to find
1315 # where the file copy came from if the source of a copy was not in
1320 # where the file copy came from if the source of a copy was not in
1316 # the parent directory. However, this doesn't actually make sense to
1321 # the parent directory. However, this doesn't actually make sense to
1317 # do (what does a copy from something not in your working copy even
1322 # do (what does a copy from something not in your working copy even
1318 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1323 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1319 # the user that copy information was dropped, so if they didn't
1324 # the user that copy information was dropped, so if they didn't
1320 # expect this outcome it can be fixed, but this is the correct
1325 # expect this outcome it can be fixed, but this is the correct
1321 # behavior in this circumstance.
1326 # behavior in this circumstance.
1322
1327
1323 if crev:
1328 if crev:
1324 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1329 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1325 meta["copy"] = cfname
1330 meta["copy"] = cfname
1326 meta["copyrev"] = hex(crev)
1331 meta["copyrev"] = hex(crev)
1327 fparent1, fparent2 = nullid, newfparent
1332 fparent1, fparent2 = nullid, newfparent
1328 else:
1333 else:
1329 self.ui.warn(_("warning: can't find ancestor for '%s' "
1334 self.ui.warn(_("warning: can't find ancestor for '%s' "
1330 "copied from '%s'!\n") % (fname, cfname))
1335 "copied from '%s'!\n") % (fname, cfname))
1331
1336
1332 elif fparent1 == nullid:
1337 elif fparent1 == nullid:
1333 fparent1, fparent2 = fparent2, nullid
1338 fparent1, fparent2 = fparent2, nullid
1334 elif fparent2 != nullid:
1339 elif fparent2 != nullid:
1335 # is one parent an ancestor of the other?
1340 # is one parent an ancestor of the other?
1336 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1341 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1337 if fparent1 in fparentancestors:
1342 if fparent1 in fparentancestors:
1338 fparent1, fparent2 = fparent2, nullid
1343 fparent1, fparent2 = fparent2, nullid
1339 elif fparent2 in fparentancestors:
1344 elif fparent2 in fparentancestors:
1340 fparent2 = nullid
1345 fparent2 = nullid
1341
1346
1342 # is the file changed?
1347 # is the file changed?
1343 text = fctx.data()
1348 text = fctx.data()
1344 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1349 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1345 changelist.append(fname)
1350 changelist.append(fname)
1346 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1351 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1347 # are just the flags changed during merge?
1352 # are just the flags changed during merge?
1348 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1353 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1349 changelist.append(fname)
1354 changelist.append(fname)
1350
1355
1351 return fparent1
1356 return fparent1
1352
1357
1353 @unfilteredmethod
1358 @unfilteredmethod
1354 def commit(self, text="", user=None, date=None, match=None, force=False,
1359 def commit(self, text="", user=None, date=None, match=None, force=False,
1355 editor=False, extra={}):
1360 editor=False, extra={}):
1356 """Add a new revision to current repository.
1361 """Add a new revision to current repository.
1357
1362
1358 Revision information is gathered from the working directory,
1363 Revision information is gathered from the working directory,
1359 match can be used to filter the committed files. If editor is
1364 match can be used to filter the committed files. If editor is
1360 supplied, it is called to get a commit message.
1365 supplied, it is called to get a commit message.
1361 """
1366 """
1362
1367
1363 def fail(f, msg):
1368 def fail(f, msg):
1364 raise util.Abort('%s: %s' % (f, msg))
1369 raise util.Abort('%s: %s' % (f, msg))
1365
1370
1366 if not match:
1371 if not match:
1367 match = matchmod.always(self.root, '')
1372 match = matchmod.always(self.root, '')
1368
1373
1369 if not force:
1374 if not force:
1370 vdirs = []
1375 vdirs = []
1371 match.explicitdir = vdirs.append
1376 match.explicitdir = vdirs.append
1372 match.bad = fail
1377 match.bad = fail
1373
1378
1374 wlock = self.wlock()
1379 wlock = self.wlock()
1375 try:
1380 try:
1376 wctx = self[None]
1381 wctx = self[None]
1377 merge = len(wctx.parents()) > 1
1382 merge = len(wctx.parents()) > 1
1378
1383
1379 if not force and merge and not match.always():
1384 if not force and merge and not match.always():
1380 raise util.Abort(_('cannot partially commit a merge '
1385 raise util.Abort(_('cannot partially commit a merge '
1381 '(do not specify files or patterns)'))
1386 '(do not specify files or patterns)'))
1382
1387
1383 status = self.status(match=match, clean=force)
1388 status = self.status(match=match, clean=force)
1384 if force:
1389 if force:
1385 status.modified.extend(status.clean) # mq may commit clean files
1390 status.modified.extend(status.clean) # mq may commit clean files
1386
1391
1387 # check subrepos
1392 # check subrepos
1388 subs = []
1393 subs = []
1389 commitsubs = set()
1394 commitsubs = set()
1390 newstate = wctx.substate.copy()
1395 newstate = wctx.substate.copy()
1391 # only manage subrepos and .hgsubstate if .hgsub is present
1396 # only manage subrepos and .hgsubstate if .hgsub is present
1392 if '.hgsub' in wctx:
1397 if '.hgsub' in wctx:
1393 # we'll decide whether to track this ourselves, thanks
1398 # we'll decide whether to track this ourselves, thanks
1394 for c in status.modified, status.added, status.removed:
1399 for c in status.modified, status.added, status.removed:
1395 if '.hgsubstate' in c:
1400 if '.hgsubstate' in c:
1396 c.remove('.hgsubstate')
1401 c.remove('.hgsubstate')
1397
1402
1398 # compare current state to last committed state
1403 # compare current state to last committed state
1399 # build new substate based on last committed state
1404 # build new substate based on last committed state
1400 oldstate = wctx.p1().substate
1405 oldstate = wctx.p1().substate
1401 for s in sorted(newstate.keys()):
1406 for s in sorted(newstate.keys()):
1402 if not match(s):
1407 if not match(s):
1403 # ignore working copy, use old state if present
1408 # ignore working copy, use old state if present
1404 if s in oldstate:
1409 if s in oldstate:
1405 newstate[s] = oldstate[s]
1410 newstate[s] = oldstate[s]
1406 continue
1411 continue
1407 if not force:
1412 if not force:
1408 raise util.Abort(
1413 raise util.Abort(
1409 _("commit with new subrepo %s excluded") % s)
1414 _("commit with new subrepo %s excluded") % s)
1410 dirtyreason = wctx.sub(s).dirtyreason(True)
1415 dirtyreason = wctx.sub(s).dirtyreason(True)
1411 if dirtyreason:
1416 if dirtyreason:
1412 if not self.ui.configbool('ui', 'commitsubrepos'):
1417 if not self.ui.configbool('ui', 'commitsubrepos'):
1413 raise util.Abort(dirtyreason,
1418 raise util.Abort(dirtyreason,
1414 hint=_("use --subrepos for recursive commit"))
1419 hint=_("use --subrepos for recursive commit"))
1415 subs.append(s)
1420 subs.append(s)
1416 commitsubs.add(s)
1421 commitsubs.add(s)
1417 else:
1422 else:
1418 bs = wctx.sub(s).basestate()
1423 bs = wctx.sub(s).basestate()
1419 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1424 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1420 if oldstate.get(s, (None, None, None))[1] != bs:
1425 if oldstate.get(s, (None, None, None))[1] != bs:
1421 subs.append(s)
1426 subs.append(s)
1422
1427
1423 # check for removed subrepos
1428 # check for removed subrepos
1424 for p in wctx.parents():
1429 for p in wctx.parents():
1425 r = [s for s in p.substate if s not in newstate]
1430 r = [s for s in p.substate if s not in newstate]
1426 subs += [s for s in r if match(s)]
1431 subs += [s for s in r if match(s)]
1427 if subs:
1432 if subs:
1428 if (not match('.hgsub') and
1433 if (not match('.hgsub') and
1429 '.hgsub' in (wctx.modified() + wctx.added())):
1434 '.hgsub' in (wctx.modified() + wctx.added())):
1430 raise util.Abort(
1435 raise util.Abort(
1431 _("can't commit subrepos without .hgsub"))
1436 _("can't commit subrepos without .hgsub"))
1432 status.modified.insert(0, '.hgsubstate')
1437 status.modified.insert(0, '.hgsubstate')
1433
1438
1434 elif '.hgsub' in status.removed:
1439 elif '.hgsub' in status.removed:
1435 # clean up .hgsubstate when .hgsub is removed
1440 # clean up .hgsubstate when .hgsub is removed
1436 if ('.hgsubstate' in wctx and
1441 if ('.hgsubstate' in wctx and
1437 '.hgsubstate' not in (status.modified + status.added +
1442 '.hgsubstate' not in (status.modified + status.added +
1438 status.removed)):
1443 status.removed)):
1439 status.removed.insert(0, '.hgsubstate')
1444 status.removed.insert(0, '.hgsubstate')
1440
1445
1441 # make sure all explicit patterns are matched
1446 # make sure all explicit patterns are matched
1442 if not force and match.files():
1447 if not force and match.files():
1443 matched = set(status.modified + status.added + status.removed)
1448 matched = set(status.modified + status.added + status.removed)
1444
1449
1445 for f in match.files():
1450 for f in match.files():
1446 f = self.dirstate.normalize(f)
1451 f = self.dirstate.normalize(f)
1447 if f == '.' or f in matched or f in wctx.substate:
1452 if f == '.' or f in matched or f in wctx.substate:
1448 continue
1453 continue
1449 if f in status.deleted:
1454 if f in status.deleted:
1450 fail(f, _('file not found!'))
1455 fail(f, _('file not found!'))
1451 if f in vdirs: # visited directory
1456 if f in vdirs: # visited directory
1452 d = f + '/'
1457 d = f + '/'
1453 for mf in matched:
1458 for mf in matched:
1454 if mf.startswith(d):
1459 if mf.startswith(d):
1455 break
1460 break
1456 else:
1461 else:
1457 fail(f, _("no match under directory!"))
1462 fail(f, _("no match under directory!"))
1458 elif f not in self.dirstate:
1463 elif f not in self.dirstate:
1459 fail(f, _("file not tracked!"))
1464 fail(f, _("file not tracked!"))
1460
1465
1461 cctx = context.workingcommitctx(self, status,
1466 cctx = context.workingcommitctx(self, status,
1462 text, user, date, extra)
1467 text, user, date, extra)
1463
1468
1464 if (not force and not extra.get("close") and not merge
1469 if (not force and not extra.get("close") and not merge
1465 and not cctx.files()
1470 and not cctx.files()
1466 and wctx.branch() == wctx.p1().branch()):
1471 and wctx.branch() == wctx.p1().branch()):
1467 return None
1472 return None
1468
1473
1469 if merge and cctx.deleted():
1474 if merge and cctx.deleted():
1470 raise util.Abort(_("cannot commit merge with missing files"))
1475 raise util.Abort(_("cannot commit merge with missing files"))
1471
1476
1472 ms = mergemod.mergestate(self)
1477 ms = mergemod.mergestate(self)
1473 for f in status.modified:
1478 for f in status.modified:
1474 if f in ms and ms[f] == 'u':
1479 if f in ms and ms[f] == 'u':
1475 raise util.Abort(_('unresolved merge conflicts '
1480 raise util.Abort(_('unresolved merge conflicts '
1476 '(see "hg help resolve")'))
1481 '(see "hg help resolve")'))
1477
1482
1478 if editor:
1483 if editor:
1479 cctx._text = editor(self, cctx, subs)
1484 cctx._text = editor(self, cctx, subs)
1480 edited = (text != cctx._text)
1485 edited = (text != cctx._text)
1481
1486
1482 # Save commit message in case this transaction gets rolled back
1487 # Save commit message in case this transaction gets rolled back
1483 # (e.g. by a pretxncommit hook). Leave the content alone on
1488 # (e.g. by a pretxncommit hook). Leave the content alone on
1484 # the assumption that the user will use the same editor again.
1489 # the assumption that the user will use the same editor again.
1485 msgfn = self.savecommitmessage(cctx._text)
1490 msgfn = self.savecommitmessage(cctx._text)
1486
1491
1487 # commit subs and write new state
1492 # commit subs and write new state
1488 if subs:
1493 if subs:
1489 for s in sorted(commitsubs):
1494 for s in sorted(commitsubs):
1490 sub = wctx.sub(s)
1495 sub = wctx.sub(s)
1491 self.ui.status(_('committing subrepository %s\n') %
1496 self.ui.status(_('committing subrepository %s\n') %
1492 subrepo.subrelpath(sub))
1497 subrepo.subrelpath(sub))
1493 sr = sub.commit(cctx._text, user, date)
1498 sr = sub.commit(cctx._text, user, date)
1494 newstate[s] = (newstate[s][0], sr)
1499 newstate[s] = (newstate[s][0], sr)
1495 subrepo.writestate(self, newstate)
1500 subrepo.writestate(self, newstate)
1496
1501
1497 p1, p2 = self.dirstate.parents()
1502 p1, p2 = self.dirstate.parents()
1498 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1503 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1499 try:
1504 try:
1500 self.hook("precommit", throw=True, parent1=hookp1,
1505 self.hook("precommit", throw=True, parent1=hookp1,
1501 parent2=hookp2)
1506 parent2=hookp2)
1502 ret = self.commitctx(cctx, True)
1507 ret = self.commitctx(cctx, True)
1503 except: # re-raises
1508 except: # re-raises
1504 if edited:
1509 if edited:
1505 self.ui.write(
1510 self.ui.write(
1506 _('note: commit message saved in %s\n') % msgfn)
1511 _('note: commit message saved in %s\n') % msgfn)
1507 raise
1512 raise
1508
1513
1509 # update bookmarks, dirstate and mergestate
1514 # update bookmarks, dirstate and mergestate
1510 bookmarks.update(self, [p1, p2], ret)
1515 bookmarks.update(self, [p1, p2], ret)
1511 cctx.markcommitted(ret)
1516 cctx.markcommitted(ret)
1512 ms.reset()
1517 ms.reset()
1513 finally:
1518 finally:
1514 wlock.release()
1519 wlock.release()
1515
1520
1516 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1521 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1517 # hack for command that use a temporary commit (eg: histedit)
1522 # hack for command that use a temporary commit (eg: histedit)
1518 # temporary commit got stripped before hook release
1523 # temporary commit got stripped before hook release
1519 if node in self:
1524 if node in self:
1520 self.hook("commit", node=node, parent1=parent1,
1525 self.hook("commit", node=node, parent1=parent1,
1521 parent2=parent2)
1526 parent2=parent2)
1522 self._afterlock(commithook)
1527 self._afterlock(commithook)
1523 return ret
1528 return ret
1524
1529
1525 @unfilteredmethod
1530 @unfilteredmethod
1526 def commitctx(self, ctx, error=False):
1531 def commitctx(self, ctx, error=False):
1527 """Add a new revision to current repository.
1532 """Add a new revision to current repository.
1528 Revision information is passed via the context argument.
1533 Revision information is passed via the context argument.
1529 """
1534 """
1530
1535
1531 tr = None
1536 tr = None
1532 p1, p2 = ctx.p1(), ctx.p2()
1537 p1, p2 = ctx.p1(), ctx.p2()
1533 user = ctx.user()
1538 user = ctx.user()
1534
1539
1535 lock = self.lock()
1540 lock = self.lock()
1536 try:
1541 try:
1537 tr = self.transaction("commit")
1542 tr = self.transaction("commit")
1538 trp = weakref.proxy(tr)
1543 trp = weakref.proxy(tr)
1539
1544
1540 if ctx.files():
1545 if ctx.files():
1541 m1 = p1.manifest()
1546 m1 = p1.manifest()
1542 m2 = p2.manifest()
1547 m2 = p2.manifest()
1543 m = m1.copy()
1548 m = m1.copy()
1544
1549
1545 # check in files
1550 # check in files
1546 added = []
1551 added = []
1547 changed = []
1552 changed = []
1548 removed = list(ctx.removed())
1553 removed = list(ctx.removed())
1549 linkrev = len(self)
1554 linkrev = len(self)
1550 self.ui.note(_("committing files:\n"))
1555 self.ui.note(_("committing files:\n"))
1551 for f in sorted(ctx.modified() + ctx.added()):
1556 for f in sorted(ctx.modified() + ctx.added()):
1552 self.ui.note(f + "\n")
1557 self.ui.note(f + "\n")
1553 try:
1558 try:
1554 fctx = ctx[f]
1559 fctx = ctx[f]
1555 if fctx is None:
1560 if fctx is None:
1556 removed.append(f)
1561 removed.append(f)
1557 else:
1562 else:
1558 added.append(f)
1563 added.append(f)
1559 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1564 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1560 trp, changed)
1565 trp, changed)
1561 m.setflag(f, fctx.flags())
1566 m.setflag(f, fctx.flags())
1562 except OSError, inst:
1567 except OSError, inst:
1563 self.ui.warn(_("trouble committing %s!\n") % f)
1568 self.ui.warn(_("trouble committing %s!\n") % f)
1564 raise
1569 raise
1565 except IOError, inst:
1570 except IOError, inst:
1566 errcode = getattr(inst, 'errno', errno.ENOENT)
1571 errcode = getattr(inst, 'errno', errno.ENOENT)
1567 if error or errcode and errcode != errno.ENOENT:
1572 if error or errcode and errcode != errno.ENOENT:
1568 self.ui.warn(_("trouble committing %s!\n") % f)
1573 self.ui.warn(_("trouble committing %s!\n") % f)
1569 raise
1574 raise
1570
1575
1571 # update manifest
1576 # update manifest
1572 self.ui.note(_("committing manifest\n"))
1577 self.ui.note(_("committing manifest\n"))
1573 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1578 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1574 drop = [f for f in removed if f in m]
1579 drop = [f for f in removed if f in m]
1575 for f in drop:
1580 for f in drop:
1576 del m[f]
1581 del m[f]
1577 mn = self.manifest.add(m, trp, linkrev,
1582 mn = self.manifest.add(m, trp, linkrev,
1578 p1.manifestnode(), p2.manifestnode(),
1583 p1.manifestnode(), p2.manifestnode(),
1579 added, drop)
1584 added, drop)
1580 files = changed + removed
1585 files = changed + removed
1581 else:
1586 else:
1582 mn = p1.manifestnode()
1587 mn = p1.manifestnode()
1583 files = []
1588 files = []
1584
1589
1585 # update changelog
1590 # update changelog
1586 self.ui.note(_("committing changelog\n"))
1591 self.ui.note(_("committing changelog\n"))
1587 self.changelog.delayupdate(tr)
1592 self.changelog.delayupdate(tr)
1588 n = self.changelog.add(mn, files, ctx.description(),
1593 n = self.changelog.add(mn, files, ctx.description(),
1589 trp, p1.node(), p2.node(),
1594 trp, p1.node(), p2.node(),
1590 user, ctx.date(), ctx.extra().copy())
1595 user, ctx.date(), ctx.extra().copy())
1591 p = lambda: tr.writepending() and self.root or ""
1596 p = lambda: tr.writepending() and self.root or ""
1592 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1597 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1593 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1598 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1594 parent2=xp2, pending=p)
1599 parent2=xp2, pending=p)
1595 # set the new commit is proper phase
1600 # set the new commit is proper phase
1596 targetphase = subrepo.newcommitphase(self.ui, ctx)
1601 targetphase = subrepo.newcommitphase(self.ui, ctx)
1597 if targetphase:
1602 if targetphase:
1598 # retract boundary do not alter parent changeset.
1603 # retract boundary do not alter parent changeset.
1599 # if a parent have higher the resulting phase will
1604 # if a parent have higher the resulting phase will
1600 # be compliant anyway
1605 # be compliant anyway
1601 #
1606 #
1602 # if minimal phase was 0 we don't need to retract anything
1607 # if minimal phase was 0 we don't need to retract anything
1603 phases.retractboundary(self, tr, targetphase, [n])
1608 phases.retractboundary(self, tr, targetphase, [n])
1604 tr.close()
1609 tr.close()
1605 branchmap.updatecache(self.filtered('served'))
1610 branchmap.updatecache(self.filtered('served'))
1606 return n
1611 return n
1607 finally:
1612 finally:
1608 if tr:
1613 if tr:
1609 tr.release()
1614 tr.release()
1610 lock.release()
1615 lock.release()
1611
1616
1612 @unfilteredmethod
1617 @unfilteredmethod
1613 def destroying(self):
1618 def destroying(self):
1614 '''Inform the repository that nodes are about to be destroyed.
1619 '''Inform the repository that nodes are about to be destroyed.
1615 Intended for use by strip and rollback, so there's a common
1620 Intended for use by strip and rollback, so there's a common
1616 place for anything that has to be done before destroying history.
1621 place for anything that has to be done before destroying history.
1617
1622
1618 This is mostly useful for saving state that is in memory and waiting
1623 This is mostly useful for saving state that is in memory and waiting
1619 to be flushed when the current lock is released. Because a call to
1624 to be flushed when the current lock is released. Because a call to
1620 destroyed is imminent, the repo will be invalidated causing those
1625 destroyed is imminent, the repo will be invalidated causing those
1621 changes to stay in memory (waiting for the next unlock), or vanish
1626 changes to stay in memory (waiting for the next unlock), or vanish
1622 completely.
1627 completely.
1623 '''
1628 '''
1624 # When using the same lock to commit and strip, the phasecache is left
1629 # When using the same lock to commit and strip, the phasecache is left
1625 # dirty after committing. Then when we strip, the repo is invalidated,
1630 # dirty after committing. Then when we strip, the repo is invalidated,
1626 # causing those changes to disappear.
1631 # causing those changes to disappear.
1627 if '_phasecache' in vars(self):
1632 if '_phasecache' in vars(self):
1628 self._phasecache.write()
1633 self._phasecache.write()
1629
1634
1630 @unfilteredmethod
1635 @unfilteredmethod
1631 def destroyed(self):
1636 def destroyed(self):
1632 '''Inform the repository that nodes have been destroyed.
1637 '''Inform the repository that nodes have been destroyed.
1633 Intended for use by strip and rollback, so there's a common
1638 Intended for use by strip and rollback, so there's a common
1634 place for anything that has to be done after destroying history.
1639 place for anything that has to be done after destroying history.
1635 '''
1640 '''
1636 # When one tries to:
1641 # When one tries to:
1637 # 1) destroy nodes thus calling this method (e.g. strip)
1642 # 1) destroy nodes thus calling this method (e.g. strip)
1638 # 2) use phasecache somewhere (e.g. commit)
1643 # 2) use phasecache somewhere (e.g. commit)
1639 #
1644 #
1640 # then 2) will fail because the phasecache contains nodes that were
1645 # then 2) will fail because the phasecache contains nodes that were
1641 # removed. We can either remove phasecache from the filecache,
1646 # removed. We can either remove phasecache from the filecache,
1642 # causing it to reload next time it is accessed, or simply filter
1647 # causing it to reload next time it is accessed, or simply filter
1643 # the removed nodes now and write the updated cache.
1648 # the removed nodes now and write the updated cache.
1644 self._phasecache.filterunknown(self)
1649 self._phasecache.filterunknown(self)
1645 self._phasecache.write()
1650 self._phasecache.write()
1646
1651
1647 # update the 'served' branch cache to help read only server process
1652 # update the 'served' branch cache to help read only server process
1648 # Thanks to branchcache collaboration this is done from the nearest
1653 # Thanks to branchcache collaboration this is done from the nearest
1649 # filtered subset and it is expected to be fast.
1654 # filtered subset and it is expected to be fast.
1650 branchmap.updatecache(self.filtered('served'))
1655 branchmap.updatecache(self.filtered('served'))
1651
1656
1652 # Ensure the persistent tag cache is updated. Doing it now
1657 # Ensure the persistent tag cache is updated. Doing it now
1653 # means that the tag cache only has to worry about destroyed
1658 # means that the tag cache only has to worry about destroyed
1654 # heads immediately after a strip/rollback. That in turn
1659 # heads immediately after a strip/rollback. That in turn
1655 # guarantees that "cachetip == currenttip" (comparing both rev
1660 # guarantees that "cachetip == currenttip" (comparing both rev
1656 # and node) always means no nodes have been added or destroyed.
1661 # and node) always means no nodes have been added or destroyed.
1657
1662
1658 # XXX this is suboptimal when qrefresh'ing: we strip the current
1663 # XXX this is suboptimal when qrefresh'ing: we strip the current
1659 # head, refresh the tag cache, then immediately add a new head.
1664 # head, refresh the tag cache, then immediately add a new head.
1660 # But I think doing it this way is necessary for the "instant
1665 # But I think doing it this way is necessary for the "instant
1661 # tag cache retrieval" case to work.
1666 # tag cache retrieval" case to work.
1662 self.invalidate()
1667 self.invalidate()
1663
1668
1664 def walk(self, match, node=None):
1669 def walk(self, match, node=None):
1665 '''
1670 '''
1666 walk recursively through the directory tree or a given
1671 walk recursively through the directory tree or a given
1667 changeset, finding all files matched by the match
1672 changeset, finding all files matched by the match
1668 function
1673 function
1669 '''
1674 '''
1670 return self[node].walk(match)
1675 return self[node].walk(match)
1671
1676
1672 def status(self, node1='.', node2=None, match=None,
1677 def status(self, node1='.', node2=None, match=None,
1673 ignored=False, clean=False, unknown=False,
1678 ignored=False, clean=False, unknown=False,
1674 listsubrepos=False):
1679 listsubrepos=False):
1675 '''a convenience method that calls node1.status(node2)'''
1680 '''a convenience method that calls node1.status(node2)'''
1676 return self[node1].status(node2, match, ignored, clean, unknown,
1681 return self[node1].status(node2, match, ignored, clean, unknown,
1677 listsubrepos)
1682 listsubrepos)
1678
1683
1679 def heads(self, start=None):
1684 def heads(self, start=None):
1680 heads = self.changelog.heads(start)
1685 heads = self.changelog.heads(start)
1681 # sort the output in rev descending order
1686 # sort the output in rev descending order
1682 return sorted(heads, key=self.changelog.rev, reverse=True)
1687 return sorted(heads, key=self.changelog.rev, reverse=True)
1683
1688
1684 def branchheads(self, branch=None, start=None, closed=False):
1689 def branchheads(self, branch=None, start=None, closed=False):
1685 '''return a (possibly filtered) list of heads for the given branch
1690 '''return a (possibly filtered) list of heads for the given branch
1686
1691
1687 Heads are returned in topological order, from newest to oldest.
1692 Heads are returned in topological order, from newest to oldest.
1688 If branch is None, use the dirstate branch.
1693 If branch is None, use the dirstate branch.
1689 If start is not None, return only heads reachable from start.
1694 If start is not None, return only heads reachable from start.
1690 If closed is True, return heads that are marked as closed as well.
1695 If closed is True, return heads that are marked as closed as well.
1691 '''
1696 '''
1692 if branch is None:
1697 if branch is None:
1693 branch = self[None].branch()
1698 branch = self[None].branch()
1694 branches = self.branchmap()
1699 branches = self.branchmap()
1695 if branch not in branches:
1700 if branch not in branches:
1696 return []
1701 return []
1697 # the cache returns heads ordered lowest to highest
1702 # the cache returns heads ordered lowest to highest
1698 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1703 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1699 if start is not None:
1704 if start is not None:
1700 # filter out the heads that cannot be reached from startrev
1705 # filter out the heads that cannot be reached from startrev
1701 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1706 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1702 bheads = [h for h in bheads if h in fbheads]
1707 bheads = [h for h in bheads if h in fbheads]
1703 return bheads
1708 return bheads
1704
1709
1705 def branches(self, nodes):
1710 def branches(self, nodes):
1706 if not nodes:
1711 if not nodes:
1707 nodes = [self.changelog.tip()]
1712 nodes = [self.changelog.tip()]
1708 b = []
1713 b = []
1709 for n in nodes:
1714 for n in nodes:
1710 t = n
1715 t = n
1711 while True:
1716 while True:
1712 p = self.changelog.parents(n)
1717 p = self.changelog.parents(n)
1713 if p[1] != nullid or p[0] == nullid:
1718 if p[1] != nullid or p[0] == nullid:
1714 b.append((t, n, p[0], p[1]))
1719 b.append((t, n, p[0], p[1]))
1715 break
1720 break
1716 n = p[0]
1721 n = p[0]
1717 return b
1722 return b
1718
1723
1719 def between(self, pairs):
1724 def between(self, pairs):
1720 r = []
1725 r = []
1721
1726
1722 for top, bottom in pairs:
1727 for top, bottom in pairs:
1723 n, l, i = top, [], 0
1728 n, l, i = top, [], 0
1724 f = 1
1729 f = 1
1725
1730
1726 while n != bottom and n != nullid:
1731 while n != bottom and n != nullid:
1727 p = self.changelog.parents(n)[0]
1732 p = self.changelog.parents(n)[0]
1728 if i == f:
1733 if i == f:
1729 l.append(n)
1734 l.append(n)
1730 f = f * 2
1735 f = f * 2
1731 n = p
1736 n = p
1732 i += 1
1737 i += 1
1733
1738
1734 r.append(l)
1739 r.append(l)
1735
1740
1736 return r
1741 return r
1737
1742
1738 def checkpush(self, pushop):
1743 def checkpush(self, pushop):
1739 """Extensions can override this function if additional checks have
1744 """Extensions can override this function if additional checks have
1740 to be performed before pushing, or call it if they override push
1745 to be performed before pushing, or call it if they override push
1741 command.
1746 command.
1742 """
1747 """
1743 pass
1748 pass
1744
1749
1745 @unfilteredpropertycache
1750 @unfilteredpropertycache
1746 def prepushoutgoinghooks(self):
1751 def prepushoutgoinghooks(self):
1747 """Return util.hooks consists of "(repo, remote, outgoing)"
1752 """Return util.hooks consists of "(repo, remote, outgoing)"
1748 functions, which are called before pushing changesets.
1753 functions, which are called before pushing changesets.
1749 """
1754 """
1750 return util.hooks()
1755 return util.hooks()
1751
1756
1752 def stream_in(self, remote, requirements):
1757 def stream_in(self, remote, requirements):
1753 lock = self.lock()
1758 lock = self.lock()
1754 try:
1759 try:
1755 # Save remote branchmap. We will use it later
1760 # Save remote branchmap. We will use it later
1756 # to speed up branchcache creation
1761 # to speed up branchcache creation
1757 rbranchmap = None
1762 rbranchmap = None
1758 if remote.capable("branchmap"):
1763 if remote.capable("branchmap"):
1759 rbranchmap = remote.branchmap()
1764 rbranchmap = remote.branchmap()
1760
1765
1761 fp = remote.stream_out()
1766 fp = remote.stream_out()
1762 l = fp.readline()
1767 l = fp.readline()
1763 try:
1768 try:
1764 resp = int(l)
1769 resp = int(l)
1765 except ValueError:
1770 except ValueError:
1766 raise error.ResponseError(
1771 raise error.ResponseError(
1767 _('unexpected response from remote server:'), l)
1772 _('unexpected response from remote server:'), l)
1768 if resp == 1:
1773 if resp == 1:
1769 raise util.Abort(_('operation forbidden by server'))
1774 raise util.Abort(_('operation forbidden by server'))
1770 elif resp == 2:
1775 elif resp == 2:
1771 raise util.Abort(_('locking the remote repository failed'))
1776 raise util.Abort(_('locking the remote repository failed'))
1772 elif resp != 0:
1777 elif resp != 0:
1773 raise util.Abort(_('the server sent an unknown error code'))
1778 raise util.Abort(_('the server sent an unknown error code'))
1774 self.ui.status(_('streaming all changes\n'))
1779 self.ui.status(_('streaming all changes\n'))
1775 l = fp.readline()
1780 l = fp.readline()
1776 try:
1781 try:
1777 total_files, total_bytes = map(int, l.split(' ', 1))
1782 total_files, total_bytes = map(int, l.split(' ', 1))
1778 except (ValueError, TypeError):
1783 except (ValueError, TypeError):
1779 raise error.ResponseError(
1784 raise error.ResponseError(
1780 _('unexpected response from remote server:'), l)
1785 _('unexpected response from remote server:'), l)
1781 self.ui.status(_('%d files to transfer, %s of data\n') %
1786 self.ui.status(_('%d files to transfer, %s of data\n') %
1782 (total_files, util.bytecount(total_bytes)))
1787 (total_files, util.bytecount(total_bytes)))
1783 handled_bytes = 0
1788 handled_bytes = 0
1784 self.ui.progress(_('clone'), 0, total=total_bytes)
1789 self.ui.progress(_('clone'), 0, total=total_bytes)
1785 start = time.time()
1790 start = time.time()
1786
1791
1787 tr = self.transaction(_('clone'))
1792 tr = self.transaction(_('clone'))
1788 try:
1793 try:
1789 for i in xrange(total_files):
1794 for i in xrange(total_files):
1790 # XXX doesn't support '\n' or '\r' in filenames
1795 # XXX doesn't support '\n' or '\r' in filenames
1791 l = fp.readline()
1796 l = fp.readline()
1792 try:
1797 try:
1793 name, size = l.split('\0', 1)
1798 name, size = l.split('\0', 1)
1794 size = int(size)
1799 size = int(size)
1795 except (ValueError, TypeError):
1800 except (ValueError, TypeError):
1796 raise error.ResponseError(
1801 raise error.ResponseError(
1797 _('unexpected response from remote server:'), l)
1802 _('unexpected response from remote server:'), l)
1798 if self.ui.debugflag:
1803 if self.ui.debugflag:
1799 self.ui.debug('adding %s (%s)\n' %
1804 self.ui.debug('adding %s (%s)\n' %
1800 (name, util.bytecount(size)))
1805 (name, util.bytecount(size)))
1801 # for backwards compat, name was partially encoded
1806 # for backwards compat, name was partially encoded
1802 ofp = self.svfs(store.decodedir(name), 'w')
1807 ofp = self.svfs(store.decodedir(name), 'w')
1803 for chunk in util.filechunkiter(fp, limit=size):
1808 for chunk in util.filechunkiter(fp, limit=size):
1804 handled_bytes += len(chunk)
1809 handled_bytes += len(chunk)
1805 self.ui.progress(_('clone'), handled_bytes,
1810 self.ui.progress(_('clone'), handled_bytes,
1806 total=total_bytes)
1811 total=total_bytes)
1807 ofp.write(chunk)
1812 ofp.write(chunk)
1808 ofp.close()
1813 ofp.close()
1809 tr.close()
1814 tr.close()
1810 finally:
1815 finally:
1811 tr.release()
1816 tr.release()
1812
1817
1813 # Writing straight to files circumvented the inmemory caches
1818 # Writing straight to files circumvented the inmemory caches
1814 self.invalidate()
1819 self.invalidate()
1815
1820
1816 elapsed = time.time() - start
1821 elapsed = time.time() - start
1817 if elapsed <= 0:
1822 if elapsed <= 0:
1818 elapsed = 0.001
1823 elapsed = 0.001
1819 self.ui.progress(_('clone'), None)
1824 self.ui.progress(_('clone'), None)
1820 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1825 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1821 (util.bytecount(total_bytes), elapsed,
1826 (util.bytecount(total_bytes), elapsed,
1822 util.bytecount(total_bytes / elapsed)))
1827 util.bytecount(total_bytes / elapsed)))
1823
1828
1824 # new requirements = old non-format requirements +
1829 # new requirements = old non-format requirements +
1825 # new format-related
1830 # new format-related
1826 # requirements from the streamed-in repository
1831 # requirements from the streamed-in repository
1827 requirements.update(set(self.requirements) - self.supportedformats)
1832 requirements.update(set(self.requirements) - self.supportedformats)
1828 self._applyrequirements(requirements)
1833 self._applyrequirements(requirements)
1829 self._writerequirements()
1834 self._writerequirements()
1830
1835
1831 if rbranchmap:
1836 if rbranchmap:
1832 rbheads = []
1837 rbheads = []
1833 closed = []
1838 closed = []
1834 for bheads in rbranchmap.itervalues():
1839 for bheads in rbranchmap.itervalues():
1835 rbheads.extend(bheads)
1840 rbheads.extend(bheads)
1836 for h in bheads:
1841 for h in bheads:
1837 r = self.changelog.rev(h)
1842 r = self.changelog.rev(h)
1838 b, c = self.changelog.branchinfo(r)
1843 b, c = self.changelog.branchinfo(r)
1839 if c:
1844 if c:
1840 closed.append(h)
1845 closed.append(h)
1841
1846
1842 if rbheads:
1847 if rbheads:
1843 rtiprev = max((int(self.changelog.rev(node))
1848 rtiprev = max((int(self.changelog.rev(node))
1844 for node in rbheads))
1849 for node in rbheads))
1845 cache = branchmap.branchcache(rbranchmap,
1850 cache = branchmap.branchcache(rbranchmap,
1846 self[rtiprev].node(),
1851 self[rtiprev].node(),
1847 rtiprev,
1852 rtiprev,
1848 closednodes=closed)
1853 closednodes=closed)
1849 # Try to stick it as low as possible
1854 # Try to stick it as low as possible
1850 # filter above served are unlikely to be fetch from a clone
1855 # filter above served are unlikely to be fetch from a clone
1851 for candidate in ('base', 'immutable', 'served'):
1856 for candidate in ('base', 'immutable', 'served'):
1852 rview = self.filtered(candidate)
1857 rview = self.filtered(candidate)
1853 if cache.validfor(rview):
1858 if cache.validfor(rview):
1854 self._branchcaches[candidate] = cache
1859 self._branchcaches[candidate] = cache
1855 cache.write(rview)
1860 cache.write(rview)
1856 break
1861 break
1857 self.invalidate()
1862 self.invalidate()
1858 return len(self.heads()) + 1
1863 return len(self.heads()) + 1
1859 finally:
1864 finally:
1860 lock.release()
1865 lock.release()
1861
1866
1862 def clone(self, remote, heads=[], stream=None):
1867 def clone(self, remote, heads=[], stream=None):
1863 '''clone remote repository.
1868 '''clone remote repository.
1864
1869
1865 keyword arguments:
1870 keyword arguments:
1866 heads: list of revs to clone (forces use of pull)
1871 heads: list of revs to clone (forces use of pull)
1867 stream: use streaming clone if possible'''
1872 stream: use streaming clone if possible'''
1868
1873
1869 # now, all clients that can request uncompressed clones can
1874 # now, all clients that can request uncompressed clones can
1870 # read repo formats supported by all servers that can serve
1875 # read repo formats supported by all servers that can serve
1871 # them.
1876 # them.
1872
1877
1873 # if revlog format changes, client will have to check version
1878 # if revlog format changes, client will have to check version
1874 # and format flags on "stream" capability, and use
1879 # and format flags on "stream" capability, and use
1875 # uncompressed only if compatible.
1880 # uncompressed only if compatible.
1876
1881
1877 if stream is None:
1882 if stream is None:
1878 # if the server explicitly prefers to stream (for fast LANs)
1883 # if the server explicitly prefers to stream (for fast LANs)
1879 stream = remote.capable('stream-preferred')
1884 stream = remote.capable('stream-preferred')
1880
1885
1881 if stream and not heads:
1886 if stream and not heads:
1882 # 'stream' means remote revlog format is revlogv1 only
1887 # 'stream' means remote revlog format is revlogv1 only
1883 if remote.capable('stream'):
1888 if remote.capable('stream'):
1884 self.stream_in(remote, set(('revlogv1',)))
1889 self.stream_in(remote, set(('revlogv1',)))
1885 else:
1890 else:
1886 # otherwise, 'streamreqs' contains the remote revlog format
1891 # otherwise, 'streamreqs' contains the remote revlog format
1887 streamreqs = remote.capable('streamreqs')
1892 streamreqs = remote.capable('streamreqs')
1888 if streamreqs:
1893 if streamreqs:
1889 streamreqs = set(streamreqs.split(','))
1894 streamreqs = set(streamreqs.split(','))
1890 # if we support it, stream in and adjust our requirements
1895 # if we support it, stream in and adjust our requirements
1891 if not streamreqs - self.supportedformats:
1896 if not streamreqs - self.supportedformats:
1892 self.stream_in(remote, streamreqs)
1897 self.stream_in(remote, streamreqs)
1893
1898
1894 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1899 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1895 try:
1900 try:
1896 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1901 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1897 ret = exchange.pull(self, remote, heads).cgresult
1902 ret = exchange.pull(self, remote, heads).cgresult
1898 finally:
1903 finally:
1899 self.ui.restoreconfig(quiet)
1904 self.ui.restoreconfig(quiet)
1900 return ret
1905 return ret
1901
1906
1902 def pushkey(self, namespace, key, old, new):
1907 def pushkey(self, namespace, key, old, new):
1903 try:
1908 try:
1904 tr = self.currenttransaction()
1909 tr = self.currenttransaction()
1905 hookargs = {}
1910 hookargs = {}
1906 if tr is not None:
1911 if tr is not None:
1907 hookargs.update(tr.hookargs)
1912 hookargs.update(tr.hookargs)
1908 pending = lambda: tr.writepending() and self.root or ""
1913 pending = lambda: tr.writepending() and self.root or ""
1909 hookargs['pending'] = pending
1914 hookargs['pending'] = pending
1910 hookargs['namespace'] = namespace
1915 hookargs['namespace'] = namespace
1911 hookargs['key'] = key
1916 hookargs['key'] = key
1912 hookargs['old'] = old
1917 hookargs['old'] = old
1913 hookargs['new'] = new
1918 hookargs['new'] = new
1914 self.hook('prepushkey', throw=True, **hookargs)
1919 self.hook('prepushkey', throw=True, **hookargs)
1915 except error.HookAbort, exc:
1920 except error.HookAbort, exc:
1916 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1921 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1917 if exc.hint:
1922 if exc.hint:
1918 self.ui.write_err(_("(%s)\n") % exc.hint)
1923 self.ui.write_err(_("(%s)\n") % exc.hint)
1919 return False
1924 return False
1920 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1925 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1921 ret = pushkey.push(self, namespace, key, old, new)
1926 ret = pushkey.push(self, namespace, key, old, new)
1922 def runhook():
1927 def runhook():
1923 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1928 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1924 ret=ret)
1929 ret=ret)
1925 self._afterlock(runhook)
1930 self._afterlock(runhook)
1926 return ret
1931 return ret
1927
1932
1928 def listkeys(self, namespace):
1933 def listkeys(self, namespace):
1929 self.hook('prelistkeys', throw=True, namespace=namespace)
1934 self.hook('prelistkeys', throw=True, namespace=namespace)
1930 self.ui.debug('listing keys for "%s"\n' % namespace)
1935 self.ui.debug('listing keys for "%s"\n' % namespace)
1931 values = pushkey.list(self, namespace)
1936 values = pushkey.list(self, namespace)
1932 self.hook('listkeys', namespace=namespace, values=values)
1937 self.hook('listkeys', namespace=namespace, values=values)
1933 return values
1938 return values
1934
1939
1935 def debugwireargs(self, one, two, three=None, four=None, five=None):
1940 def debugwireargs(self, one, two, three=None, four=None, five=None):
1936 '''used to test argument passing over the wire'''
1941 '''used to test argument passing over the wire'''
1937 return "%s %s %s %s %s" % (one, two, three, four, five)
1942 return "%s %s %s %s %s" % (one, two, three, four, five)
1938
1943
1939 def savecommitmessage(self, text):
1944 def savecommitmessage(self, text):
1940 fp = self.vfs('last-message.txt', 'wb')
1945 fp = self.vfs('last-message.txt', 'wb')
1941 try:
1946 try:
1942 fp.write(text)
1947 fp.write(text)
1943 finally:
1948 finally:
1944 fp.close()
1949 fp.close()
1945 return self.pathto(fp.name[len(self.root) + 1:])
1950 return self.pathto(fp.name[len(self.root) + 1:])
1946
1951
1947 # used to avoid circular references so destructors work
1952 # used to avoid circular references so destructors work
1948 def aftertrans(files):
1953 def aftertrans(files):
1949 renamefiles = [tuple(t) for t in files]
1954 renamefiles = [tuple(t) for t in files]
1950 def a():
1955 def a():
1951 for vfs, src, dest in renamefiles:
1956 for vfs, src, dest in renamefiles:
1952 try:
1957 try:
1953 vfs.rename(src, dest)
1958 vfs.rename(src, dest)
1954 except OSError: # journal file does not yet exist
1959 except OSError: # journal file does not yet exist
1955 pass
1960 pass
1956 return a
1961 return a
1957
1962
1958 def undoname(fn):
1963 def undoname(fn):
1959 base, name = os.path.split(fn)
1964 base, name = os.path.split(fn)
1960 assert name.startswith('journal')
1965 assert name.startswith('journal')
1961 return os.path.join(base, name.replace('journal', 'undo', 1))
1966 return os.path.join(base, name.replace('journal', 'undo', 1))
1962
1967
1963 def instance(ui, path, create):
1968 def instance(ui, path, create):
1964 return localrepository(ui, util.urllocalpath(path), create)
1969 return localrepository(ui, util.urllocalpath(path), create)
1965
1970
1966 def islocal(path):
1971 def islocal(path):
1967 return True
1972 return True
@@ -1,985 +1,1003 b''
1 test merge-tools configuration - mostly exercising filemerge.py
1 test merge-tools configuration - mostly exercising filemerge.py
2
2
3 $ unset HGMERGE # make sure HGMERGE doesn't interfere with the test
3 $ unset HGMERGE # make sure HGMERGE doesn't interfere with the test
4 $ hg init
4 $ hg init
5
5
6 revision 0
6 revision 0
7
7
8 $ echo "revision 0" > f
8 $ echo "revision 0" > f
9 $ echo "space" >> f
9 $ echo "space" >> f
10 $ hg commit -Am "revision 0"
10 $ hg commit -Am "revision 0"
11 adding f
11 adding f
12
12
13 revision 1
13 revision 1
14
14
15 $ echo "revision 1" > f
15 $ echo "revision 1" > f
16 $ echo "space" >> f
16 $ echo "space" >> f
17 $ hg commit -Am "revision 1"
17 $ hg commit -Am "revision 1"
18 $ hg update 0 > /dev/null
18 $ hg update 0 > /dev/null
19
19
20 revision 2
20 revision 2
21
21
22 $ echo "revision 2" > f
22 $ echo "revision 2" > f
23 $ echo "space" >> f
23 $ echo "space" >> f
24 $ hg commit -Am "revision 2"
24 $ hg commit -Am "revision 2"
25 created new head
25 created new head
26 $ hg update 0 > /dev/null
26 $ hg update 0 > /dev/null
27
27
28 revision 3 - simple to merge
28 revision 3 - simple to merge
29
29
30 $ echo "revision 3" >> f
30 $ echo "revision 3" >> f
31 $ hg commit -Am "revision 3"
31 $ hg commit -Am "revision 3"
32 created new head
32 created new head
33
33
34 revision 4 - hard to merge
34 revision 4 - hard to merge
35
35
36 $ hg update 0 > /dev/null
36 $ hg update 0 > /dev/null
37 $ echo "revision 4" > f
37 $ echo "revision 4" > f
38 $ hg commit -Am "revision 4"
38 $ hg commit -Am "revision 4"
39 created new head
39 created new head
40
40
41 $ echo "[merge-tools]" > .hg/hgrc
41 $ echo "[merge-tools]" > .hg/hgrc
42
42
43 $ beforemerge() {
43 $ beforemerge() {
44 > cat .hg/hgrc
44 > cat .hg/hgrc
45 > echo "# hg update -C 1"
45 > echo "# hg update -C 1"
46 > hg update -C 1 > /dev/null
46 > hg update -C 1 > /dev/null
47 > }
47 > }
48 $ aftermerge() {
48 $ aftermerge() {
49 > echo "# cat f"
49 > echo "# cat f"
50 > cat f
50 > cat f
51 > echo "# hg stat"
51 > echo "# hg stat"
52 > hg stat
52 > hg stat
53 > rm -f f.orig
53 > rm -f f.orig
54 > }
54 > }
55
55
56 Tool selection
56 Tool selection
57
57
58 default is internal merge:
58 default is internal merge:
59
59
60 $ beforemerge
60 $ beforemerge
61 [merge-tools]
61 [merge-tools]
62 # hg update -C 1
62 # hg update -C 1
63
63
64 hg merge -r 2
64 hg merge -r 2
65 override $PATH to ensure hgmerge not visible; use $PYTHON in case we're
65 override $PATH to ensure hgmerge not visible; use $PYTHON in case we're
66 running from a devel copy, not a temp installation
66 running from a devel copy, not a temp installation
67
67
68 $ PATH="$BINDIR" $PYTHON "$BINDIR"/hg merge -r 2
68 $ PATH="$BINDIR" $PYTHON "$BINDIR"/hg merge -r 2
69 merging f
69 merging f
70 warning: conflicts during merge.
70 warning: conflicts during merge.
71 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
71 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
72 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
72 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
73 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
73 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
74 [1]
74 [1]
75 $ aftermerge
75 $ aftermerge
76 # cat f
76 # cat f
77 <<<<<<< local: ef83787e2614 - test: revision 1
77 <<<<<<< local: ef83787e2614 - test: revision 1
78 revision 1
78 revision 1
79 =======
79 =======
80 revision 2
80 revision 2
81 >>>>>>> other: 0185f4e0cf02 - test: revision 2
81 >>>>>>> other: 0185f4e0cf02 - test: revision 2
82 space
82 space
83 # hg stat
83 # hg stat
84 M f
84 M f
85 ? f.orig
85 ? f.orig
86
86
87 simplest hgrc using false for merge:
87 simplest hgrc using false for merge:
88
88
89 $ echo "false.whatever=" >> .hg/hgrc
89 $ echo "false.whatever=" >> .hg/hgrc
90 $ beforemerge
90 $ beforemerge
91 [merge-tools]
91 [merge-tools]
92 false.whatever=
92 false.whatever=
93 # hg update -C 1
93 # hg update -C 1
94 $ hg merge -r 2
94 $ hg merge -r 2
95 merging f
95 merging f
96 merging f failed!
96 merging f failed!
97 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
97 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
98 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
98 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
99 [1]
99 [1]
100 $ aftermerge
100 $ aftermerge
101 # cat f
101 # cat f
102 revision 1
102 revision 1
103 space
103 space
104 # hg stat
104 # hg stat
105 M f
105 M f
106 ? f.orig
106 ? f.orig
107
107
108 #if unix-permissions
108 #if unix-permissions
109
109
110 unexecutable file in $PATH shouldn't be found:
110 unexecutable file in $PATH shouldn't be found:
111
111
112 $ echo "echo fail" > false
112 $ echo "echo fail" > false
113 $ hg up -qC 1
113 $ hg up -qC 1
114 $ PATH="`pwd`:$BINDIR" $PYTHON "$BINDIR"/hg merge -r 2
114 $ PATH="`pwd`:$BINDIR" $PYTHON "$BINDIR"/hg merge -r 2
115 merging f
115 merging f
116 warning: conflicts during merge.
116 warning: conflicts during merge.
117 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
117 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
118 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
118 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
119 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
119 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
120 [1]
120 [1]
121 $ rm false
121 $ rm false
122
122
123 #endif
123 #endif
124
124
125 executable directory in $PATH shouldn't be found:
125 executable directory in $PATH shouldn't be found:
126
126
127 $ mkdir false
127 $ mkdir false
128 $ hg up -qC 1
128 $ hg up -qC 1
129 $ PATH="`pwd`:$BINDIR" $PYTHON "$BINDIR"/hg merge -r 2
129 $ PATH="`pwd`:$BINDIR" $PYTHON "$BINDIR"/hg merge -r 2
130 merging f
130 merging f
131 warning: conflicts during merge.
131 warning: conflicts during merge.
132 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
132 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
133 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
133 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
134 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
134 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
135 [1]
135 [1]
136 $ rmdir false
136 $ rmdir false
137
137
138 true with higher .priority gets precedence:
138 true with higher .priority gets precedence:
139
139
140 $ echo "true.priority=1" >> .hg/hgrc
140 $ echo "true.priority=1" >> .hg/hgrc
141 $ beforemerge
141 $ beforemerge
142 [merge-tools]
142 [merge-tools]
143 false.whatever=
143 false.whatever=
144 true.priority=1
144 true.priority=1
145 # hg update -C 1
145 # hg update -C 1
146 $ hg merge -r 2
146 $ hg merge -r 2
147 merging f
147 merging f
148 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
148 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
149 (branch merge, don't forget to commit)
149 (branch merge, don't forget to commit)
150 $ aftermerge
150 $ aftermerge
151 # cat f
151 # cat f
152 revision 1
152 revision 1
153 space
153 space
154 # hg stat
154 # hg stat
155 M f
155 M f
156
156
157 unless lowered on command line:
157 unless lowered on command line:
158
158
159 $ beforemerge
159 $ beforemerge
160 [merge-tools]
160 [merge-tools]
161 false.whatever=
161 false.whatever=
162 true.priority=1
162 true.priority=1
163 # hg update -C 1
163 # hg update -C 1
164 $ hg merge -r 2 --config merge-tools.true.priority=-7
164 $ hg merge -r 2 --config merge-tools.true.priority=-7
165 merging f
165 merging f
166 merging f failed!
166 merging f failed!
167 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
167 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
168 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
168 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
169 [1]
169 [1]
170 $ aftermerge
170 $ aftermerge
171 # cat f
171 # cat f
172 revision 1
172 revision 1
173 space
173 space
174 # hg stat
174 # hg stat
175 M f
175 M f
176 ? f.orig
176 ? f.orig
177
177
178 or false set higher on command line:
178 or false set higher on command line:
179
179
180 $ beforemerge
180 $ beforemerge
181 [merge-tools]
181 [merge-tools]
182 false.whatever=
182 false.whatever=
183 true.priority=1
183 true.priority=1
184 # hg update -C 1
184 # hg update -C 1
185 $ hg merge -r 2 --config merge-tools.false.priority=117
185 $ hg merge -r 2 --config merge-tools.false.priority=117
186 merging f
186 merging f
187 merging f failed!
187 merging f failed!
188 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
188 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
189 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
189 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
190 [1]
190 [1]
191 $ aftermerge
191 $ aftermerge
192 # cat f
192 # cat f
193 revision 1
193 revision 1
194 space
194 space
195 # hg stat
195 # hg stat
196 M f
196 M f
197 ? f.orig
197 ? f.orig
198
198
199 or true.executable not found in PATH:
199 or true.executable not found in PATH:
200
200
201 $ beforemerge
201 $ beforemerge
202 [merge-tools]
202 [merge-tools]
203 false.whatever=
203 false.whatever=
204 true.priority=1
204 true.priority=1
205 # hg update -C 1
205 # hg update -C 1
206 $ hg merge -r 2 --config merge-tools.true.executable=nonexistentmergetool
206 $ hg merge -r 2 --config merge-tools.true.executable=nonexistentmergetool
207 merging f
207 merging f
208 merging f failed!
208 merging f failed!
209 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
209 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
210 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
210 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
211 [1]
211 [1]
212 $ aftermerge
212 $ aftermerge
213 # cat f
213 # cat f
214 revision 1
214 revision 1
215 space
215 space
216 # hg stat
216 # hg stat
217 M f
217 M f
218 ? f.orig
218 ? f.orig
219
219
220 or true.executable with bogus path:
220 or true.executable with bogus path:
221
221
222 $ beforemerge
222 $ beforemerge
223 [merge-tools]
223 [merge-tools]
224 false.whatever=
224 false.whatever=
225 true.priority=1
225 true.priority=1
226 # hg update -C 1
226 # hg update -C 1
227 $ hg merge -r 2 --config merge-tools.true.executable=/nonexistent/mergetool
227 $ hg merge -r 2 --config merge-tools.true.executable=/nonexistent/mergetool
228 merging f
228 merging f
229 merging f failed!
229 merging f failed!
230 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
230 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
231 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
231 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
232 [1]
232 [1]
233 $ aftermerge
233 $ aftermerge
234 # cat f
234 # cat f
235 revision 1
235 revision 1
236 space
236 space
237 # hg stat
237 # hg stat
238 M f
238 M f
239 ? f.orig
239 ? f.orig
240
240
241 but true.executable set to cat found in PATH works:
241 but true.executable set to cat found in PATH works:
242
242
243 $ echo "true.executable=cat" >> .hg/hgrc
243 $ echo "true.executable=cat" >> .hg/hgrc
244 $ beforemerge
244 $ beforemerge
245 [merge-tools]
245 [merge-tools]
246 false.whatever=
246 false.whatever=
247 true.priority=1
247 true.priority=1
248 true.executable=cat
248 true.executable=cat
249 # hg update -C 1
249 # hg update -C 1
250 $ hg merge -r 2
250 $ hg merge -r 2
251 merging f
251 merging f
252 revision 1
252 revision 1
253 space
253 space
254 revision 0
254 revision 0
255 space
255 space
256 revision 2
256 revision 2
257 space
257 space
258 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
258 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
259 (branch merge, don't forget to commit)
259 (branch merge, don't forget to commit)
260 $ aftermerge
260 $ aftermerge
261 # cat f
261 # cat f
262 revision 1
262 revision 1
263 space
263 space
264 # hg stat
264 # hg stat
265 M f
265 M f
266
266
267 and true.executable set to cat with path works:
267 and true.executable set to cat with path works:
268
268
269 $ beforemerge
269 $ beforemerge
270 [merge-tools]
270 [merge-tools]
271 false.whatever=
271 false.whatever=
272 true.priority=1
272 true.priority=1
273 true.executable=cat
273 true.executable=cat
274 # hg update -C 1
274 # hg update -C 1
275 $ hg merge -r 2 --config merge-tools.true.executable=cat
275 $ hg merge -r 2 --config merge-tools.true.executable=cat
276 merging f
276 merging f
277 revision 1
277 revision 1
278 space
278 space
279 revision 0
279 revision 0
280 space
280 space
281 revision 2
281 revision 2
282 space
282 space
283 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
283 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
284 (branch merge, don't forget to commit)
284 (branch merge, don't forget to commit)
285 $ aftermerge
285 $ aftermerge
286 # cat f
286 # cat f
287 revision 1
287 revision 1
288 space
288 space
289 # hg stat
289 # hg stat
290 M f
290 M f
291
291
292 #if unix-permissions
292 #if unix-permissions
293
293
294 environment variables in true.executable are handled:
294 environment variables in true.executable are handled:
295
295
296 $ echo 'echo "custom merge tool"' > .hg/merge.sh
296 $ echo 'echo "custom merge tool"' > .hg/merge.sh
297 $ beforemerge
297 $ beforemerge
298 [merge-tools]
298 [merge-tools]
299 false.whatever=
299 false.whatever=
300 true.priority=1
300 true.priority=1
301 true.executable=cat
301 true.executable=cat
302 # hg update -C 1
302 # hg update -C 1
303 $ hg --config merge-tools.true.executable='sh' \
303 $ hg --config merge-tools.true.executable='sh' \
304 > --config merge-tools.true.args=.hg/merge.sh \
304 > --config merge-tools.true.args=.hg/merge.sh \
305 > merge -r 2
305 > merge -r 2
306 merging f
306 merging f
307 custom merge tool
307 custom merge tool
308 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
308 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
309 (branch merge, don't forget to commit)
309 (branch merge, don't forget to commit)
310 $ aftermerge
310 $ aftermerge
311 # cat f
311 # cat f
312 revision 1
312 revision 1
313 space
313 space
314 # hg stat
314 # hg stat
315 M f
315 M f
316
316
317 #endif
317 #endif
318
318
319 Tool selection and merge-patterns
319 Tool selection and merge-patterns
320
320
321 merge-patterns specifies new tool false:
321 merge-patterns specifies new tool false:
322
322
323 $ beforemerge
323 $ beforemerge
324 [merge-tools]
324 [merge-tools]
325 false.whatever=
325 false.whatever=
326 true.priority=1
326 true.priority=1
327 true.executable=cat
327 true.executable=cat
328 # hg update -C 1
328 # hg update -C 1
329 $ hg merge -r 2 --config merge-patterns.f=false
329 $ hg merge -r 2 --config merge-patterns.f=false
330 merging f
330 merging f
331 merging f failed!
331 merging f failed!
332 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
332 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
333 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
333 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
334 [1]
334 [1]
335 $ aftermerge
335 $ aftermerge
336 # cat f
336 # cat f
337 revision 1
337 revision 1
338 space
338 space
339 # hg stat
339 # hg stat
340 M f
340 M f
341 ? f.orig
341 ? f.orig
342
342
343 merge-patterns specifies executable not found in PATH and gets warning:
343 merge-patterns specifies executable not found in PATH and gets warning:
344
344
345 $ beforemerge
345 $ beforemerge
346 [merge-tools]
346 [merge-tools]
347 false.whatever=
347 false.whatever=
348 true.priority=1
348 true.priority=1
349 true.executable=cat
349 true.executable=cat
350 # hg update -C 1
350 # hg update -C 1
351 $ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=nonexistentmergetool
351 $ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=nonexistentmergetool
352 couldn't find merge tool true specified for f
352 couldn't find merge tool true specified for f
353 merging f
353 merging f
354 merging f failed!
354 merging f failed!
355 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
355 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
356 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
356 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
357 [1]
357 [1]
358 $ aftermerge
358 $ aftermerge
359 # cat f
359 # cat f
360 revision 1
360 revision 1
361 space
361 space
362 # hg stat
362 # hg stat
363 M f
363 M f
364 ? f.orig
364 ? f.orig
365
365
366 merge-patterns specifies executable with bogus path and gets warning:
366 merge-patterns specifies executable with bogus path and gets warning:
367
367
368 $ beforemerge
368 $ beforemerge
369 [merge-tools]
369 [merge-tools]
370 false.whatever=
370 false.whatever=
371 true.priority=1
371 true.priority=1
372 true.executable=cat
372 true.executable=cat
373 # hg update -C 1
373 # hg update -C 1
374 $ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=/nonexistent/mergetool
374 $ hg merge -r 2 --config merge-patterns.f=true --config merge-tools.true.executable=/nonexistent/mergetool
375 couldn't find merge tool true specified for f
375 couldn't find merge tool true specified for f
376 merging f
376 merging f
377 merging f failed!
377 merging f failed!
378 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
378 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
379 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
379 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
380 [1]
380 [1]
381 $ aftermerge
381 $ aftermerge
382 # cat f
382 # cat f
383 revision 1
383 revision 1
384 space
384 space
385 # hg stat
385 # hg stat
386 M f
386 M f
387 ? f.orig
387 ? f.orig
388
388
389 ui.merge overrules priority
389 ui.merge overrules priority
390
390
391 ui.merge specifies false:
391 ui.merge specifies false:
392
392
393 $ beforemerge
393 $ beforemerge
394 [merge-tools]
394 [merge-tools]
395 false.whatever=
395 false.whatever=
396 true.priority=1
396 true.priority=1
397 true.executable=cat
397 true.executable=cat
398 # hg update -C 1
398 # hg update -C 1
399 $ hg merge -r 2 --config ui.merge=false
399 $ hg merge -r 2 --config ui.merge=false
400 merging f
400 merging f
401 merging f failed!
401 merging f failed!
402 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
402 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
403 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
403 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
404 [1]
404 [1]
405 $ aftermerge
405 $ aftermerge
406 # cat f
406 # cat f
407 revision 1
407 revision 1
408 space
408 space
409 # hg stat
409 # hg stat
410 M f
410 M f
411 ? f.orig
411 ? f.orig
412
412
413 ui.merge specifies internal:fail:
413 ui.merge specifies internal:fail:
414
414
415 $ beforemerge
415 $ beforemerge
416 [merge-tools]
416 [merge-tools]
417 false.whatever=
417 false.whatever=
418 true.priority=1
418 true.priority=1
419 true.executable=cat
419 true.executable=cat
420 # hg update -C 1
420 # hg update -C 1
421 $ hg merge -r 2 --config ui.merge=internal:fail
421 $ hg merge -r 2 --config ui.merge=internal:fail
422 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
422 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
423 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
423 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
424 [1]
424 [1]
425 $ aftermerge
425 $ aftermerge
426 # cat f
426 # cat f
427 revision 1
427 revision 1
428 space
428 space
429 # hg stat
429 # hg stat
430 M f
430 M f
431
431
432 ui.merge specifies :local (without internal prefix):
432 ui.merge specifies :local (without internal prefix):
433
433
434 $ beforemerge
434 $ beforemerge
435 [merge-tools]
435 [merge-tools]
436 false.whatever=
436 false.whatever=
437 true.priority=1
437 true.priority=1
438 true.executable=cat
438 true.executable=cat
439 # hg update -C 1
439 # hg update -C 1
440 $ hg merge -r 2 --config ui.merge=:local
440 $ hg merge -r 2 --config ui.merge=:local
441 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
441 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
442 (branch merge, don't forget to commit)
442 (branch merge, don't forget to commit)
443 $ aftermerge
443 $ aftermerge
444 # cat f
444 # cat f
445 revision 1
445 revision 1
446 space
446 space
447 # hg stat
447 # hg stat
448 M f
448 M f
449
449
450 ui.merge specifies internal:other:
450 ui.merge specifies internal:other:
451
451
452 $ beforemerge
452 $ beforemerge
453 [merge-tools]
453 [merge-tools]
454 false.whatever=
454 false.whatever=
455 true.priority=1
455 true.priority=1
456 true.executable=cat
456 true.executable=cat
457 # hg update -C 1
457 # hg update -C 1
458 $ hg merge -r 2 --config ui.merge=internal:other
458 $ hg merge -r 2 --config ui.merge=internal:other
459 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
459 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
460 (branch merge, don't forget to commit)
460 (branch merge, don't forget to commit)
461 $ aftermerge
461 $ aftermerge
462 # cat f
462 # cat f
463 revision 2
463 revision 2
464 space
464 space
465 # hg stat
465 # hg stat
466 M f
466 M f
467
467
468 ui.merge specifies internal:prompt:
468 ui.merge specifies internal:prompt:
469
469
470 $ beforemerge
470 $ beforemerge
471 [merge-tools]
471 [merge-tools]
472 false.whatever=
472 false.whatever=
473 true.priority=1
473 true.priority=1
474 true.executable=cat
474 true.executable=cat
475 # hg update -C 1
475 # hg update -C 1
476 $ hg merge -r 2 --config ui.merge=internal:prompt
476 $ hg merge -r 2 --config ui.merge=internal:prompt
477 no tool found to merge f
477 no tool found to merge f
478 keep (l)ocal or take (o)ther? l
478 keep (l)ocal or take (o)ther? l
479 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
479 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
480 (branch merge, don't forget to commit)
480 (branch merge, don't forget to commit)
481 $ aftermerge
481 $ aftermerge
482 # cat f
482 # cat f
483 revision 1
483 revision 1
484 space
484 space
485 # hg stat
485 # hg stat
486 M f
486 M f
487
487
488 ui.merge specifies internal:dump:
488 ui.merge specifies internal:dump:
489
489
490 $ beforemerge
490 $ beforemerge
491 [merge-tools]
491 [merge-tools]
492 false.whatever=
492 false.whatever=
493 true.priority=1
493 true.priority=1
494 true.executable=cat
494 true.executable=cat
495 # hg update -C 1
495 # hg update -C 1
496 $ hg merge -r 2 --config ui.merge=internal:dump
496 $ hg merge -r 2 --config ui.merge=internal:dump
497 merging f
497 merging f
498 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
498 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
499 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
499 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
500 [1]
500 [1]
501 $ aftermerge
501 $ aftermerge
502 # cat f
502 # cat f
503 revision 1
503 revision 1
504 space
504 space
505 # hg stat
505 # hg stat
506 M f
506 M f
507 ? f.base
507 ? f.base
508 ? f.local
508 ? f.local
509 ? f.orig
509 ? f.orig
510 ? f.other
510 ? f.other
511
511
512 f.base:
512 f.base:
513
513
514 $ cat f.base
514 $ cat f.base
515 revision 0
515 revision 0
516 space
516 space
517
517
518 f.local:
518 f.local:
519
519
520 $ cat f.local
520 $ cat f.local
521 revision 1
521 revision 1
522 space
522 space
523
523
524 f.other:
524 f.other:
525
525
526 $ cat f.other
526 $ cat f.other
527 revision 2
527 revision 2
528 space
528 space
529 $ rm f.base f.local f.other
529 $ rm f.base f.local f.other
530
530
531 ui.merge specifies internal:other but is overruled by pattern for false:
531 ui.merge specifies internal:other but is overruled by pattern for false:
532
532
533 $ beforemerge
533 $ beforemerge
534 [merge-tools]
534 [merge-tools]
535 false.whatever=
535 false.whatever=
536 true.priority=1
536 true.priority=1
537 true.executable=cat
537 true.executable=cat
538 # hg update -C 1
538 # hg update -C 1
539 $ hg merge -r 2 --config ui.merge=internal:other --config merge-patterns.f=false
539 $ hg merge -r 2 --config ui.merge=internal:other --config merge-patterns.f=false
540 merging f
540 merging f
541 merging f failed!
541 merging f failed!
542 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
542 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
543 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
543 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
544 [1]
544 [1]
545 $ aftermerge
545 $ aftermerge
546 # cat f
546 # cat f
547 revision 1
547 revision 1
548 space
548 space
549 # hg stat
549 # hg stat
550 M f
550 M f
551 ? f.orig
551 ? f.orig
552
552
553 Premerge
553 Premerge
554
554
555 ui.merge specifies internal:other but is overruled by --tool=false
555 ui.merge specifies internal:other but is overruled by --tool=false
556
556
557 $ beforemerge
557 $ beforemerge
558 [merge-tools]
558 [merge-tools]
559 false.whatever=
559 false.whatever=
560 true.priority=1
560 true.priority=1
561 true.executable=cat
561 true.executable=cat
562 # hg update -C 1
562 # hg update -C 1
563 $ hg merge -r 2 --config ui.merge=internal:other --tool=false
563 $ hg merge -r 2 --config ui.merge=internal:other --tool=false
564 merging f
564 merging f
565 merging f failed!
565 merging f failed!
566 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
566 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
567 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
567 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
568 [1]
568 [1]
569 $ aftermerge
569 $ aftermerge
570 # cat f
570 # cat f
571 revision 1
571 revision 1
572 space
572 space
573 # hg stat
573 # hg stat
574 M f
574 M f
575 ? f.orig
575 ? f.orig
576
576
577 HGMERGE specifies internal:other but is overruled by --tool=false
577 HGMERGE specifies internal:other but is overruled by --tool=false
578
578
579 $ HGMERGE=internal:other ; export HGMERGE
579 $ HGMERGE=internal:other ; export HGMERGE
580 $ beforemerge
580 $ beforemerge
581 [merge-tools]
581 [merge-tools]
582 false.whatever=
582 false.whatever=
583 true.priority=1
583 true.priority=1
584 true.executable=cat
584 true.executable=cat
585 # hg update -C 1
585 # hg update -C 1
586 $ hg merge -r 2 --tool=false
586 $ hg merge -r 2 --tool=false
587 merging f
587 merging f
588 merging f failed!
588 merging f failed!
589 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
589 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
590 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
590 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
591 [1]
591 [1]
592 $ aftermerge
592 $ aftermerge
593 # cat f
593 # cat f
594 revision 1
594 revision 1
595 space
595 space
596 # hg stat
596 # hg stat
597 M f
597 M f
598 ? f.orig
598 ? f.orig
599
599
600 $ unset HGMERGE # make sure HGMERGE doesn't interfere with remaining tests
600 $ unset HGMERGE # make sure HGMERGE doesn't interfere with remaining tests
601
601
602 update is a merge ...
602 update is a merge ...
603
603
604 $ beforemerge
604 $ beforemerge
605 [merge-tools]
605 [merge-tools]
606 false.whatever=
606 false.whatever=
607 true.priority=1
607 true.priority=1
608 true.executable=cat
608 true.executable=cat
609 # hg update -C 1
609 # hg update -C 1
610 $ hg update -q 0
610 $ hg update -q 0
611 $ f -s f
612 f: size=17
613 $ touch -t 200001010000 f
614 $ hg status f
611 $ hg revert -q -r 1 .
615 $ hg revert -q -r 1 .
616 $ f -s f
617 f: size=17
618 $ touch -t 200001010000 f
619 $ hg status f
620 M f
612 $ hg update -r 2
621 $ hg update -r 2
613 merging f
622 merging f
614 revision 1
623 revision 1
615 space
624 space
616 revision 0
625 revision 0
617 space
626 space
618 revision 2
627 revision 2
619 space
628 space
620 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
629 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
621 $ aftermerge
630 $ aftermerge
622 # cat f
631 # cat f
623 revision 1
632 revision 1
624 space
633 space
625 # hg stat
634 # hg stat
626 M f
635 M f
627
636
628 update should also have --tool
637 update should also have --tool
629
638
630 $ beforemerge
639 $ beforemerge
631 [merge-tools]
640 [merge-tools]
632 false.whatever=
641 false.whatever=
633 true.priority=1
642 true.priority=1
634 true.executable=cat
643 true.executable=cat
635 # hg update -C 1
644 # hg update -C 1
636 $ hg update -q 0
645 $ hg update -q 0
646 $ f -s f
647 f: size=17
648 $ touch -t 200001010000 f
649 $ hg status f
637 $ hg revert -q -r 1 .
650 $ hg revert -q -r 1 .
651 $ f -s f
652 f: size=17
653 $ touch -t 200001010000 f
654 $ hg status f
655 M f
638 $ hg update -r 2 --tool false
656 $ hg update -r 2 --tool false
639 merging f
657 merging f
640 merging f failed!
658 merging f failed!
641 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
659 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
642 use 'hg resolve' to retry unresolved file merges
660 use 'hg resolve' to retry unresolved file merges
643 [1]
661 [1]
644 $ aftermerge
662 $ aftermerge
645 # cat f
663 # cat f
646 revision 1
664 revision 1
647 space
665 space
648 # hg stat
666 # hg stat
649 M f
667 M f
650 ? f.orig
668 ? f.orig
651
669
652 Default is silent simplemerge:
670 Default is silent simplemerge:
653
671
654 $ beforemerge
672 $ beforemerge
655 [merge-tools]
673 [merge-tools]
656 false.whatever=
674 false.whatever=
657 true.priority=1
675 true.priority=1
658 true.executable=cat
676 true.executable=cat
659 # hg update -C 1
677 # hg update -C 1
660 $ hg merge -r 3
678 $ hg merge -r 3
661 merging f
679 merging f
662 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
680 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
663 (branch merge, don't forget to commit)
681 (branch merge, don't forget to commit)
664 $ aftermerge
682 $ aftermerge
665 # cat f
683 # cat f
666 revision 1
684 revision 1
667 space
685 space
668 revision 3
686 revision 3
669 # hg stat
687 # hg stat
670 M f
688 M f
671
689
672 .premerge=True is same:
690 .premerge=True is same:
673
691
674 $ beforemerge
692 $ beforemerge
675 [merge-tools]
693 [merge-tools]
676 false.whatever=
694 false.whatever=
677 true.priority=1
695 true.priority=1
678 true.executable=cat
696 true.executable=cat
679 # hg update -C 1
697 # hg update -C 1
680 $ hg merge -r 3 --config merge-tools.true.premerge=True
698 $ hg merge -r 3 --config merge-tools.true.premerge=True
681 merging f
699 merging f
682 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
700 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
683 (branch merge, don't forget to commit)
701 (branch merge, don't forget to commit)
684 $ aftermerge
702 $ aftermerge
685 # cat f
703 # cat f
686 revision 1
704 revision 1
687 space
705 space
688 revision 3
706 revision 3
689 # hg stat
707 # hg stat
690 M f
708 M f
691
709
692 .premerge=False executes merge-tool:
710 .premerge=False executes merge-tool:
693
711
694 $ beforemerge
712 $ beforemerge
695 [merge-tools]
713 [merge-tools]
696 false.whatever=
714 false.whatever=
697 true.priority=1
715 true.priority=1
698 true.executable=cat
716 true.executable=cat
699 # hg update -C 1
717 # hg update -C 1
700 $ hg merge -r 3 --config merge-tools.true.premerge=False
718 $ hg merge -r 3 --config merge-tools.true.premerge=False
701 merging f
719 merging f
702 revision 1
720 revision 1
703 space
721 space
704 revision 0
722 revision 0
705 space
723 space
706 revision 0
724 revision 0
707 space
725 space
708 revision 3
726 revision 3
709 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
727 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
710 (branch merge, don't forget to commit)
728 (branch merge, don't forget to commit)
711 $ aftermerge
729 $ aftermerge
712 # cat f
730 # cat f
713 revision 1
731 revision 1
714 space
732 space
715 # hg stat
733 # hg stat
716 M f
734 M f
717
735
718 premerge=keep keeps conflict markers in:
736 premerge=keep keeps conflict markers in:
719
737
720 $ beforemerge
738 $ beforemerge
721 [merge-tools]
739 [merge-tools]
722 false.whatever=
740 false.whatever=
723 true.priority=1
741 true.priority=1
724 true.executable=cat
742 true.executable=cat
725 # hg update -C 1
743 # hg update -C 1
726 $ hg merge -r 4 --config merge-tools.true.premerge=keep
744 $ hg merge -r 4 --config merge-tools.true.premerge=keep
727 merging f
745 merging f
728 <<<<<<< local: ef83787e2614 - test: revision 1
746 <<<<<<< local: ef83787e2614 - test: revision 1
729 revision 1
747 revision 1
730 space
748 space
731 =======
749 =======
732 revision 4
750 revision 4
733 >>>>>>> other: 81448d39c9a0 - test: revision 4
751 >>>>>>> other: 81448d39c9a0 - test: revision 4
734 revision 0
752 revision 0
735 space
753 space
736 revision 4
754 revision 4
737 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
755 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
738 (branch merge, don't forget to commit)
756 (branch merge, don't forget to commit)
739 $ aftermerge
757 $ aftermerge
740 # cat f
758 # cat f
741 <<<<<<< local: ef83787e2614 - test: revision 1
759 <<<<<<< local: ef83787e2614 - test: revision 1
742 revision 1
760 revision 1
743 space
761 space
744 =======
762 =======
745 revision 4
763 revision 4
746 >>>>>>> other: 81448d39c9a0 - test: revision 4
764 >>>>>>> other: 81448d39c9a0 - test: revision 4
747 # hg stat
765 # hg stat
748 M f
766 M f
749
767
750 premerge=keep-merge3 keeps conflict markers with base content:
768 premerge=keep-merge3 keeps conflict markers with base content:
751
769
752 $ beforemerge
770 $ beforemerge
753 [merge-tools]
771 [merge-tools]
754 false.whatever=
772 false.whatever=
755 true.priority=1
773 true.priority=1
756 true.executable=cat
774 true.executable=cat
757 # hg update -C 1
775 # hg update -C 1
758 $ hg merge -r 4 --config merge-tools.true.premerge=keep-merge3
776 $ hg merge -r 4 --config merge-tools.true.premerge=keep-merge3
759 merging f
777 merging f
760 <<<<<<< local: ef83787e2614 - test: revision 1
778 <<<<<<< local: ef83787e2614 - test: revision 1
761 revision 1
779 revision 1
762 space
780 space
763 ||||||| base
781 ||||||| base
764 revision 0
782 revision 0
765 space
783 space
766 =======
784 =======
767 revision 4
785 revision 4
768 >>>>>>> other: 81448d39c9a0 - test: revision 4
786 >>>>>>> other: 81448d39c9a0 - test: revision 4
769 revision 0
787 revision 0
770 space
788 space
771 revision 4
789 revision 4
772 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
790 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
773 (branch merge, don't forget to commit)
791 (branch merge, don't forget to commit)
774 $ aftermerge
792 $ aftermerge
775 # cat f
793 # cat f
776 <<<<<<< local: ef83787e2614 - test: revision 1
794 <<<<<<< local: ef83787e2614 - test: revision 1
777 revision 1
795 revision 1
778 space
796 space
779 ||||||| base
797 ||||||| base
780 revision 0
798 revision 0
781 space
799 space
782 =======
800 =======
783 revision 4
801 revision 4
784 >>>>>>> other: 81448d39c9a0 - test: revision 4
802 >>>>>>> other: 81448d39c9a0 - test: revision 4
785 # hg stat
803 # hg stat
786 M f
804 M f
787
805
788
806
789 Tool execution
807 Tool execution
790
808
791 set tools.args explicit to include $base $local $other $output:
809 set tools.args explicit to include $base $local $other $output:
792
810
793 $ beforemerge
811 $ beforemerge
794 [merge-tools]
812 [merge-tools]
795 false.whatever=
813 false.whatever=
796 true.priority=1
814 true.priority=1
797 true.executable=cat
815 true.executable=cat
798 # hg update -C 1
816 # hg update -C 1
799 $ hg merge -r 2 --config merge-tools.true.executable=head --config merge-tools.true.args='$base $local $other $output' \
817 $ hg merge -r 2 --config merge-tools.true.executable=head --config merge-tools.true.args='$base $local $other $output' \
800 > | sed 's,==> .* <==,==> ... <==,g'
818 > | sed 's,==> .* <==,==> ... <==,g'
801 merging f
819 merging f
802 ==> ... <==
820 ==> ... <==
803 revision 0
821 revision 0
804 space
822 space
805
823
806 ==> ... <==
824 ==> ... <==
807 revision 1
825 revision 1
808 space
826 space
809
827
810 ==> ... <==
828 ==> ... <==
811 revision 2
829 revision 2
812 space
830 space
813
831
814 ==> ... <==
832 ==> ... <==
815 revision 1
833 revision 1
816 space
834 space
817 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
835 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
818 (branch merge, don't forget to commit)
836 (branch merge, don't forget to commit)
819 $ aftermerge
837 $ aftermerge
820 # cat f
838 # cat f
821 revision 1
839 revision 1
822 space
840 space
823 # hg stat
841 # hg stat
824 M f
842 M f
825
843
826 Merge with "echo mergeresult > $local":
844 Merge with "echo mergeresult > $local":
827
845
828 $ beforemerge
846 $ beforemerge
829 [merge-tools]
847 [merge-tools]
830 false.whatever=
848 false.whatever=
831 true.priority=1
849 true.priority=1
832 true.executable=cat
850 true.executable=cat
833 # hg update -C 1
851 # hg update -C 1
834 $ hg merge -r 2 --config merge-tools.true.executable=echo --config merge-tools.true.args='mergeresult > $local'
852 $ hg merge -r 2 --config merge-tools.true.executable=echo --config merge-tools.true.args='mergeresult > $local'
835 merging f
853 merging f
836 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
854 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
837 (branch merge, don't forget to commit)
855 (branch merge, don't forget to commit)
838 $ aftermerge
856 $ aftermerge
839 # cat f
857 # cat f
840 mergeresult
858 mergeresult
841 # hg stat
859 # hg stat
842 M f
860 M f
843
861
844 - and $local is the file f:
862 - and $local is the file f:
845
863
846 $ beforemerge
864 $ beforemerge
847 [merge-tools]
865 [merge-tools]
848 false.whatever=
866 false.whatever=
849 true.priority=1
867 true.priority=1
850 true.executable=cat
868 true.executable=cat
851 # hg update -C 1
869 # hg update -C 1
852 $ hg merge -r 2 --config merge-tools.true.executable=echo --config merge-tools.true.args='mergeresult > f'
870 $ hg merge -r 2 --config merge-tools.true.executable=echo --config merge-tools.true.args='mergeresult > f'
853 merging f
871 merging f
854 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
872 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
855 (branch merge, don't forget to commit)
873 (branch merge, don't forget to commit)
856 $ aftermerge
874 $ aftermerge
857 # cat f
875 # cat f
858 mergeresult
876 mergeresult
859 # hg stat
877 # hg stat
860 M f
878 M f
861
879
862 Merge with "echo mergeresult > $output" - the variable is a bit magic:
880 Merge with "echo mergeresult > $output" - the variable is a bit magic:
863
881
864 $ beforemerge
882 $ beforemerge
865 [merge-tools]
883 [merge-tools]
866 false.whatever=
884 false.whatever=
867 true.priority=1
885 true.priority=1
868 true.executable=cat
886 true.executable=cat
869 # hg update -C 1
887 # hg update -C 1
870 $ hg merge -r 2 --config merge-tools.true.executable=echo --config merge-tools.true.args='mergeresult > $output'
888 $ hg merge -r 2 --config merge-tools.true.executable=echo --config merge-tools.true.args='mergeresult > $output'
871 merging f
889 merging f
872 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
890 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
873 (branch merge, don't forget to commit)
891 (branch merge, don't forget to commit)
874 $ aftermerge
892 $ aftermerge
875 # cat f
893 # cat f
876 mergeresult
894 mergeresult
877 # hg stat
895 # hg stat
878 M f
896 M f
879
897
880 Merge using tool with a path that must be quoted:
898 Merge using tool with a path that must be quoted:
881
899
882 $ beforemerge
900 $ beforemerge
883 [merge-tools]
901 [merge-tools]
884 false.whatever=
902 false.whatever=
885 true.priority=1
903 true.priority=1
886 true.executable=cat
904 true.executable=cat
887 # hg update -C 1
905 # hg update -C 1
888 $ cat <<EOF > 'my merge tool'
906 $ cat <<EOF > 'my merge tool'
889 > cat "\$1" "\$2" "\$3" > "\$4"
907 > cat "\$1" "\$2" "\$3" > "\$4"
890 > EOF
908 > EOF
891 $ hg --config merge-tools.true.executable='sh' \
909 $ hg --config merge-tools.true.executable='sh' \
892 > --config merge-tools.true.args='"./my merge tool" $base $local $other $output' \
910 > --config merge-tools.true.args='"./my merge tool" $base $local $other $output' \
893 > merge -r 2
911 > merge -r 2
894 merging f
912 merging f
895 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
913 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
896 (branch merge, don't forget to commit)
914 (branch merge, don't forget to commit)
897 $ rm -f 'my merge tool'
915 $ rm -f 'my merge tool'
898 $ aftermerge
916 $ aftermerge
899 # cat f
917 # cat f
900 revision 0
918 revision 0
901 space
919 space
902 revision 1
920 revision 1
903 space
921 space
904 revision 2
922 revision 2
905 space
923 space
906 # hg stat
924 # hg stat
907 M f
925 M f
908
926
909 Issue3581: Merging a filename that needs to be quoted
927 Issue3581: Merging a filename that needs to be quoted
910 (This test doesn't work on Windows filesystems even on Linux, so check
928 (This test doesn't work on Windows filesystems even on Linux, so check
911 for Unix-like permission)
929 for Unix-like permission)
912
930
913 #if unix-permissions
931 #if unix-permissions
914 $ beforemerge
932 $ beforemerge
915 [merge-tools]
933 [merge-tools]
916 false.whatever=
934 false.whatever=
917 true.priority=1
935 true.priority=1
918 true.executable=cat
936 true.executable=cat
919 # hg update -C 1
937 # hg update -C 1
920 $ echo "revision 5" > '"; exit 1; echo "'
938 $ echo "revision 5" > '"; exit 1; echo "'
921 $ hg commit -Am "revision 5"
939 $ hg commit -Am "revision 5"
922 adding "; exit 1; echo "
940 adding "; exit 1; echo "
923 warning: filename contains '"', which is reserved on Windows: '"; exit 1; echo "'
941 warning: filename contains '"', which is reserved on Windows: '"; exit 1; echo "'
924 $ hg update -C 1 > /dev/null
942 $ hg update -C 1 > /dev/null
925 $ echo "revision 6" > '"; exit 1; echo "'
943 $ echo "revision 6" > '"; exit 1; echo "'
926 $ hg commit -Am "revision 6"
944 $ hg commit -Am "revision 6"
927 adding "; exit 1; echo "
945 adding "; exit 1; echo "
928 warning: filename contains '"', which is reserved on Windows: '"; exit 1; echo "'
946 warning: filename contains '"', which is reserved on Windows: '"; exit 1; echo "'
929 created new head
947 created new head
930 $ hg merge --config merge-tools.true.executable="true" -r 5
948 $ hg merge --config merge-tools.true.executable="true" -r 5
931 merging "; exit 1; echo "
949 merging "; exit 1; echo "
932 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
950 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
933 (branch merge, don't forget to commit)
951 (branch merge, don't forget to commit)
934 $ hg update -C 1 > /dev/null
952 $ hg update -C 1 > /dev/null
935 #endif
953 #endif
936
954
937 Merge post-processing
955 Merge post-processing
938
956
939 cat is a bad merge-tool and doesn't change:
957 cat is a bad merge-tool and doesn't change:
940
958
941 $ beforemerge
959 $ beforemerge
942 [merge-tools]
960 [merge-tools]
943 false.whatever=
961 false.whatever=
944 true.priority=1
962 true.priority=1
945 true.executable=cat
963 true.executable=cat
946 # hg update -C 1
964 # hg update -C 1
947 $ hg merge -y -r 2 --config merge-tools.true.checkchanged=1
965 $ hg merge -y -r 2 --config merge-tools.true.checkchanged=1
948 merging f
966 merging f
949 revision 1
967 revision 1
950 space
968 space
951 revision 0
969 revision 0
952 space
970 space
953 revision 2
971 revision 2
954 space
972 space
955 output file f appears unchanged
973 output file f appears unchanged
956 was merge successful (yn)? n
974 was merge successful (yn)? n
957 merging f failed!
975 merging f failed!
958 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
976 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
959 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
977 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
960 [1]
978 [1]
961 $ aftermerge
979 $ aftermerge
962 # cat f
980 # cat f
963 revision 1
981 revision 1
964 space
982 space
965 # hg stat
983 # hg stat
966 M f
984 M f
967 ? f.orig
985 ? f.orig
968
986
969 #if symlink
987 #if symlink
970
988
971 internal merge cannot handle symlinks and shouldn't try:
989 internal merge cannot handle symlinks and shouldn't try:
972
990
973 $ hg update -q -C 1
991 $ hg update -q -C 1
974 $ rm f
992 $ rm f
975 $ ln -s symlink f
993 $ ln -s symlink f
976 $ hg commit -qm 'f is symlink'
994 $ hg commit -qm 'f is symlink'
977 $ hg merge -r 2 --tool internal:merge
995 $ hg merge -r 2 --tool internal:merge
978 merging f
996 merging f
979 warning: internal :merge cannot merge symlinks for f
997 warning: internal :merge cannot merge symlinks for f
980 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
998 merging f incomplete! (edit conflicts, then use 'hg resolve --mark')
981 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
999 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
982 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
1000 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
983 [1]
1001 [1]
984
1002
985 #endif
1003 #endif
General Comments 0
You need to be logged in to leave comments. Login now