log: prefer 'wctx' over 'pctx' for working context
Martin von Zweigbergk
r24534:1925769b default
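The hunk below renames a single local variable inside overridelog()'s overridemakelogfilematcher() helper: repo[None] is the working-directory context, so the conventional name for it is 'wctx'; 'pctx' normally denotes a parent changeset context and was misleading here. A minimal sketch of that convention, assuming the Mercurial Python API of this era and a current directory that is a Mercurial repository (the setup lines are illustrative only and are not part of the patch):

# Sketch only: the wctx/pctx naming convention this commit adopts.
# Assumes the current directory is a Mercurial repository; opening it via the
# Python API is shown purely for illustration.
from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '.')

wctx = repo[None]   # working-directory context -> conventionally named 'wctx'
pctx = repo['.']    # first parent of the working directory -> a 'pctx'

assert wctx.rev() is None                 # the working directory has no revision number
assert pctx.node() == wctx.p1().node()    # 'pctx' is the parent of 'wctx'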
@@ -1,1376 +1,1376 @@
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import os
11 import os
12 import copy
12 import copy
13
13
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
14 from mercurial import hg, util, cmdutil, scmutil, match as match_, \
15 archival, pathutil, revset
15 archival, pathutil, revset
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial.node import hex
17 from mercurial.node import hex
18
18
19 import lfutil
19 import lfutil
20 import lfcommands
20 import lfcommands
21 import basestore
21 import basestore
22
22
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
23 # -- Utility functions: commonly/repeatedly needed functionality ---------------
24
24
25 def composelargefilematcher(match, manifest):
25 def composelargefilematcher(match, manifest):
26 '''create a matcher that matches only the largefiles in the original
26 '''create a matcher that matches only the largefiles in the original
27 matcher'''
27 matcher'''
28 m = copy.copy(match)
28 m = copy.copy(match)
29 lfile = lambda f: lfutil.standin(f) in manifest
29 lfile = lambda f: lfutil.standin(f) in manifest
30 m._files = filter(lfile, m._files)
30 m._files = filter(lfile, m._files)
31 m._fmap = set(m._files)
31 m._fmap = set(m._files)
32 m._always = False
32 m._always = False
33 origmatchfn = m.matchfn
33 origmatchfn = m.matchfn
34 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
34 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
35 return m
35 return m
36
36
37 def composenormalfilematcher(match, manifest, exclude=None):
37 def composenormalfilematcher(match, manifest, exclude=None):
38 excluded = set()
38 excluded = set()
39 if exclude is not None:
39 if exclude is not None:
40 excluded.update(exclude)
40 excluded.update(exclude)
41
41
42 m = copy.copy(match)
42 m = copy.copy(match)
43 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
43 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
44 manifest or f in excluded)
44 manifest or f in excluded)
45 m._files = filter(notlfile, m._files)
45 m._files = filter(notlfile, m._files)
46 m._fmap = set(m._files)
46 m._fmap = set(m._files)
47 m._always = False
47 m._always = False
48 origmatchfn = m.matchfn
48 origmatchfn = m.matchfn
49 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
49 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
50 return m
50 return m
51
51
52 def installnormalfilesmatchfn(manifest):
52 def installnormalfilesmatchfn(manifest):
53 '''installmatchfn with a matchfn that ignores all largefiles'''
53 '''installmatchfn with a matchfn that ignores all largefiles'''
54 def overridematch(ctx, pats=[], opts={}, globbed=False,
54 def overridematch(ctx, pats=[], opts={}, globbed=False,
55 default='relpath'):
55 default='relpath'):
56 match = oldmatch(ctx, pats, opts, globbed, default)
56 match = oldmatch(ctx, pats, opts, globbed, default)
57 return composenormalfilematcher(match, manifest)
57 return composenormalfilematcher(match, manifest)
58 oldmatch = installmatchfn(overridematch)
58 oldmatch = installmatchfn(overridematch)
59
59
60 def installmatchfn(f):
60 def installmatchfn(f):
61 '''monkey patch the scmutil module with a custom match function.
61 '''monkey patch the scmutil module with a custom match function.
62 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
62 Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
63 oldmatch = scmutil.match
63 oldmatch = scmutil.match
64 setattr(f, 'oldmatch', oldmatch)
64 setattr(f, 'oldmatch', oldmatch)
65 scmutil.match = f
65 scmutil.match = f
66 return oldmatch
66 return oldmatch
67
67
68 def restorematchfn():
68 def restorematchfn():
69 '''restores scmutil.match to what it was before installmatchfn
69 '''restores scmutil.match to what it was before installmatchfn
70 was called. no-op if scmutil.match is its original function.
70 was called. no-op if scmutil.match is its original function.
71
71
72 Note that n calls to installmatchfn will require n calls to
72 Note that n calls to installmatchfn will require n calls to
73 restore the original matchfn.'''
73 restore the original matchfn.'''
74 scmutil.match = getattr(scmutil.match, 'oldmatch')
74 scmutil.match = getattr(scmutil.match, 'oldmatch')
75
75
76 def installmatchandpatsfn(f):
76 def installmatchandpatsfn(f):
77 oldmatchandpats = scmutil.matchandpats
77 oldmatchandpats = scmutil.matchandpats
78 setattr(f, 'oldmatchandpats', oldmatchandpats)
78 setattr(f, 'oldmatchandpats', oldmatchandpats)
79 scmutil.matchandpats = f
79 scmutil.matchandpats = f
80 return oldmatchandpats
80 return oldmatchandpats
81
81
82 def restorematchandpatsfn():
82 def restorematchandpatsfn():
83 '''restores scmutil.matchandpats to what it was before
83 '''restores scmutil.matchandpats to what it was before
84 installmatchandpatsfn was called. No-op if scmutil.matchandpats
84 installmatchandpatsfn was called. No-op if scmutil.matchandpats
85 is its original function.
85 is its original function.
86
86
87 Note that n calls to installmatchandpatsfn will require n calls
87 Note that n calls to installmatchandpatsfn will require n calls
88 to restore the original matchfn.'''
88 to restore the original matchfn.'''
89 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
89 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
90 scmutil.matchandpats)
90 scmutil.matchandpats)
91
91
92 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
92 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
93 large = opts.get('large')
93 large = opts.get('large')
94 lfsize = lfutil.getminsize(
94 lfsize = lfutil.getminsize(
95 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
95 ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
96
96
97 lfmatcher = None
97 lfmatcher = None
98 if lfutil.islfilesrepo(repo):
98 if lfutil.islfilesrepo(repo):
99 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
99 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
100 if lfpats:
100 if lfpats:
101 lfmatcher = match_.match(repo.root, '', list(lfpats))
101 lfmatcher = match_.match(repo.root, '', list(lfpats))
102
102
103 lfnames = []
103 lfnames = []
104 m = copy.copy(matcher)
104 m = copy.copy(matcher)
105 m.bad = lambda x, y: None
105 m.bad = lambda x, y: None
106 wctx = repo[None]
106 wctx = repo[None]
107 for f in repo.walk(m):
107 for f in repo.walk(m):
108 exact = m.exact(f)
108 exact = m.exact(f)
109 lfile = lfutil.standin(f) in wctx
109 lfile = lfutil.standin(f) in wctx
110 nfile = f in wctx
110 nfile = f in wctx
111 exists = lfile or nfile
111 exists = lfile or nfile
112
112
113 # addremove in core gets fancy with the name, add doesn't
113 # addremove in core gets fancy with the name, add doesn't
114 if isaddremove:
114 if isaddremove:
115 name = m.uipath(f)
115 name = m.uipath(f)
116 else:
116 else:
117 name = m.rel(f)
117 name = m.rel(f)
118
118
119 # Don't warn the user when they attempt to add a normal tracked file.
119 # Don't warn the user when they attempt to add a normal tracked file.
120 # The normal add code will do that for us.
120 # The normal add code will do that for us.
121 if exact and exists:
121 if exact and exists:
122 if lfile:
122 if lfile:
123 ui.warn(_('%s already a largefile\n') % name)
123 ui.warn(_('%s already a largefile\n') % name)
124 continue
124 continue
125
125
126 if (exact or not exists) and not lfutil.isstandin(f):
126 if (exact or not exists) and not lfutil.isstandin(f):
127 # In case the file was removed previously, but not committed
127 # In case the file was removed previously, but not committed
128 # (issue3507)
128 # (issue3507)
129 if not repo.wvfs.exists(f):
129 if not repo.wvfs.exists(f):
130 continue
130 continue
131
131
132 abovemin = (lfsize and
132 abovemin = (lfsize and
133 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
133 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
134 if large or abovemin or (lfmatcher and lfmatcher(f)):
134 if large or abovemin or (lfmatcher and lfmatcher(f)):
135 lfnames.append(f)
135 lfnames.append(f)
136 if ui.verbose or not exact:
136 if ui.verbose or not exact:
137 ui.status(_('adding %s as a largefile\n') % name)
137 ui.status(_('adding %s as a largefile\n') % name)
138
138
139 bad = []
139 bad = []
140
140
141 # Need to lock, otherwise there could be a race condition between
141 # Need to lock, otherwise there could be a race condition between
142 # when standins are created and added to the repo.
142 # when standins are created and added to the repo.
143 wlock = repo.wlock()
143 wlock = repo.wlock()
144 try:
144 try:
145 if not opts.get('dry_run'):
145 if not opts.get('dry_run'):
146 standins = []
146 standins = []
147 lfdirstate = lfutil.openlfdirstate(ui, repo)
147 lfdirstate = lfutil.openlfdirstate(ui, repo)
148 for f in lfnames:
148 for f in lfnames:
149 standinname = lfutil.standin(f)
149 standinname = lfutil.standin(f)
150 lfutil.writestandin(repo, standinname, hash='',
150 lfutil.writestandin(repo, standinname, hash='',
151 executable=lfutil.getexecutable(repo.wjoin(f)))
151 executable=lfutil.getexecutable(repo.wjoin(f)))
152 standins.append(standinname)
152 standins.append(standinname)
153 if lfdirstate[f] == 'r':
153 if lfdirstate[f] == 'r':
154 lfdirstate.normallookup(f)
154 lfdirstate.normallookup(f)
155 else:
155 else:
156 lfdirstate.add(f)
156 lfdirstate.add(f)
157 lfdirstate.write()
157 lfdirstate.write()
158 bad += [lfutil.splitstandin(f)
158 bad += [lfutil.splitstandin(f)
159 for f in repo[None].add(standins)
159 for f in repo[None].add(standins)
160 if f in m.files()]
160 if f in m.files()]
161
161
162 added = [f for f in lfnames if f not in bad]
162 added = [f for f in lfnames if f not in bad]
163 finally:
163 finally:
164 wlock.release()
164 wlock.release()
165 return added, bad
165 return added, bad
166
166
167 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
167 def removelargefiles(ui, repo, isaddremove, matcher, **opts):
168 after = opts.get('after')
168 after = opts.get('after')
169 m = composelargefilematcher(matcher, repo[None].manifest())
169 m = composelargefilematcher(matcher, repo[None].manifest())
170 try:
170 try:
171 repo.lfstatus = True
171 repo.lfstatus = True
172 s = repo.status(match=m, clean=not isaddremove)
172 s = repo.status(match=m, clean=not isaddremove)
173 finally:
173 finally:
174 repo.lfstatus = False
174 repo.lfstatus = False
175 manifest = repo[None].manifest()
175 manifest = repo[None].manifest()
176 modified, added, deleted, clean = [[f for f in list
176 modified, added, deleted, clean = [[f for f in list
177 if lfutil.standin(f) in manifest]
177 if lfutil.standin(f) in manifest]
178 for list in (s.modified, s.added,
178 for list in (s.modified, s.added,
179 s.deleted, s.clean)]
179 s.deleted, s.clean)]
180
180
181 def warn(files, msg):
181 def warn(files, msg):
182 for f in files:
182 for f in files:
183 ui.warn(msg % m.rel(f))
183 ui.warn(msg % m.rel(f))
184 return int(len(files) > 0)
184 return int(len(files) > 0)
185
185
186 result = 0
186 result = 0
187
187
188 if after:
188 if after:
189 remove = deleted
189 remove = deleted
190 result = warn(modified + added + clean,
190 result = warn(modified + added + clean,
191 _('not removing %s: file still exists\n'))
191 _('not removing %s: file still exists\n'))
192 else:
192 else:
193 remove = deleted + clean
193 remove = deleted + clean
194 result = warn(modified, _('not removing %s: file is modified (use -f'
194 result = warn(modified, _('not removing %s: file is modified (use -f'
195 ' to force removal)\n'))
195 ' to force removal)\n'))
196 result = warn(added, _('not removing %s: file has been marked for add'
196 result = warn(added, _('not removing %s: file has been marked for add'
197 ' (use forget to undo)\n')) or result
197 ' (use forget to undo)\n')) or result
198
198
199 # Need to lock because standin files are deleted then removed from the
199 # Need to lock because standin files are deleted then removed from the
200 # repository and we could race in-between.
200 # repository and we could race in-between.
201 wlock = repo.wlock()
201 wlock = repo.wlock()
202 try:
202 try:
203 lfdirstate = lfutil.openlfdirstate(ui, repo)
203 lfdirstate = lfutil.openlfdirstate(ui, repo)
204 for f in sorted(remove):
204 for f in sorted(remove):
205 if ui.verbose or not m.exact(f):
205 if ui.verbose or not m.exact(f):
206 # addremove in core gets fancy with the name, remove doesn't
206 # addremove in core gets fancy with the name, remove doesn't
207 if isaddremove:
207 if isaddremove:
208 name = m.uipath(f)
208 name = m.uipath(f)
209 else:
209 else:
210 name = m.rel(f)
210 name = m.rel(f)
211 ui.status(_('removing %s\n') % name)
211 ui.status(_('removing %s\n') % name)
212
212
213 if not opts.get('dry_run'):
213 if not opts.get('dry_run'):
214 if not after:
214 if not after:
215 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
215 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
216
216
217 if opts.get('dry_run'):
217 if opts.get('dry_run'):
218 return result
218 return result
219
219
220 remove = [lfutil.standin(f) for f in remove]
220 remove = [lfutil.standin(f) for f in remove]
221 # If this is being called by addremove, let the original addremove
221 # If this is being called by addremove, let the original addremove
222 # function handle this.
222 # function handle this.
223 if not isaddremove:
223 if not isaddremove:
224 for f in remove:
224 for f in remove:
225 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
225 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
226 repo[None].forget(remove)
226 repo[None].forget(remove)
227
227
228 for f in remove:
228 for f in remove:
229 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
229 lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
230 False)
230 False)
231
231
232 lfdirstate.write()
232 lfdirstate.write()
233 finally:
233 finally:
234 wlock.release()
234 wlock.release()
235
235
236 return result
236 return result
237
237
238 # For overriding mercurial.hgweb.webcommands so that largefiles will
238 # For overriding mercurial.hgweb.webcommands so that largefiles will
239 # appear at their right place in the manifests.
239 # appear at their right place in the manifests.
240 def decodepath(orig, path):
240 def decodepath(orig, path):
241 return lfutil.splitstandin(path) or path
241 return lfutil.splitstandin(path) or path
242
242
243 # -- Wrappers: modify existing commands --------------------------------
243 # -- Wrappers: modify existing commands --------------------------------
244
244
245 def overrideadd(orig, ui, repo, *pats, **opts):
245 def overrideadd(orig, ui, repo, *pats, **opts):
246 if opts.get('normal') and opts.get('large'):
246 if opts.get('normal') and opts.get('large'):
247 raise util.Abort(_('--normal cannot be used with --large'))
247 raise util.Abort(_('--normal cannot be used with --large'))
248 return orig(ui, repo, *pats, **opts)
248 return orig(ui, repo, *pats, **opts)
249
249
250 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
250 def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
251 # The --normal flag short circuits this override
251 # The --normal flag short circuits this override
252 if opts.get('normal'):
252 if opts.get('normal'):
253 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
253 return orig(ui, repo, matcher, prefix, explicitonly, **opts)
254
254
255 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
255 ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
256 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
256 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
257 ladded)
257 ladded)
258 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
258 bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)
259
259
260 bad.extend(f for f in lbad)
260 bad.extend(f for f in lbad)
261 return bad
261 return bad
262
262
263 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
263 def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
264 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
264 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
265 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
265 result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
266 return removelargefiles(ui, repo, False, matcher, after=after,
266 return removelargefiles(ui, repo, False, matcher, after=after,
267 force=force) or result
267 force=force) or result
268
268
269 def overridestatusfn(orig, repo, rev2, **opts):
269 def overridestatusfn(orig, repo, rev2, **opts):
270 try:
270 try:
271 repo._repo.lfstatus = True
271 repo._repo.lfstatus = True
272 return orig(repo, rev2, **opts)
272 return orig(repo, rev2, **opts)
273 finally:
273 finally:
274 repo._repo.lfstatus = False
274 repo._repo.lfstatus = False
275
275
276 def overridestatus(orig, ui, repo, *pats, **opts):
276 def overridestatus(orig, ui, repo, *pats, **opts):
277 try:
277 try:
278 repo.lfstatus = True
278 repo.lfstatus = True
279 return orig(ui, repo, *pats, **opts)
279 return orig(ui, repo, *pats, **opts)
280 finally:
280 finally:
281 repo.lfstatus = False
281 repo.lfstatus = False
282
282
283 def overridedirty(orig, repo, ignoreupdate=False):
283 def overridedirty(orig, repo, ignoreupdate=False):
284 try:
284 try:
285 repo._repo.lfstatus = True
285 repo._repo.lfstatus = True
286 return orig(repo, ignoreupdate)
286 return orig(repo, ignoreupdate)
287 finally:
287 finally:
288 repo._repo.lfstatus = False
288 repo._repo.lfstatus = False
289
289
290 def overridelog(orig, ui, repo, *pats, **opts):
290 def overridelog(orig, ui, repo, *pats, **opts):
291 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
291 def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
292 default='relpath'):
292 default='relpath'):
293 """Matcher that merges root directory with .hglf, suitable for log.
293 """Matcher that merges root directory with .hglf, suitable for log.
294 It is still possible to match .hglf directly.
294 It is still possible to match .hglf directly.
295 For any listed files run log on the standin too.
295 For any listed files run log on the standin too.
296 matchfn tries both the given filename and with .hglf stripped.
296 matchfn tries both the given filename and with .hglf stripped.
297 """
297 """
298 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
298 matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
299 m, p = copy.copy(matchandpats)
299 m, p = copy.copy(matchandpats)
300
300
301 if m.always():
301 if m.always():
302 # We want to match everything anyway, so there's no benefit trying
302 # We want to match everything anyway, so there's no benefit trying
303 # to add standins.
303 # to add standins.
304 return matchandpats
304 return matchandpats
305
305
306 pats = set(p)
306 pats = set(p)
307
307
308 def fixpats(pat, tostandin=lfutil.standin):
308 def fixpats(pat, tostandin=lfutil.standin):
309 kindpat = match_._patsplit(pat, None)
309 kindpat = match_._patsplit(pat, None)
310
310
311 if kindpat[0] is not None:
311 if kindpat[0] is not None:
312 return kindpat[0] + ':' + tostandin(kindpat[1])
312 return kindpat[0] + ':' + tostandin(kindpat[1])
313 return tostandin(kindpat[1])
313 return tostandin(kindpat[1])
314
314
315 if m._cwd:
315 if m._cwd:
316 hglf = lfutil.shortname
316 hglf = lfutil.shortname
317 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
317 back = util.pconvert(m.rel(hglf)[:-len(hglf)])
318
318
319 def tostandin(f):
319 def tostandin(f):
320                 # The file may already be a standin, so truncate the back
320                 # The file may already be a standin, so truncate the back
321 # prefix and test before mangling it. This avoids turning
321 # prefix and test before mangling it. This avoids turning
322 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
322 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
323 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
323 if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
324 return f
324 return f
325
325
326 # An absolute path is from outside the repo, so truncate the
326 # An absolute path is from outside the repo, so truncate the
327 # path to the root before building the standin. Otherwise cwd
327 # path to the root before building the standin. Otherwise cwd
328 # is somewhere in the repo, relative to root, and needs to be
328 # is somewhere in the repo, relative to root, and needs to be
329 # prepended before building the standin.
329 # prepended before building the standin.
330 if os.path.isabs(m._cwd):
330 if os.path.isabs(m._cwd):
331 f = f[len(back):]
331 f = f[len(back):]
332 else:
332 else:
333 f = m._cwd + '/' + f
333 f = m._cwd + '/' + f
334 return back + lfutil.standin(f)
334 return back + lfutil.standin(f)
335
335
336 pats.update(fixpats(f, tostandin) for f in p)
336 pats.update(fixpats(f, tostandin) for f in p)
337 else:
337 else:
338 def tostandin(f):
338 def tostandin(f):
339 if lfutil.splitstandin(f):
339 if lfutil.splitstandin(f):
340 return f
340 return f
341 return lfutil.standin(f)
341 return lfutil.standin(f)
342 pats.update(fixpats(f, tostandin) for f in p)
342 pats.update(fixpats(f, tostandin) for f in p)
343
343
344 for i in range(0, len(m._files)):
344 for i in range(0, len(m._files)):
345 # Don't add '.hglf' to m.files, since that is already covered by '.'
345 # Don't add '.hglf' to m.files, since that is already covered by '.'
346 if m._files[i] == '.':
346 if m._files[i] == '.':
347 continue
347 continue
348 standin = lfutil.standin(m._files[i])
348 standin = lfutil.standin(m._files[i])
349 # If the "standin" is a directory, append instead of replace to
349 # If the "standin" is a directory, append instead of replace to
350 # support naming a directory on the command line with only
350 # support naming a directory on the command line with only
351 # largefiles. The original directory is kept to support normal
351 # largefiles. The original directory is kept to support normal
352 # files.
352 # files.
353 if standin in repo[ctx.node()]:
353 if standin in repo[ctx.node()]:
354 m._files[i] = standin
354 m._files[i] = standin
355 elif m._files[i] not in repo[ctx.node()] \
355 elif m._files[i] not in repo[ctx.node()] \
356 and repo.wvfs.isdir(standin):
356 and repo.wvfs.isdir(standin):
357 m._files.append(standin)
357 m._files.append(standin)
358
358
359 m._fmap = set(m._files)
359 m._fmap = set(m._files)
360 m._always = False
360 m._always = False
361 origmatchfn = m.matchfn
361 origmatchfn = m.matchfn
362 def lfmatchfn(f):
362 def lfmatchfn(f):
363 lf = lfutil.splitstandin(f)
363 lf = lfutil.splitstandin(f)
364 if lf is not None and origmatchfn(lf):
364 if lf is not None and origmatchfn(lf):
365 return True
365 return True
366 r = origmatchfn(f)
366 r = origmatchfn(f)
367 return r
367 return r
368 m.matchfn = lfmatchfn
368 m.matchfn = lfmatchfn
369
369
370 ui.debug('updated patterns: %s\n' % sorted(pats))
370 ui.debug('updated patterns: %s\n' % sorted(pats))
371 return m, pats
371 return m, pats
372
372
373 # For hg log --patch, the match object is used in two different senses:
373 # For hg log --patch, the match object is used in two different senses:
374 # (1) to determine what revisions should be printed out, and
374 # (1) to determine what revisions should be printed out, and
375 # (2) to determine what files to print out diffs for.
375 # (2) to determine what files to print out diffs for.
376 # The magic matchandpats override should be used for case (1) but not for
376 # The magic matchandpats override should be used for case (1) but not for
377 # case (2).
377 # case (2).
378 def overridemakelogfilematcher(repo, pats, opts):
378 def overridemakelogfilematcher(repo, pats, opts):
379 pctx = repo[None]
379 wctx = repo[None]
380 match, pats = oldmatchandpats(pctx, pats, opts)
380 match, pats = oldmatchandpats(wctx, pats, opts)
381 return lambda rev: match
381 return lambda rev: match
382
382
383 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
383 oldmatchandpats = installmatchandpatsfn(overridematchandpats)
384 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
384 oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
385 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
385 setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
386
386
387 try:
387 try:
388 return orig(ui, repo, *pats, **opts)
388 return orig(ui, repo, *pats, **opts)
389 finally:
389 finally:
390 restorematchandpatsfn()
390 restorematchandpatsfn()
391 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
391 setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
392
392
393 def overrideverify(orig, ui, repo, *pats, **opts):
393 def overrideverify(orig, ui, repo, *pats, **opts):
394 large = opts.pop('large', False)
394 large = opts.pop('large', False)
395 all = opts.pop('lfa', False)
395 all = opts.pop('lfa', False)
396 contents = opts.pop('lfc', False)
396 contents = opts.pop('lfc', False)
397
397
398 result = orig(ui, repo, *pats, **opts)
398 result = orig(ui, repo, *pats, **opts)
399 if large or all or contents:
399 if large or all or contents:
400 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
400 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
401 return result
401 return result
402
402
403 def overridedebugstate(orig, ui, repo, *pats, **opts):
403 def overridedebugstate(orig, ui, repo, *pats, **opts):
404 large = opts.pop('large', False)
404 large = opts.pop('large', False)
405 if large:
405 if large:
406 class fakerepo(object):
406 class fakerepo(object):
407 dirstate = lfutil.openlfdirstate(ui, repo)
407 dirstate = lfutil.openlfdirstate(ui, repo)
408 orig(ui, fakerepo, *pats, **opts)
408 orig(ui, fakerepo, *pats, **opts)
409 else:
409 else:
410 orig(ui, repo, *pats, **opts)
410 orig(ui, repo, *pats, **opts)
411
411
412 # Before starting the manifest merge, merge.updates will call
412 # Before starting the manifest merge, merge.updates will call
413 # _checkunknownfile to check if there are any files in the merged-in
413 # _checkunknownfile to check if there are any files in the merged-in
414 # changeset that collide with unknown files in the working copy.
414 # changeset that collide with unknown files in the working copy.
415 #
415 #
416 # The largefiles are seen as unknown, so this prevents us from merging
416 # The largefiles are seen as unknown, so this prevents us from merging
417 # in a file 'foo' if we already have a largefile with the same name.
417 # in a file 'foo' if we already have a largefile with the same name.
418 #
418 #
419 # The overridden function filters the unknown files by removing any
419 # The overridden function filters the unknown files by removing any
420 # largefiles. This makes the merge proceed and we can then handle this
420 # largefiles. This makes the merge proceed and we can then handle this
421 # case further in the overridden calculateupdates function below.
421 # case further in the overridden calculateupdates function below.
422 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
422 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
423 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
423 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
424 return False
424 return False
425 return origfn(repo, wctx, mctx, f, f2)
425 return origfn(repo, wctx, mctx, f, f2)
426
426
427 # The manifest merge handles conflicts on the manifest level. We want
427 # The manifest merge handles conflicts on the manifest level. We want
428 # to handle changes in largefile-ness of files at this level too.
428 # to handle changes in largefile-ness of files at this level too.
429 #
429 #
430 # The strategy is to run the original calculateupdates and then process
430 # The strategy is to run the original calculateupdates and then process
431 # the action list it outputs. There are two cases we need to deal with:
431 # the action list it outputs. There are two cases we need to deal with:
432 #
432 #
433 # 1. Normal file in p1, largefile in p2. Here the largefile is
433 # 1. Normal file in p1, largefile in p2. Here the largefile is
434 # detected via its standin file, which will enter the working copy
434 # detected via its standin file, which will enter the working copy
435 # with a "get" action. It is not "merge" since the standin is all
435 # with a "get" action. It is not "merge" since the standin is all
436 # Mercurial is concerned with at this level -- the link to the
436 # Mercurial is concerned with at this level -- the link to the
437 # existing normal file is not relevant here.
437 # existing normal file is not relevant here.
438 #
438 #
439 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
439 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
440 # since the largefile will be present in the working copy and
440 # since the largefile will be present in the working copy and
441 # different from the normal file in p2. Mercurial therefore
441 # different from the normal file in p2. Mercurial therefore
442 # triggers a merge action.
442 # triggers a merge action.
443 #
443 #
444 # In both cases, we prompt the user and emit new actions to either
444 # In both cases, we prompt the user and emit new actions to either
445 # remove the standin (if the normal file was kept) or to remove the
445 # remove the standin (if the normal file was kept) or to remove the
446 # normal file and get the standin (if the largefile was kept). The
446 # normal file and get the standin (if the largefile was kept). The
447 # default prompt answer is to use the largefile version since it was
447 # default prompt answer is to use the largefile version since it was
448 # presumably changed on purpose.
448 # presumably changed on purpose.
449 #
449 #
450 # Finally, the merge.applyupdates function will then take care of
450 # Finally, the merge.applyupdates function will then take care of
451 # writing the files into the working copy and lfcommands.updatelfiles
451 # writing the files into the working copy and lfcommands.updatelfiles
452 # will update the largefiles.
452 # will update the largefiles.
453 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
453 def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
454 partial, acceptremote, followcopies):
454 partial, acceptremote, followcopies):
455 overwrite = force and not branchmerge
455 overwrite = force and not branchmerge
456 actions, diverge, renamedelete = origfn(
456 actions, diverge, renamedelete = origfn(
457 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
457 repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
458 followcopies)
458 followcopies)
459
459
460 if overwrite:
460 if overwrite:
461 return actions, diverge, renamedelete
461 return actions, diverge, renamedelete
462
462
463 # Convert to dictionary with filename as key and action as value.
463 # Convert to dictionary with filename as key and action as value.
464 lfiles = set()
464 lfiles = set()
465 for f in actions:
465 for f in actions:
466 splitstandin = f and lfutil.splitstandin(f)
466 splitstandin = f and lfutil.splitstandin(f)
467 if splitstandin in p1:
467 if splitstandin in p1:
468 lfiles.add(splitstandin)
468 lfiles.add(splitstandin)
469 elif lfutil.standin(f) in p1:
469 elif lfutil.standin(f) in p1:
470 lfiles.add(f)
470 lfiles.add(f)
471
471
472 for lfile in lfiles:
472 for lfile in lfiles:
473 standin = lfutil.standin(lfile)
473 standin = lfutil.standin(lfile)
474 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
474 (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
475 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
475 (sm, sargs, smsg) = actions.get(standin, (None, None, None))
476 if sm in ('g', 'dc') and lm != 'r':
476 if sm in ('g', 'dc') and lm != 'r':
477 # Case 1: normal file in the working copy, largefile in
477 # Case 1: normal file in the working copy, largefile in
478 # the second parent
478 # the second parent
479 usermsg = _('remote turned local normal file %s into a largefile\n'
479 usermsg = _('remote turned local normal file %s into a largefile\n'
480 'use (l)argefile or keep (n)ormal file?'
480 'use (l)argefile or keep (n)ormal file?'
481 '$$ &Largefile $$ &Normal file') % lfile
481 '$$ &Largefile $$ &Normal file') % lfile
482 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
482 if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
483 actions[lfile] = ('r', None, 'replaced by standin')
483 actions[lfile] = ('r', None, 'replaced by standin')
484 actions[standin] = ('g', sargs, 'replaces standin')
484 actions[standin] = ('g', sargs, 'replaces standin')
485 else: # keep local normal file
485 else: # keep local normal file
486 actions[lfile] = ('k', None, 'replaces standin')
486 actions[lfile] = ('k', None, 'replaces standin')
487 if branchmerge:
487 if branchmerge:
488 actions[standin] = ('k', None, 'replaced by non-standin')
488 actions[standin] = ('k', None, 'replaced by non-standin')
489 else:
489 else:
490 actions[standin] = ('r', None, 'replaced by non-standin')
490 actions[standin] = ('r', None, 'replaced by non-standin')
491 elif lm in ('g', 'dc') and sm != 'r':
491 elif lm in ('g', 'dc') and sm != 'r':
492 # Case 2: largefile in the working copy, normal file in
492 # Case 2: largefile in the working copy, normal file in
493 # the second parent
493 # the second parent
494 usermsg = _('remote turned local largefile %s into a normal file\n'
494 usermsg = _('remote turned local largefile %s into a normal file\n'
495 'keep (l)argefile or use (n)ormal file?'
495 'keep (l)argefile or use (n)ormal file?'
496 '$$ &Largefile $$ &Normal file') % lfile
496 '$$ &Largefile $$ &Normal file') % lfile
497 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
497 if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
498 if branchmerge:
498 if branchmerge:
499 # largefile can be restored from standin safely
499 # largefile can be restored from standin safely
500 actions[lfile] = ('k', None, 'replaced by standin')
500 actions[lfile] = ('k', None, 'replaced by standin')
501 actions[standin] = ('k', None, 'replaces standin')
501 actions[standin] = ('k', None, 'replaces standin')
502 else:
502 else:
503 # "lfile" should be marked as "removed" without
503 # "lfile" should be marked as "removed" without
504 # removal of itself
504 # removal of itself
505 actions[lfile] = ('lfmr', None,
505 actions[lfile] = ('lfmr', None,
506 'forget non-standin largefile')
506 'forget non-standin largefile')
507
507
508 # linear-merge should treat this largefile as 're-added'
508 # linear-merge should treat this largefile as 're-added'
509 actions[standin] = ('a', None, 'keep standin')
509 actions[standin] = ('a', None, 'keep standin')
510 else: # pick remote normal file
510 else: # pick remote normal file
511 actions[lfile] = ('g', largs, 'replaces standin')
511 actions[lfile] = ('g', largs, 'replaces standin')
512 actions[standin] = ('r', None, 'replaced by non-standin')
512 actions[standin] = ('r', None, 'replaced by non-standin')
513
513
514 return actions, diverge, renamedelete
514 return actions, diverge, renamedelete
515
515
516 def mergerecordupdates(orig, repo, actions, branchmerge):
516 def mergerecordupdates(orig, repo, actions, branchmerge):
517 if 'lfmr' in actions:
517 if 'lfmr' in actions:
518 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
518 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
519 for lfile, args, msg in actions['lfmr']:
519 for lfile, args, msg in actions['lfmr']:
520 # this should be executed before 'orig', to execute 'remove'
520 # this should be executed before 'orig', to execute 'remove'
521 # before all other actions
521 # before all other actions
522 repo.dirstate.remove(lfile)
522 repo.dirstate.remove(lfile)
523 # make sure lfile doesn't get synclfdirstate'd as normal
523 # make sure lfile doesn't get synclfdirstate'd as normal
524 lfdirstate.add(lfile)
524 lfdirstate.add(lfile)
525 lfdirstate.write()
525 lfdirstate.write()
526
526
527 return orig(repo, actions, branchmerge)
527 return orig(repo, actions, branchmerge)
528
528
529
529
530 # Override filemerge to prompt the user about how they wish to merge
530 # Override filemerge to prompt the user about how they wish to merge
531 # largefiles. This will handle identical edits without prompting the user.
531 # largefiles. This will handle identical edits without prompting the user.
532 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
532 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
533 if not lfutil.isstandin(orig):
533 if not lfutil.isstandin(orig):
534 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
534 return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)
535
535
536 ahash = fca.data().strip().lower()
536 ahash = fca.data().strip().lower()
537 dhash = fcd.data().strip().lower()
537 dhash = fcd.data().strip().lower()
538 ohash = fco.data().strip().lower()
538 ohash = fco.data().strip().lower()
539 if (ohash != ahash and
539 if (ohash != ahash and
540 ohash != dhash and
540 ohash != dhash and
541 (dhash == ahash or
541 (dhash == ahash or
542 repo.ui.promptchoice(
542 repo.ui.promptchoice(
543 _('largefile %s has a merge conflict\nancestor was %s\n'
543 _('largefile %s has a merge conflict\nancestor was %s\n'
544 'keep (l)ocal %s or\ntake (o)ther %s?'
544 'keep (l)ocal %s or\ntake (o)ther %s?'
545 '$$ &Local $$ &Other') %
545 '$$ &Local $$ &Other') %
546 (lfutil.splitstandin(orig), ahash, dhash, ohash),
546 (lfutil.splitstandin(orig), ahash, dhash, ohash),
547 0) == 1)):
547 0) == 1)):
548 repo.wwrite(fcd.path(), fco.data(), fco.flags())
548 repo.wwrite(fcd.path(), fco.data(), fco.flags())
549 return 0
549 return 0
550
550
551 def copiespathcopies(orig, ctx1, ctx2):
551 def copiespathcopies(orig, ctx1, ctx2):
552 copies = orig(ctx1, ctx2)
552 copies = orig(ctx1, ctx2)
553 updated = {}
553 updated = {}
554
554
555 for k, v in copies.iteritems():
555 for k, v in copies.iteritems():
556 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
556 updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
557
557
558 return updated
558 return updated
559
559
560 # Copy first changes the matchers to match standins instead of
560 # Copy first changes the matchers to match standins instead of
561 # largefiles. Then it overrides util.copyfile in that function it
561 # largefiles. Then it overrides util.copyfile in that function it
562 # checks if the destination largefile already exists. It also keeps a
562 # checks if the destination largefile already exists. It also keeps a
563 # list of copied files so that the largefiles can be copied and the
563 # list of copied files so that the largefiles can be copied and the
564 # dirstate updated.
564 # dirstate updated.
565 def overridecopy(orig, ui, repo, pats, opts, rename=False):
565 def overridecopy(orig, ui, repo, pats, opts, rename=False):
566 # doesn't remove largefile on rename
566 # doesn't remove largefile on rename
567 if len(pats) < 2:
567 if len(pats) < 2:
568 # this isn't legal, let the original function deal with it
568 # this isn't legal, let the original function deal with it
569 return orig(ui, repo, pats, opts, rename)
569 return orig(ui, repo, pats, opts, rename)
570
570
571 # This could copy both lfiles and normal files in one command,
571 # This could copy both lfiles and normal files in one command,
572 # but we don't want to do that. First replace their matcher to
572 # but we don't want to do that. First replace their matcher to
573 # only match normal files and run it, then replace it to just
573 # only match normal files and run it, then replace it to just
574 # match largefiles and run it again.
574 # match largefiles and run it again.
575 nonormalfiles = False
575 nonormalfiles = False
576 nolfiles = False
576 nolfiles = False
577 installnormalfilesmatchfn(repo[None].manifest())
577 installnormalfilesmatchfn(repo[None].manifest())
578 try:
578 try:
579 try:
579 try:
580 result = orig(ui, repo, pats, opts, rename)
580 result = orig(ui, repo, pats, opts, rename)
581 except util.Abort, e:
581 except util.Abort, e:
582 if str(e) != _('no files to copy'):
582 if str(e) != _('no files to copy'):
583 raise e
583 raise e
584 else:
584 else:
585 nonormalfiles = True
585 nonormalfiles = True
586 result = 0
586 result = 0
587 finally:
587 finally:
588 restorematchfn()
588 restorematchfn()
589
589
590 # The first rename can cause our current working directory to be removed.
590 # The first rename can cause our current working directory to be removed.
591 # In that case there is nothing left to copy/rename so just quit.
591 # In that case there is nothing left to copy/rename so just quit.
592 try:
592 try:
593 repo.getcwd()
593 repo.getcwd()
594 except OSError:
594 except OSError:
595 return result
595 return result
596
596
597 def makestandin(relpath):
597 def makestandin(relpath):
598 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
598 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
599 return os.path.join(repo.wjoin(lfutil.standin(path)))
599 return os.path.join(repo.wjoin(lfutil.standin(path)))
600
600
601 fullpats = scmutil.expandpats(pats)
601 fullpats = scmutil.expandpats(pats)
602 dest = fullpats[-1]
602 dest = fullpats[-1]
603
603
604 if os.path.isdir(dest):
604 if os.path.isdir(dest):
605 if not os.path.isdir(makestandin(dest)):
605 if not os.path.isdir(makestandin(dest)):
606 os.makedirs(makestandin(dest))
606 os.makedirs(makestandin(dest))
607
607
608 try:
608 try:
609 try:
609 try:
610 # When we call orig below it creates the standins but we don't add
610 # When we call orig below it creates the standins but we don't add
611 # them to the dir state until later so lock during that time.
611 # them to the dir state until later so lock during that time.
612 wlock = repo.wlock()
612 wlock = repo.wlock()
613
613
614 manifest = repo[None].manifest()
614 manifest = repo[None].manifest()
615 def overridematch(ctx, pats=[], opts={}, globbed=False,
615 def overridematch(ctx, pats=[], opts={}, globbed=False,
616 default='relpath'):
616 default='relpath'):
617 newpats = []
617 newpats = []
618 # The patterns were previously mangled to add the standin
618 # The patterns were previously mangled to add the standin
619 # directory; we need to remove that now
619 # directory; we need to remove that now
620 for pat in pats:
620 for pat in pats:
621 if match_.patkind(pat) is None and lfutil.shortname in pat:
621 if match_.patkind(pat) is None and lfutil.shortname in pat:
622 newpats.append(pat.replace(lfutil.shortname, ''))
622 newpats.append(pat.replace(lfutil.shortname, ''))
623 else:
623 else:
624 newpats.append(pat)
624 newpats.append(pat)
625 match = oldmatch(ctx, newpats, opts, globbed, default)
625 match = oldmatch(ctx, newpats, opts, globbed, default)
626 m = copy.copy(match)
626 m = copy.copy(match)
627 lfile = lambda f: lfutil.standin(f) in manifest
627 lfile = lambda f: lfutil.standin(f) in manifest
628 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
628 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
629 m._fmap = set(m._files)
629 m._fmap = set(m._files)
630 origmatchfn = m.matchfn
630 origmatchfn = m.matchfn
631 m.matchfn = lambda f: (lfutil.isstandin(f) and
631 m.matchfn = lambda f: (lfutil.isstandin(f) and
632 (f in manifest) and
632 (f in manifest) and
633 origmatchfn(lfutil.splitstandin(f)) or
633 origmatchfn(lfutil.splitstandin(f)) or
634 None)
634 None)
635 return m
635 return m
636 oldmatch = installmatchfn(overridematch)
636 oldmatch = installmatchfn(overridematch)
637 listpats = []
637 listpats = []
638 for pat in pats:
638 for pat in pats:
639 if match_.patkind(pat) is not None:
639 if match_.patkind(pat) is not None:
640 listpats.append(pat)
640 listpats.append(pat)
641 else:
641 else:
642 listpats.append(makestandin(pat))
642 listpats.append(makestandin(pat))
643
643
644 try:
644 try:
645 origcopyfile = util.copyfile
645 origcopyfile = util.copyfile
646 copiedfiles = []
646 copiedfiles = []
647 def overridecopyfile(src, dest):
647 def overridecopyfile(src, dest):
648 if (lfutil.shortname in src and
648 if (lfutil.shortname in src and
649 dest.startswith(repo.wjoin(lfutil.shortname))):
649 dest.startswith(repo.wjoin(lfutil.shortname))):
650 destlfile = dest.replace(lfutil.shortname, '')
650 destlfile = dest.replace(lfutil.shortname, '')
651 if not opts['force'] and os.path.exists(destlfile):
651 if not opts['force'] and os.path.exists(destlfile):
652 raise IOError('',
652 raise IOError('',
653 _('destination largefile already exists'))
653 _('destination largefile already exists'))
654 copiedfiles.append((src, dest))
654 copiedfiles.append((src, dest))
655 origcopyfile(src, dest)
655 origcopyfile(src, dest)
656
656
657 util.copyfile = overridecopyfile
657 util.copyfile = overridecopyfile
658 result += orig(ui, repo, listpats, opts, rename)
658 result += orig(ui, repo, listpats, opts, rename)
659 finally:
659 finally:
660 util.copyfile = origcopyfile
660 util.copyfile = origcopyfile
661
661
662 lfdirstate = lfutil.openlfdirstate(ui, repo)
662 lfdirstate = lfutil.openlfdirstate(ui, repo)
663 for (src, dest) in copiedfiles:
663 for (src, dest) in copiedfiles:
664 if (lfutil.shortname in src and
664 if (lfutil.shortname in src and
665 dest.startswith(repo.wjoin(lfutil.shortname))):
665 dest.startswith(repo.wjoin(lfutil.shortname))):
666 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
666 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
667 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
667 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
668 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
668 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
669 if not os.path.isdir(destlfiledir):
669 if not os.path.isdir(destlfiledir):
670 os.makedirs(destlfiledir)
670 os.makedirs(destlfiledir)
671 if rename:
671 if rename:
672 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
672 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
673
673
674 # The file is gone, but this deletes any empty parent
674 # The file is gone, but this deletes any empty parent
675 # directories as a side-effect.
675 # directories as a side-effect.
676 util.unlinkpath(repo.wjoin(srclfile), True)
676 util.unlinkpath(repo.wjoin(srclfile), True)
677 lfdirstate.remove(srclfile)
677 lfdirstate.remove(srclfile)
678 else:
678 else:
679 util.copyfile(repo.wjoin(srclfile),
679 util.copyfile(repo.wjoin(srclfile),
680 repo.wjoin(destlfile))
680 repo.wjoin(destlfile))
681
681
682 lfdirstate.add(destlfile)
682 lfdirstate.add(destlfile)
683 lfdirstate.write()
683 lfdirstate.write()
684 except util.Abort, e:
684 except util.Abort, e:
685 if str(e) != _('no files to copy'):
685 if str(e) != _('no files to copy'):
686 raise e
686 raise e
687 else:
687 else:
688 nolfiles = True
688 nolfiles = True
689 finally:
689 finally:
690 restorematchfn()
690 restorematchfn()
691 wlock.release()
691 wlock.release()
692
692
693 if nolfiles and nonormalfiles:
693 if nolfiles and nonormalfiles:
694 raise util.Abort(_('no files to copy'))
694 raise util.Abort(_('no files to copy'))
695
695
696 return result
696 return result
697
697
698 # When the user calls revert, we have to be careful to not revert any
698 # When the user calls revert, we have to be careful to not revert any
699 # changes to other largefiles accidentally. This means we have to keep
699 # changes to other largefiles accidentally. This means we have to keep
700 # track of the largefiles that are being reverted so we only pull down
700 # track of the largefiles that are being reverted so we only pull down
701 # the necessary largefiles.
701 # the necessary largefiles.
702 #
702 #
703 # Standins are only updated (to match the hash of largefiles) before
703 # Standins are only updated (to match the hash of largefiles) before
704 # commits. Update the standins then run the original revert, changing
704 # commits. Update the standins then run the original revert, changing
705 # the matcher to hit standins instead of largefiles. Based on the
705 # the matcher to hit standins instead of largefiles. Based on the
706 # resulting standins update the largefiles.
706 # resulting standins update the largefiles.
707 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
707 def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
708 # Because we put the standins in a bad state (by updating them)
708 # Because we put the standins in a bad state (by updating them)
709 # and then return them to a correct state we need to lock to
709 # and then return them to a correct state we need to lock to
710 # prevent others from changing them in their incorrect state.
710 # prevent others from changing them in their incorrect state.
711 wlock = repo.wlock()
711 wlock = repo.wlock()
712 try:
712 try:
713 lfdirstate = lfutil.openlfdirstate(ui, repo)
713 lfdirstate = lfutil.openlfdirstate(ui, repo)
714 s = lfutil.lfdirstatestatus(lfdirstate, repo)
714 s = lfutil.lfdirstatestatus(lfdirstate, repo)
715 lfdirstate.write()
715 lfdirstate.write()
716 for lfile in s.modified:
716 for lfile in s.modified:
717 lfutil.updatestandin(repo, lfutil.standin(lfile))
717 lfutil.updatestandin(repo, lfutil.standin(lfile))
718 for lfile in s.deleted:
718 for lfile in s.deleted:
719 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
719 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
720 os.unlink(repo.wjoin(lfutil.standin(lfile)))
720 os.unlink(repo.wjoin(lfutil.standin(lfile)))
721
721
722 oldstandins = lfutil.getstandinsstate(repo)
722 oldstandins = lfutil.getstandinsstate(repo)
723
723
724 def overridematch(mctx, pats=[], opts={}, globbed=False,
724 def overridematch(mctx, pats=[], opts={}, globbed=False,
725 default='relpath'):
725 default='relpath'):
726 match = oldmatch(mctx, pats, opts, globbed, default)
726 match = oldmatch(mctx, pats, opts, globbed, default)
727 m = copy.copy(match)
727 m = copy.copy(match)
728
728
729 # revert supports recursing into subrepos, and though largefiles
729 # revert supports recursing into subrepos, and though largefiles
730 # currently doesn't work correctly in that case, this match is
730 # currently doesn't work correctly in that case, this match is
731 # called, so the lfdirstate above may not be the correct one for
731 # called, so the lfdirstate above may not be the correct one for
732 # this invocation of match.
732 # this invocation of match.
733 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
733 lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
734 False)
734 False)
735
735
736 def tostandin(f):
736 def tostandin(f):
737 standin = lfutil.standin(f)
737 standin = lfutil.standin(f)
738 if standin in ctx or standin in mctx:
738 if standin in ctx or standin in mctx:
739 return standin
739 return standin
740 elif standin in repo[None] or lfdirstate[f] == 'r':
740 elif standin in repo[None] or lfdirstate[f] == 'r':
741 return None
741 return None
742 return f
742 return f
743 m._files = [tostandin(f) for f in m._files]
743 m._files = [tostandin(f) for f in m._files]
744 m._files = [f for f in m._files if f is not None]
744 m._files = [f for f in m._files if f is not None]
745 m._fmap = set(m._files)
745 m._fmap = set(m._files)
746 origmatchfn = m.matchfn
746 origmatchfn = m.matchfn
747 def matchfn(f):
747 def matchfn(f):
748 if lfutil.isstandin(f):
748 if lfutil.isstandin(f):
749 return (origmatchfn(lfutil.splitstandin(f)) and
749 return (origmatchfn(lfutil.splitstandin(f)) and
750 (f in ctx or f in mctx))
750 (f in ctx or f in mctx))
751 return origmatchfn(f)
751 return origmatchfn(f)
752 m.matchfn = matchfn
752 m.matchfn = matchfn
753 return m
753 return m
754 oldmatch = installmatchfn(overridematch)
754 oldmatch = installmatchfn(overridematch)
755 try:
755 try:
756 orig(ui, repo, ctx, parents, *pats, **opts)
756 orig(ui, repo, ctx, parents, *pats, **opts)
757 finally:
757 finally:
758 restorematchfn()
758 restorematchfn()
759
759
760 newstandins = lfutil.getstandinsstate(repo)
760 newstandins = lfutil.getstandinsstate(repo)
761 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
761 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
762 # lfdirstate should be 'normallookup'-ed for updated files,
762 # lfdirstate should be 'normallookup'-ed for updated files,
763 # because reverting doesn't touch dirstate for 'normal' files
763 # because reverting doesn't touch dirstate for 'normal' files
764 # when target revision is explicitly specified: in such case,
764 # when target revision is explicitly specified: in such case,
765 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
765 # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
766 # of target (standin) file.
766 # of target (standin) file.
767 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
767 lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
768 normallookup=True)
768 normallookup=True)
769
769
770 finally:
770 finally:
771 wlock.release()
771 wlock.release()
772
772
773 # after pulling changesets, we need to take some extra care to get
773 # after pulling changesets, we need to take some extra care to get
774 # largefiles updated remotely
774 # largefiles updated remotely
775 def overridepull(orig, ui, repo, source=None, **opts):
775 def overridepull(orig, ui, repo, source=None, **opts):
776 revsprepull = len(repo)
776 revsprepull = len(repo)
777 if not source:
777 if not source:
778 source = 'default'
778 source = 'default'
779 repo.lfpullsource = source
779 repo.lfpullsource = source
780 result = orig(ui, repo, source, **opts)
780 result = orig(ui, repo, source, **opts)
781 revspostpull = len(repo)
781 revspostpull = len(repo)
782 lfrevs = opts.get('lfrev', [])
782 lfrevs = opts.get('lfrev', [])
783 if opts.get('all_largefiles'):
783 if opts.get('all_largefiles'):
784 lfrevs.append('pulled()')
784 lfrevs.append('pulled()')
785 if lfrevs and revspostpull > revsprepull:
785 if lfrevs and revspostpull > revsprepull:
786 numcached = 0
786 numcached = 0
787 repo.firstpulled = revsprepull # for pulled() revset expression
787 repo.firstpulled = revsprepull # for pulled() revset expression
788 try:
788 try:
789 for rev in scmutil.revrange(repo, lfrevs):
789 for rev in scmutil.revrange(repo, lfrevs):
790 ui.note(_('pulling largefiles for revision %s\n') % rev)
790 ui.note(_('pulling largefiles for revision %s\n') % rev)
791 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
791 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
792 numcached += len(cached)
792 numcached += len(cached)
793 finally:
793 finally:
794 del repo.firstpulled
794 del repo.firstpulled
795 ui.status(_("%d largefiles cached\n") % numcached)
795 ui.status(_("%d largefiles cached\n") % numcached)
796 return result
796 return result
797
797
798 def pulledrevsetsymbol(repo, subset, x):
798 def pulledrevsetsymbol(repo, subset, x):
799 """``pulled()``
799 """``pulled()``
800     Changesets that have just been pulled.
800     Changesets that have just been pulled.
801
801
802 Only available with largefiles from pull --lfrev expressions.
802 Only available with largefiles from pull --lfrev expressions.
803
803
804 .. container:: verbose
804 .. container:: verbose
805
805
806 Some examples:
806 Some examples:
807
807
808 - pull largefiles for all new changesets::
808 - pull largefiles for all new changesets::
809
809
810 hg pull --lfrev "pulled()"
810 hg pull --lfrev "pulled()"
811
811
812 - pull largefiles for all new branch heads::
812 - pull largefiles for all new branch heads::
813
813
814 hg pull --lfrev "head(pulled()) and not closed()"
814 hg pull --lfrev "head(pulled()) and not closed()"
815
815
816 """
816 """
817
817
818 try:
818 try:
819 firstpulled = repo.firstpulled
819 firstpulled = repo.firstpulled
820 except AttributeError:
820 except AttributeError:
821 raise util.Abort(_("pulled() only available in --lfrev"))
821 raise util.Abort(_("pulled() only available in --lfrev"))
822 return revset.baseset([r for r in subset if r >= firstpulled])
822 return revset.baseset([r for r in subset if r >= firstpulled])
823
823
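# A minimal, standalone sketch (not part of the extension) of what the
# pulled() predicate above computes: repo.firstpulled records the revision
# count before the pull, so only revisions added by that pull survive the
# filter. The numbers below are hypothetical revision numbers.
firstpulled = 5                      # stand-in for repo.firstpulled
subset = [3, 4, 5, 6, 7]             # stand-in for the candidate revset
assert [r for r in subset if r >= firstpulled] == [5, 6, 7]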
824 def overrideclone(orig, ui, source, dest=None, **opts):
824 def overrideclone(orig, ui, source, dest=None, **opts):
825 d = dest
825 d = dest
826 if d is None:
826 if d is None:
827 d = hg.defaultdest(source)
827 d = hg.defaultdest(source)
828 if opts.get('all_largefiles') and not hg.islocal(d):
828 if opts.get('all_largefiles') and not hg.islocal(d):
829 raise util.Abort(_(
829 raise util.Abort(_(
830 '--all-largefiles is incompatible with non-local destination %s') %
830 '--all-largefiles is incompatible with non-local destination %s') %
831 d)
831 d)
832
832
833 return orig(ui, source, dest, **opts)
833 return orig(ui, source, dest, **opts)
834
834
835 def hgclone(orig, ui, opts, *args, **kwargs):
835 def hgclone(orig, ui, opts, *args, **kwargs):
836 result = orig(ui, opts, *args, **kwargs)
836 result = orig(ui, opts, *args, **kwargs)
837
837
838 if result is not None:
838 if result is not None:
839 sourcerepo, destrepo = result
839 sourcerepo, destrepo = result
840 repo = destrepo.local()
840 repo = destrepo.local()
841
841
842 # If largefiles is required for this repo, permanently enable it locally
842 # If largefiles is required for this repo, permanently enable it locally
843 if 'largefiles' in repo.requirements:
843 if 'largefiles' in repo.requirements:
844 fp = repo.vfs('hgrc', 'a', text=True)
844 fp = repo.vfs('hgrc', 'a', text=True)
845 try:
845 try:
846 fp.write('\n[extensions]\nlargefiles=\n')
846 fp.write('\n[extensions]\nlargefiles=\n')
847 finally:
847 finally:
848 fp.close()
848 fp.close()
849
849
850 # Caching is implicitly limited to 'rev' option, since the dest repo was
850 # Caching is implicitly limited to 'rev' option, since the dest repo was
851 # truncated at that point. The user may expect a download count with
851 # truncated at that point. The user may expect a download count with
852 # this option, so attempt the download whether or not this is a largefiles repo.
852 # this option, so attempt the download whether or not this is a largefiles repo.
853 if opts.get('all_largefiles'):
853 if opts.get('all_largefiles'):
854 success, missing = lfcommands.downloadlfiles(ui, repo, None)
854 success, missing = lfcommands.downloadlfiles(ui, repo, None)
855
855
856 if missing != 0:
856 if missing != 0:
857 return None
857 return None
858
858
859 return result
859 return result
860
860
861 def overriderebase(orig, ui, repo, **opts):
861 def overriderebase(orig, ui, repo, **opts):
862 if not util.safehasattr(repo, '_largefilesenabled'):
862 if not util.safehasattr(repo, '_largefilesenabled'):
863 return orig(ui, repo, **opts)
863 return orig(ui, repo, **opts)
864
864
865 resuming = opts.get('continue')
865 resuming = opts.get('continue')
866 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
866 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
867 repo._lfstatuswriters.append(lambda *msg, **opts: None)
867 repo._lfstatuswriters.append(lambda *msg, **opts: None)
868 try:
868 try:
869 return orig(ui, repo, **opts)
869 return orig(ui, repo, **opts)
870 finally:
870 finally:
871 repo._lfstatuswriters.pop()
871 repo._lfstatuswriters.pop()
872 repo._lfcommithooks.pop()
872 repo._lfcommithooks.pop()
873
873
874 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
874 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
875 prefix='', mtime=None, subrepos=None):
875 prefix='', mtime=None, subrepos=None):
876 # No need to lock because we are only reading history and
876 # No need to lock because we are only reading history and
877 # largefile caches, neither of which are modified.
877 # largefile caches, neither of which are modified.
878 lfcommands.cachelfiles(repo.ui, repo, node)
878 lfcommands.cachelfiles(repo.ui, repo, node)
879
879
880 if kind not in archival.archivers:
880 if kind not in archival.archivers:
881 raise util.Abort(_("unknown archive type '%s'") % kind)
881 raise util.Abort(_("unknown archive type '%s'") % kind)
882
882
883 ctx = repo[node]
883 ctx = repo[node]
884
884
885 if kind == 'files':
885 if kind == 'files':
886 if prefix:
886 if prefix:
887 raise util.Abort(
887 raise util.Abort(
888 _('cannot give prefix when archiving to files'))
888 _('cannot give prefix when archiving to files'))
889 else:
889 else:
890 prefix = archival.tidyprefix(dest, kind, prefix)
890 prefix = archival.tidyprefix(dest, kind, prefix)
891
891
892 def write(name, mode, islink, getdata):
892 def write(name, mode, islink, getdata):
893 if matchfn and not matchfn(name):
893 if matchfn and not matchfn(name):
894 return
894 return
895 data = getdata()
895 data = getdata()
896 if decode:
896 if decode:
897 data = repo.wwritedata(name, data)
897 data = repo.wwritedata(name, data)
898 archiver.addfile(prefix + name, mode, islink, data)
898 archiver.addfile(prefix + name, mode, islink, data)
899
899
900 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
900 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
901
901
902 if repo.ui.configbool("ui", "archivemeta", True):
902 if repo.ui.configbool("ui", "archivemeta", True):
903 def metadata():
903 def metadata():
904 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
904 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
905 hex(repo.changelog.node(0)), hex(node), ctx.branch())
905 hex(repo.changelog.node(0)), hex(node), ctx.branch())
906
906
907 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
907 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
908 if repo.tagtype(t) == 'global')
908 if repo.tagtype(t) == 'global')
909 if not tags:
909 if not tags:
910 repo.ui.pushbuffer()
910 repo.ui.pushbuffer()
911 opts = {'template': '{latesttag}\n{latesttagdistance}',
911 opts = {'template': '{latesttag}\n{latesttagdistance}',
912 'style': '', 'patch': None, 'git': None}
912 'style': '', 'patch': None, 'git': None}
913 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
913 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
914 ltags, dist = repo.ui.popbuffer().split('\n')
914 ltags, dist = repo.ui.popbuffer().split('\n')
915 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
915 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
916 tags += 'latesttagdistance: %s\n' % dist
916 tags += 'latesttagdistance: %s\n' % dist
917
917
918 return base + tags
918 return base + tags
919
919
920 write('.hg_archival.txt', 0644, False, metadata)
920 write('.hg_archival.txt', 0644, False, metadata)
921
921
922 for f in ctx:
922 for f in ctx:
923 ff = ctx.flags(f)
923 ff = ctx.flags(f)
924 getdata = ctx[f].data
924 getdata = ctx[f].data
925 if lfutil.isstandin(f):
925 if lfutil.isstandin(f):
926 path = lfutil.findfile(repo, getdata().strip())
926 path = lfutil.findfile(repo, getdata().strip())
927 if path is None:
927 if path is None:
928 raise util.Abort(
928 raise util.Abort(
929 _('largefile %s not found in repo store or system cache')
929 _('largefile %s not found in repo store or system cache')
930 % lfutil.splitstandin(f))
930 % lfutil.splitstandin(f))
931 f = lfutil.splitstandin(f)
931 f = lfutil.splitstandin(f)
932
932
933 def getdatafn():
933 def getdatafn():
934 fd = None
934 fd = None
935 try:
935 try:
936 fd = open(path, 'rb')
936 fd = open(path, 'rb')
937 return fd.read()
937 return fd.read()
938 finally:
938 finally:
939 if fd:
939 if fd:
940 fd.close()
940 fd.close()
941
941
942 getdata = getdatafn
942 getdata = getdatafn
943 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
943 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
944
944
945 if subrepos:
945 if subrepos:
946 for subpath in sorted(ctx.substate):
946 for subpath in sorted(ctx.substate):
947 sub = ctx.sub(subpath)
947 sub = ctx.sub(subpath)
948 submatch = match_.narrowmatcher(subpath, matchfn)
948 submatch = match_.narrowmatcher(subpath, matchfn)
949 sub.archive(archiver, prefix, submatch)
949 sub.archive(archiver, prefix, submatch)
950
950
951 archiver.done()
951 archiver.done()
952
952
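# Illustrative only: the metadata() helper in overridearchive() above emits a
# '.hg_archival.txt' shaped roughly like the string below when the archived
# changeset carries no global tag. Every value here is hypothetical.
sample_hg_archival = (
    'repo: 1234567890abcdef1234567890abcdef12345678\n'   # hex of revision 0
    'node: abcdef1234567890abcdef1234567890abcdef12\n'   # hex of archived node
    'branch: default\n'
    'latesttag: 1.0\n'
    'latesttagdistance: 42\n'
)
assert sample_hg_archival.startswith('repo: ')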
953 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
953 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
954 repo._get(repo._state + ('hg',))
954 repo._get(repo._state + ('hg',))
955 rev = repo._state[1]
955 rev = repo._state[1]
956 ctx = repo._repo[rev]
956 ctx = repo._repo[rev]
957
957
958 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
958 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
959
959
960 def write(name, mode, islink, getdata):
960 def write(name, mode, islink, getdata):
961 # At this point, the standin has been replaced with the largefile name,
961 # At this point, the standin has been replaced with the largefile name,
962 # so the normal matcher works here without the lfutil variants.
962 # so the normal matcher works here without the lfutil variants.
963 if match and not match(f):
963 if match and not match(f):
964 return
964 return
965 data = getdata()
965 data = getdata()
966
966
967 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
967 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
968
968
969 for f in ctx:
969 for f in ctx:
970 ff = ctx.flags(f)
970 ff = ctx.flags(f)
971 getdata = ctx[f].data
971 getdata = ctx[f].data
972 if lfutil.isstandin(f):
972 if lfutil.isstandin(f):
973 path = lfutil.findfile(repo._repo, getdata().strip())
973 path = lfutil.findfile(repo._repo, getdata().strip())
974 if path is None:
974 if path is None:
975 raise util.Abort(
975 raise util.Abort(
976 _('largefile %s not found in repo store or system cache')
976 _('largefile %s not found in repo store or system cache')
977 % lfutil.splitstandin(f))
977 % lfutil.splitstandin(f))
978 f = lfutil.splitstandin(f)
978 f = lfutil.splitstandin(f)
979
979
980 def getdatafn():
980 def getdatafn():
981 fd = None
981 fd = None
982 try:
982 try:
983 fd = open(os.path.join(prefix, path), 'rb')
983 fd = open(os.path.join(prefix, path), 'rb')
984 return fd.read()
984 return fd.read()
985 finally:
985 finally:
986 if fd:
986 if fd:
987 fd.close()
987 fd.close()
988
988
989 getdata = getdatafn
989 getdata = getdatafn
990
990
991 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
991 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
992
992
993 for subpath in sorted(ctx.substate):
993 for subpath in sorted(ctx.substate):
994 sub = ctx.sub(subpath)
994 sub = ctx.sub(subpath)
995 submatch = match_.narrowmatcher(subpath, match)
995 submatch = match_.narrowmatcher(subpath, match)
996 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
996 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
997
997
998 # If a largefile is modified, the change is not reflected in its
998 # If a largefile is modified, the change is not reflected in its
999 # standin until a commit. cmdutil.bailifchanged() raises an exception
999 # standin until a commit. cmdutil.bailifchanged() raises an exception
1000 # if the repo has uncommitted changes. Wrap it to also check if
1000 # if the repo has uncommitted changes. Wrap it to also check if
1001 # largefiles were changed. This is used by bisect, backout and fetch.
1001 # largefiles were changed. This is used by bisect, backout and fetch.
1002 def overridebailifchanged(orig, repo, *args, **kwargs):
1002 def overridebailifchanged(orig, repo, *args, **kwargs):
1003 orig(repo, *args, **kwargs)
1003 orig(repo, *args, **kwargs)
1004 repo.lfstatus = True
1004 repo.lfstatus = True
1005 s = repo.status()
1005 s = repo.status()
1006 repo.lfstatus = False
1006 repo.lfstatus = False
1007 if s.modified or s.added or s.removed or s.deleted:
1007 if s.modified or s.added or s.removed or s.deleted:
1008 raise util.Abort(_('uncommitted changes'))
1008 raise util.Abort(_('uncommitted changes'))
1009
1009
1010 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1010 def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
1011 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1011 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1012 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1012 bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
1013 m = composelargefilematcher(match, repo[None].manifest())
1013 m = composelargefilematcher(match, repo[None].manifest())
1014
1014
1015 try:
1015 try:
1016 repo.lfstatus = True
1016 repo.lfstatus = True
1017 s = repo.status(match=m, clean=True)
1017 s = repo.status(match=m, clean=True)
1018 finally:
1018 finally:
1019 repo.lfstatus = False
1019 repo.lfstatus = False
1020 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1020 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1021 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1021 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
1022
1022
1023 for f in forget:
1023 for f in forget:
1024 if lfutil.standin(f) not in repo.dirstate and not \
1024 if lfutil.standin(f) not in repo.dirstate and not \
1025 repo.wvfs.isdir(lfutil.standin(f)):
1025 repo.wvfs.isdir(lfutil.standin(f)):
1026 ui.warn(_('not removing %s: file is already untracked\n')
1026 ui.warn(_('not removing %s: file is already untracked\n')
1027 % m.rel(f))
1027 % m.rel(f))
1028 bad.append(f)
1028 bad.append(f)
1029
1029
1030 for f in forget:
1030 for f in forget:
1031 if ui.verbose or not m.exact(f):
1031 if ui.verbose or not m.exact(f):
1032 ui.status(_('removing %s\n') % m.rel(f))
1032 ui.status(_('removing %s\n') % m.rel(f))
1033
1033
1034 # Need to lock because standin files are deleted then removed from the
1034 # Need to lock because standin files are deleted then removed from the
1035 # repository and we could race in-between.
1035 # repository and we could race in-between.
1036 wlock = repo.wlock()
1036 wlock = repo.wlock()
1037 try:
1037 try:
1038 lfdirstate = lfutil.openlfdirstate(ui, repo)
1038 lfdirstate = lfutil.openlfdirstate(ui, repo)
1039 for f in forget:
1039 for f in forget:
1040 if lfdirstate[f] == 'a':
1040 if lfdirstate[f] == 'a':
1041 lfdirstate.drop(f)
1041 lfdirstate.drop(f)
1042 else:
1042 else:
1043 lfdirstate.remove(f)
1043 lfdirstate.remove(f)
1044 lfdirstate.write()
1044 lfdirstate.write()
1045 standins = [lfutil.standin(f) for f in forget]
1045 standins = [lfutil.standin(f) for f in forget]
1046 for f in standins:
1046 for f in standins:
1047 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1047 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
1048 rejected = repo[None].forget(standins)
1048 rejected = repo[None].forget(standins)
1049 finally:
1049 finally:
1050 wlock.release()
1050 wlock.release()
1051
1051
1052 bad.extend(f for f in rejected if f in m.files())
1052 bad.extend(f for f in rejected if f in m.files())
1053 forgot.extend(f for f in forget if f not in rejected)
1053 forgot.extend(f for f in forget if f not in rejected)
1054 return bad, forgot
1054 return bad, forgot
1055
1055
1056 def _getoutgoings(repo, other, missing, addfunc):
1056 def _getoutgoings(repo, other, missing, addfunc):
1057 """get pairs of filename and largefile hash in outgoing revisions
1057 """get pairs of filename and largefile hash in outgoing revisions
1058 in 'missing'.
1058 in 'missing'.
1059
1059
1060 largefiles already existing on the 'other' repository are ignored.
1060 largefiles already existing on the 'other' repository are ignored.
1061
1061
1062 'addfunc' is invoked with each unique pair of filename and
1062 'addfunc' is invoked with each unique pair of filename and
1063 largefile hash value.
1063 largefile hash value.
1064 """
1064 """
1065 knowns = set()
1065 knowns = set()
1066 lfhashes = set()
1066 lfhashes = set()
1067 def dedup(fn, lfhash):
1067 def dedup(fn, lfhash):
1068 k = (fn, lfhash)
1068 k = (fn, lfhash)
1069 if k not in knowns:
1069 if k not in knowns:
1070 knowns.add(k)
1070 knowns.add(k)
1071 lfhashes.add(lfhash)
1071 lfhashes.add(lfhash)
1072 lfutil.getlfilestoupload(repo, missing, dedup)
1072 lfutil.getlfilestoupload(repo, missing, dedup)
1073 if lfhashes:
1073 if lfhashes:
1074 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1074 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1075 for fn, lfhash in knowns:
1075 for fn, lfhash in knowns:
1076 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1076 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1077 addfunc(fn, lfhash)
1077 addfunc(fn, lfhash)
1078
1078
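# Illustrative only: the dedup() closure above guarantees that addfunc sees
# each (filename, hash) pair at most once, even when the same largefile
# content is referenced by several outgoing revisions. The filenames and
# hashes below are made up.
calls = []
seen = set()
def _dedup(fn, lfhash):
    # mirrors dedup() in _getoutgoings(), recording calls for the assertion
    if (fn, lfhash) not in seen:
        seen.add((fn, lfhash))
        calls.append((fn, lfhash))
for fn, lfhash in [('big.bin', 'a1'), ('big.bin', 'a1'), ('big.bin', 'b2')]:
    _dedup(fn, lfhash)
assert calls == [('big.bin', 'a1'), ('big.bin', 'b2')]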
1079 def outgoinghook(ui, repo, other, opts, missing):
1079 def outgoinghook(ui, repo, other, opts, missing):
1080 if opts.pop('large', None):
1080 if opts.pop('large', None):
1081 lfhashes = set()
1081 lfhashes = set()
1082 if ui.debugflag:
1082 if ui.debugflag:
1083 toupload = {}
1083 toupload = {}
1084 def addfunc(fn, lfhash):
1084 def addfunc(fn, lfhash):
1085 if fn not in toupload:
1085 if fn not in toupload:
1086 toupload[fn] = []
1086 toupload[fn] = []
1087 toupload[fn].append(lfhash)
1087 toupload[fn].append(lfhash)
1088 lfhashes.add(lfhash)
1088 lfhashes.add(lfhash)
1089 def showhashes(fn):
1089 def showhashes(fn):
1090 for lfhash in sorted(toupload[fn]):
1090 for lfhash in sorted(toupload[fn]):
1091 ui.debug(' %s\n' % (lfhash))
1091 ui.debug(' %s\n' % (lfhash))
1092 else:
1092 else:
1093 toupload = set()
1093 toupload = set()
1094 def addfunc(fn, lfhash):
1094 def addfunc(fn, lfhash):
1095 toupload.add(fn)
1095 toupload.add(fn)
1096 lfhashes.add(lfhash)
1096 lfhashes.add(lfhash)
1097 def showhashes(fn):
1097 def showhashes(fn):
1098 pass
1098 pass
1099 _getoutgoings(repo, other, missing, addfunc)
1099 _getoutgoings(repo, other, missing, addfunc)
1100
1100
1101 if not toupload:
1101 if not toupload:
1102 ui.status(_('largefiles: no files to upload\n'))
1102 ui.status(_('largefiles: no files to upload\n'))
1103 else:
1103 else:
1104 ui.status(_('largefiles to upload (%d entities):\n')
1104 ui.status(_('largefiles to upload (%d entities):\n')
1105 % (len(lfhashes)))
1105 % (len(lfhashes)))
1106 for file in sorted(toupload):
1106 for file in sorted(toupload):
1107 ui.status(lfutil.splitstandin(file) + '\n')
1107 ui.status(lfutil.splitstandin(file) + '\n')
1108 showhashes(file)
1108 showhashes(file)
1109 ui.status('\n')
1109 ui.status('\n')
1110
1110
1111 def summaryremotehook(ui, repo, opts, changes):
1111 def summaryremotehook(ui, repo, opts, changes):
1112 largeopt = opts.get('large', False)
1112 largeopt = opts.get('large', False)
1113 if changes is None:
1113 if changes is None:
1114 if largeopt:
1114 if largeopt:
1115 return (False, True) # only outgoing check is needed
1115 return (False, True) # only outgoing check is needed
1116 else:
1116 else:
1117 return (False, False)
1117 return (False, False)
1118 elif largeopt:
1118 elif largeopt:
1119 url, branch, peer, outgoing = changes[1]
1119 url, branch, peer, outgoing = changes[1]
1120 if peer is None:
1120 if peer is None:
1121 # i18n: column positioning for "hg summary"
1121 # i18n: column positioning for "hg summary"
1122 ui.status(_('largefiles: (no remote repo)\n'))
1122 ui.status(_('largefiles: (no remote repo)\n'))
1123 return
1123 return
1124
1124
1125 toupload = set()
1125 toupload = set()
1126 lfhashes = set()
1126 lfhashes = set()
1127 def addfunc(fn, lfhash):
1127 def addfunc(fn, lfhash):
1128 toupload.add(fn)
1128 toupload.add(fn)
1129 lfhashes.add(lfhash)
1129 lfhashes.add(lfhash)
1130 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1130 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1131
1131
1132 if not toupload:
1132 if not toupload:
1133 # i18n: column positioning for "hg summary"
1133 # i18n: column positioning for "hg summary"
1134 ui.status(_('largefiles: (no files to upload)\n'))
1134 ui.status(_('largefiles: (no files to upload)\n'))
1135 else:
1135 else:
1136 # i18n: column positioning for "hg summary"
1136 # i18n: column positioning for "hg summary"
1137 ui.status(_('largefiles: %d entities for %d files to upload\n')
1137 ui.status(_('largefiles: %d entities for %d files to upload\n')
1138 % (len(lfhashes), len(toupload)))
1138 % (len(lfhashes), len(toupload)))
1139
1139
1140 def overridesummary(orig, ui, repo, *pats, **opts):
1140 def overridesummary(orig, ui, repo, *pats, **opts):
1141 try:
1141 try:
1142 repo.lfstatus = True
1142 repo.lfstatus = True
1143 orig(ui, repo, *pats, **opts)
1143 orig(ui, repo, *pats, **opts)
1144 finally:
1144 finally:
1145 repo.lfstatus = False
1145 repo.lfstatus = False
1146
1146
1147 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1147 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1148 similarity=None):
1148 similarity=None):
1149 if not lfutil.islfilesrepo(repo):
1149 if not lfutil.islfilesrepo(repo):
1150 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1150 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1151 # Get the list of missing largefiles so we can remove them
1151 # Get the list of missing largefiles so we can remove them
1152 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1152 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1153 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1153 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1154 False, False, False)
1154 False, False, False)
1155
1155
1156 # Call into the normal remove code, but leave the removal of the standin
1156 # Call into the normal remove code, but leave the removal of the standin
1157 # to the original addremove. Monkey patching here makes sure
1157 # to the original addremove. Monkey patching here makes sure
1158 # we don't remove the standin in the largefiles code, preventing a very
1158 # we don't remove the standin in the largefiles code, preventing a very
1159 # confused state later.
1159 # confused state later.
1160 if s.deleted:
1160 if s.deleted:
1161 m = copy.copy(matcher)
1161 m = copy.copy(matcher)
1162
1162
1163 # The m._files and m._map attributes are not changed to the deleted list
1163 # The m._files and m._map attributes are not changed to the deleted list
1164 # because that affects the m.exact() test, which in turn governs whether
1164 # because that affects the m.exact() test, which in turn governs whether
1165 # or not the file name is printed, and how. Simply limit the original
1165 # or not the file name is printed, and how. Simply limit the original
1166 # matches to those in the deleted status list.
1166 # matches to those in the deleted status list.
1167 matchfn = m.matchfn
1167 matchfn = m.matchfn
1168 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1168 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1169
1169
1170 removelargefiles(repo.ui, repo, True, m, **opts)
1170 removelargefiles(repo.ui, repo, True, m, **opts)
1171 # Call into the normal add code, and any files that *should* be added as
1171 # Call into the normal add code, and any files that *should* be added as
1172 # largefiles will be
1172 # largefiles will be
1173 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1173 added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
1174 # Now that we've handled largefiles, hand off to the original addremove
1174 # Now that we've handled largefiles, hand off to the original addremove
1175 # function to take care of the rest. Make sure it doesn't do anything with
1175 # function to take care of the rest. Make sure it doesn't do anything with
1176 # largefiles by passing a matcher that will ignore them.
1176 # largefiles by passing a matcher that will ignore them.
1177 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1177 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1178 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1178 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1179
1179
1180 # Calling purge with --all will cause the largefiles to be deleted.
1180 # Calling purge with --all will cause the largefiles to be deleted.
1181 # Override repo.status to prevent this from happening.
1181 # Override repo.status to prevent this from happening.
1182 def overridepurge(orig, ui, repo, *dirs, **opts):
1182 def overridepurge(orig, ui, repo, *dirs, **opts):
1183 # XXX Monkey patching a repoview will not work. The assigned attribute will
1183 # XXX Monkey patching a repoview will not work. The assigned attribute will
1184 # be set on the unfiltered repo, but we will only look up attributes in the
1184 # be set on the unfiltered repo, but we will only look up attributes in the
1185 # unfiltered repo if the lookup in the repoview object itself fails. As the
1185 # unfiltered repo if the lookup in the repoview object itself fails. As the
1186 # monkey patched method exists on the repoview class the lookup will not
1186 # monkey patched method exists on the repoview class the lookup will not
1187 # fail. As a result, the original version will shadow the monkey patched
1187 # fail. As a result, the original version will shadow the monkey patched
1188 # one, defeating the monkey patch.
1188 # one, defeating the monkey patch.
1189 #
1189 #
1190 # As a workaround we use an unfiltered repo here. We should do something
1190 # As a workaround we use an unfiltered repo here. We should do something
1191 # cleaner instead.
1191 # cleaner instead.
1192 repo = repo.unfiltered()
1192 repo = repo.unfiltered()
1193 oldstatus = repo.status
1193 oldstatus = repo.status
1194 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1194 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1195 clean=False, unknown=False, listsubrepos=False):
1195 clean=False, unknown=False, listsubrepos=False):
1196 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1196 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1197 listsubrepos)
1197 listsubrepos)
1198 lfdirstate = lfutil.openlfdirstate(ui, repo)
1198 lfdirstate = lfutil.openlfdirstate(ui, repo)
1199 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1199 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1200 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1200 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1201 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1201 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1202 unknown, ignored, r.clean)
1202 unknown, ignored, r.clean)
1203 repo.status = overridestatus
1203 repo.status = overridestatus
1204 orig(ui, repo, *dirs, **opts)
1204 orig(ui, repo, *dirs, **opts)
1205 repo.status = oldstatus
1205 repo.status = oldstatus
1206 def overriderollback(orig, ui, repo, **opts):
1206 def overriderollback(orig, ui, repo, **opts):
1207 wlock = repo.wlock()
1207 wlock = repo.wlock()
1208 try:
1208 try:
1209 before = repo.dirstate.parents()
1209 before = repo.dirstate.parents()
1210 orphans = set(f for f in repo.dirstate
1210 orphans = set(f for f in repo.dirstate
1211 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1211 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1212 result = orig(ui, repo, **opts)
1212 result = orig(ui, repo, **opts)
1213 after = repo.dirstate.parents()
1213 after = repo.dirstate.parents()
1214 if before == after:
1214 if before == after:
1215 return result # no need to restore standins
1215 return result # no need to restore standins
1216
1216
1217 pctx = repo['.']
1217 pctx = repo['.']
1218 for f in repo.dirstate:
1218 for f in repo.dirstate:
1219 if lfutil.isstandin(f):
1219 if lfutil.isstandin(f):
1220 orphans.discard(f)
1220 orphans.discard(f)
1221 if repo.dirstate[f] == 'r':
1221 if repo.dirstate[f] == 'r':
1222 repo.wvfs.unlinkpath(f, ignoremissing=True)
1222 repo.wvfs.unlinkpath(f, ignoremissing=True)
1223 elif f in pctx:
1223 elif f in pctx:
1224 fctx = pctx[f]
1224 fctx = pctx[f]
1225 repo.wwrite(f, fctx.data(), fctx.flags())
1225 repo.wwrite(f, fctx.data(), fctx.flags())
1226 else:
1226 else:
1227 # content of standin is not so important in 'a',
1227 # content of standin is not so important in 'a',
1228 # 'm' or 'n' (coming from the 2nd parent) cases
1228 # 'm' or 'n' (coming from the 2nd parent) cases
1229 lfutil.writestandin(repo, f, '', False)
1229 lfutil.writestandin(repo, f, '', False)
1230 for standin in orphans:
1230 for standin in orphans:
1231 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1231 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1232
1232
1233 lfdirstate = lfutil.openlfdirstate(ui, repo)
1233 lfdirstate = lfutil.openlfdirstate(ui, repo)
1234 orphans = set(lfdirstate)
1234 orphans = set(lfdirstate)
1235 lfiles = lfutil.listlfiles(repo)
1235 lfiles = lfutil.listlfiles(repo)
1236 for file in lfiles:
1236 for file in lfiles:
1237 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1237 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1238 orphans.discard(file)
1238 orphans.discard(file)
1239 for lfile in orphans:
1239 for lfile in orphans:
1240 lfdirstate.drop(lfile)
1240 lfdirstate.drop(lfile)
1241 lfdirstate.write()
1241 lfdirstate.write()
1242 finally:
1242 finally:
1243 wlock.release()
1243 wlock.release()
1244 return result
1244 return result
1245
1245
1246 def overridetransplant(orig, ui, repo, *revs, **opts):
1246 def overridetransplant(orig, ui, repo, *revs, **opts):
1247 resuming = opts.get('continue')
1247 resuming = opts.get('continue')
1248 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1248 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1249 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1249 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1250 try:
1250 try:
1251 result = orig(ui, repo, *revs, **opts)
1251 result = orig(ui, repo, *revs, **opts)
1252 finally:
1252 finally:
1253 repo._lfstatuswriters.pop()
1253 repo._lfstatuswriters.pop()
1254 repo._lfcommithooks.pop()
1254 repo._lfcommithooks.pop()
1255 return result
1255 return result
1256
1256
1257 def overridecat(orig, ui, repo, file1, *pats, **opts):
1257 def overridecat(orig, ui, repo, file1, *pats, **opts):
1258 ctx = scmutil.revsingle(repo, opts.get('rev'))
1258 ctx = scmutil.revsingle(repo, opts.get('rev'))
1259 err = 1
1259 err = 1
1260 notbad = set()
1260 notbad = set()
1261 m = scmutil.match(ctx, (file1,) + pats, opts)
1261 m = scmutil.match(ctx, (file1,) + pats, opts)
1262 origmatchfn = m.matchfn
1262 origmatchfn = m.matchfn
1263 def lfmatchfn(f):
1263 def lfmatchfn(f):
1264 if origmatchfn(f):
1264 if origmatchfn(f):
1265 return True
1265 return True
1266 lf = lfutil.splitstandin(f)
1266 lf = lfutil.splitstandin(f)
1267 if lf is None:
1267 if lf is None:
1268 return False
1268 return False
1269 notbad.add(lf)
1269 notbad.add(lf)
1270 return origmatchfn(lf)
1270 return origmatchfn(lf)
1271 m.matchfn = lfmatchfn
1271 m.matchfn = lfmatchfn
1272 origbadfn = m.bad
1272 origbadfn = m.bad
1273 def lfbadfn(f, msg):
1273 def lfbadfn(f, msg):
1274 if not f in notbad:
1274 if not f in notbad:
1275 origbadfn(f, msg)
1275 origbadfn(f, msg)
1276 m.bad = lfbadfn
1276 m.bad = lfbadfn
1277 for f in ctx.walk(m):
1277 for f in ctx.walk(m):
1278 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1278 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1279 pathname=f)
1279 pathname=f)
1280 lf = lfutil.splitstandin(f)
1280 lf = lfutil.splitstandin(f)
1281 if lf is None or origmatchfn(f):
1281 if lf is None or origmatchfn(f):
1282 # duplicating unreachable code from commands.cat
1282 # duplicating unreachable code from commands.cat
1283 data = ctx[f].data()
1283 data = ctx[f].data()
1284 if opts.get('decode'):
1284 if opts.get('decode'):
1285 data = repo.wwritedata(f, data)
1285 data = repo.wwritedata(f, data)
1286 fp.write(data)
1286 fp.write(data)
1287 else:
1287 else:
1288 hash = lfutil.readstandin(repo, lf, ctx.rev())
1288 hash = lfutil.readstandin(repo, lf, ctx.rev())
1289 if not lfutil.inusercache(repo.ui, hash):
1289 if not lfutil.inusercache(repo.ui, hash):
1290 store = basestore._openstore(repo)
1290 store = basestore._openstore(repo)
1291 success, missing = store.get([(lf, hash)])
1291 success, missing = store.get([(lf, hash)])
1292 if len(success) != 1:
1292 if len(success) != 1:
1293 raise util.Abort(
1293 raise util.Abort(
1294 _('largefile %s is not in cache and could not be '
1294 _('largefile %s is not in cache and could not be '
1295 'downloaded') % lf)
1295 'downloaded') % lf)
1296 path = lfutil.usercachepath(repo.ui, hash)
1296 path = lfutil.usercachepath(repo.ui, hash)
1297 fpin = open(path, "rb")
1297 fpin = open(path, "rb")
1298 for chunk in util.filechunkiter(fpin, 128 * 1024):
1298 for chunk in util.filechunkiter(fpin, 128 * 1024):
1299 fp.write(chunk)
1299 fp.write(chunk)
1300 fpin.close()
1300 fpin.close()
1301 fp.close()
1301 fp.close()
1302 err = 0
1302 err = 0
1303 return err
1303 return err
1304
1304
1305 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1305 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1306 *args, **kwargs):
1306 *args, **kwargs):
1307 wlock = repo.wlock()
1307 wlock = repo.wlock()
1308 try:
1308 try:
1309 # branch | | |
1309 # branch | | |
1310 # merge | force | partial | action
1310 # merge | force | partial | action
1311 # -------+-------+---------+--------------
1311 # -------+-------+---------+--------------
1312 # x | x | x | linear-merge
1312 # x | x | x | linear-merge
1313 # o | x | x | branch-merge
1313 # o | x | x | branch-merge
1314 # x | o | x | overwrite (as clean update)
1314 # x | o | x | overwrite (as clean update)
1315 # o | o | x | force-branch-merge (*1)
1315 # o | o | x | force-branch-merge (*1)
1316 # x | x | o | (*)
1316 # x | x | o | (*)
1317 # o | x | o | (*)
1317 # o | x | o | (*)
1318 # x | o | o | overwrite (as revert)
1318 # x | o | o | overwrite (as revert)
1319 # o | o | o | (*)
1319 # o | o | o | (*)
1320 #
1320 #
1321 # (*) don't care
1321 # (*) don't care
1322 # (*1) deprecated, but used internally (e.g.: "rebase --collapse")
1322 # (*1) deprecated, but used internally (e.g.: "rebase --collapse")
1323
1323
1324 linearmerge = not branchmerge and not force and not partial
1324 linearmerge = not branchmerge and not force and not partial
1325
1325
1326 if linearmerge or (branchmerge and force and not partial):
1326 if linearmerge or (branchmerge and force and not partial):
1327 # update standins for linear-merge or force-branch-merge,
1327 # update standins for linear-merge or force-branch-merge,
1328 # because largefiles in the working directory may be modified
1328 # because largefiles in the working directory may be modified
1329 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1329 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1330 unsure, s = lfdirstate.status(match_.always(repo.root,
1330 unsure, s = lfdirstate.status(match_.always(repo.root,
1331 repo.getcwd()),
1331 repo.getcwd()),
1332 [], False, False, False)
1332 [], False, False, False)
1333 pctx = repo['.']
1333 pctx = repo['.']
1334 for lfile in unsure + s.modified:
1334 for lfile in unsure + s.modified:
1335 lfileabs = repo.wvfs.join(lfile)
1335 lfileabs = repo.wvfs.join(lfile)
1336 if not os.path.exists(lfileabs):
1336 if not os.path.exists(lfileabs):
1337 continue
1337 continue
1338 lfhash = lfutil.hashrepofile(repo, lfile)
1338 lfhash = lfutil.hashrepofile(repo, lfile)
1339 standin = lfutil.standin(lfile)
1339 standin = lfutil.standin(lfile)
1340 lfutil.writestandin(repo, standin, lfhash,
1340 lfutil.writestandin(repo, standin, lfhash,
1341 lfutil.getexecutable(lfileabs))
1341 lfutil.getexecutable(lfileabs))
1342 if (standin in pctx and
1342 if (standin in pctx and
1343 lfhash == lfutil.readstandin(repo, lfile, '.')):
1343 lfhash == lfutil.readstandin(repo, lfile, '.')):
1344 lfdirstate.normal(lfile)
1344 lfdirstate.normal(lfile)
1345 for lfile in s.added:
1345 for lfile in s.added:
1346 lfutil.updatestandin(repo, lfutil.standin(lfile))
1346 lfutil.updatestandin(repo, lfutil.standin(lfile))
1347 lfdirstate.write()
1347 lfdirstate.write()
1348
1348
1349 if linearmerge:
1349 if linearmerge:
1350 # Only call updatelfiles on the standins that have changed
1350 # Only call updatelfiles on the standins that have changed
1351 # to save time
1351 # to save time
1352 oldstandins = lfutil.getstandinsstate(repo)
1352 oldstandins = lfutil.getstandinsstate(repo)
1353
1353
1354 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1354 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1355
1355
1356 filelist = None
1356 filelist = None
1357 if linearmerge:
1357 if linearmerge:
1358 newstandins = lfutil.getstandinsstate(repo)
1358 newstandins = lfutil.getstandinsstate(repo)
1359 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1359 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1360
1360
1361 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1361 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1362 normallookup=partial, checked=linearmerge)
1362 normallookup=partial, checked=linearmerge)
1363
1363
1364 return result
1364 return result
1365 finally:
1365 finally:
1366 wlock.release()
1366 wlock.release()
1367
1367
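# A standalone sketch (not used by mergeupdate() above) that restates its
# branch/force/partial decision table as a function, for readability only.
def _updateaction(branchmerge, force, partial):
    # returns the action named in the table inside mergeupdate()
    if partial:
        if force and not branchmerge:
            return 'overwrite (as revert)'
        return "don't care"
    if force:
        if branchmerge:
            return 'force-branch-merge'
        return 'overwrite (as clean update)'
    return 'branch-merge' if branchmerge else 'linear-merge'

assert _updateaction(False, False, False) == 'linear-merge'
assert _updateaction(True, True, False) == 'force-branch-merge'
assert _updateaction(False, True, True) == 'overwrite (as revert)'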
1368 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1368 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1369 result = orig(repo, files, *args, **kwargs)
1369 result = orig(repo, files, *args, **kwargs)
1370
1370
1371 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1371 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1372 if filelist:
1372 if filelist:
1373 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1373 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1374 printmessage=False, normallookup=True)
1374 printmessage=False, normallookup=True)
1375
1375
1376 return result
1376 return result
@@ -1,3242 +1,3242 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile, cStringIO, shutil
10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import encoding
16 import encoding
17 import crecord as crecordmod
17 import crecord as crecordmod
18 import lock as lockmod
18 import lock as lockmod
19
19
20 def parsealiases(cmd):
20 def parsealiases(cmd):
21 return cmd.lstrip("^").split("|")
21 return cmd.lstrip("^").split("|")
22
22
23 def setupwrapcolorwrite(ui):
23 def setupwrapcolorwrite(ui):
24 # wrap ui.write so diff output can be labeled/colorized
24 # wrap ui.write so diff output can be labeled/colorized
25 def wrapwrite(orig, *args, **kw):
25 def wrapwrite(orig, *args, **kw):
26 label = kw.pop('label', '')
26 label = kw.pop('label', '')
27 for chunk, l in patch.difflabel(lambda: args):
27 for chunk, l in patch.difflabel(lambda: args):
28 orig(chunk, label=label + l)
28 orig(chunk, label=label + l)
29
29
30 oldwrite = ui.write
30 oldwrite = ui.write
31 def wrap(*args, **kwargs):
31 def wrap(*args, **kwargs):
32 return wrapwrite(oldwrite, *args, **kwargs)
32 return wrapwrite(oldwrite, *args, **kwargs)
33 setattr(ui, 'write', wrap)
33 setattr(ui, 'write', wrap)
34 return oldwrite
34 return oldwrite
35
35
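# A minimal sketch (not part of cmdutil) of the wrap-and-restore pattern that
# setupwrapcolorwrite() uses: swap in a wrapper for ui.write, keep the old
# callable, and put it back afterwards. The _FakeUI class is invented here.
class _FakeUI(object):
    def write(self, *args, **kw):
        return args

_fakeui = _FakeUI()
_oldwrite = _fakeui.write
setattr(_fakeui, 'write', lambda *a, **kw: _oldwrite(*(('label',) + a), **kw))
assert _fakeui.write('chunk') == ('label', 'chunk')
_fakeui.write = _oldwrite     # restore the original, as recordfilter() does
assert _fakeui.write('chunk') == ('chunk',)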
36 def filterchunks(ui, originalhunks, usecurses, testfile):
36 def filterchunks(ui, originalhunks, usecurses, testfile):
37 if usecurses:
37 if usecurses:
38 if testfile:
38 if testfile:
39 recordfn = crecordmod.testdecorator(testfile,
39 recordfn = crecordmod.testdecorator(testfile,
40 crecordmod.testchunkselector)
40 crecordmod.testchunkselector)
41 else:
41 else:
42 recordfn = crecordmod.chunkselector
42 recordfn = crecordmod.chunkselector
43
43
44 return crecordmod.filterpatch(ui, originalhunks, recordfn)
44 return crecordmod.filterpatch(ui, originalhunks, recordfn)
45
45
46 else:
46 else:
47 return patch.filterpatch(ui, originalhunks)
47 return patch.filterpatch(ui, originalhunks)
48
48
49 def recordfilter(ui, originalhunks):
49 def recordfilter(ui, originalhunks):
50 usecurses = ui.configbool('experimental', 'crecord', False)
50 usecurses = ui.configbool('experimental', 'crecord', False)
51 testfile = ui.config('experimental', 'crecordtest', None)
51 testfile = ui.config('experimental', 'crecordtest', None)
52 oldwrite = setupwrapcolorwrite(ui)
52 oldwrite = setupwrapcolorwrite(ui)
53 try:
53 try:
54 newchunks = filterchunks(ui, originalhunks, usecurses, testfile)
54 newchunks = filterchunks(ui, originalhunks, usecurses, testfile)
55 finally:
55 finally:
56 ui.write = oldwrite
56 ui.write = oldwrite
57 return newchunks
57 return newchunks
58
58
59 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
59 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
60 filterfn, *pats, **opts):
60 filterfn, *pats, **opts):
61 import merge as mergemod
61 import merge as mergemod
62 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
62 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
63 ishunk = lambda x: isinstance(x, hunkclasses)
63 ishunk = lambda x: isinstance(x, hunkclasses)
64
64
65 if not ui.interactive():
65 if not ui.interactive():
66 raise util.Abort(_('running non-interactively, use %s instead') %
66 raise util.Abort(_('running non-interactively, use %s instead') %
67 cmdsuggest)
67 cmdsuggest)
68
68
69 # make sure username is set before going interactive
69 # make sure username is set before going interactive
70 if not opts.get('user'):
70 if not opts.get('user'):
71 ui.username() # raise exception, username not provided
71 ui.username() # raise exception, username not provided
72
72
73 def recordfunc(ui, repo, message, match, opts):
73 def recordfunc(ui, repo, message, match, opts):
74 """This is the generic record driver.
74 """This is the generic record driver.
75
75
76 Its job is to interactively filter local changes, and
76 Its job is to interactively filter local changes, and
77 accordingly prepare the working directory into a state in which the
77 accordingly prepare the working directory into a state in which the
78 job can be delegated to a non-interactive commit command such as
78 job can be delegated to a non-interactive commit command such as
79 'commit' or 'qrefresh'.
79 'commit' or 'qrefresh'.
80
80
81 After the actual job is done by non-interactive command, the
81 After the actual job is done by non-interactive command, the
82 working directory is restored to its original state.
82 working directory is restored to its original state.
83
83
84 In the end we'll record interesting changes, and everything else
84 In the end we'll record interesting changes, and everything else
85 will be left in place, so the user can continue working.
85 will be left in place, so the user can continue working.
86 """
86 """
87
87
88 checkunfinished(repo, commit=True)
88 checkunfinished(repo, commit=True)
89 merge = len(repo[None].parents()) > 1
89 merge = len(repo[None].parents()) > 1
90 if merge:
90 if merge:
91 raise util.Abort(_('cannot partially commit a merge '
91 raise util.Abort(_('cannot partially commit a merge '
92 '(use "hg commit" instead)'))
92 '(use "hg commit" instead)'))
93
93
94 status = repo.status(match=match)
94 status = repo.status(match=match)
95 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
95 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
96 diffopts.nodates = True
96 diffopts.nodates = True
97 diffopts.git = True
97 diffopts.git = True
98 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
98 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
99 originalchunks = patch.parsepatch(originaldiff)
99 originalchunks = patch.parsepatch(originaldiff)
100
100
101 # 1. filter the patch, so we have the subset of it we intend to apply
101 # 1. filter the patch, so we have the subset of it we intend to apply
102 try:
102 try:
103 chunks = filterfn(ui, originalchunks)
103 chunks = filterfn(ui, originalchunks)
104 except patch.PatchError, err:
104 except patch.PatchError, err:
105 raise util.Abort(_('error parsing patch: %s') % err)
105 raise util.Abort(_('error parsing patch: %s') % err)
106
106
107 contenders = set()
107 contenders = set()
108 for h in chunks:
108 for h in chunks:
109 try:
109 try:
110 contenders.update(set(h.files()))
110 contenders.update(set(h.files()))
111 except AttributeError:
111 except AttributeError:
112 pass
112 pass
113
113
114 changed = status.modified + status.added + status.removed
114 changed = status.modified + status.added + status.removed
115 newfiles = [f for f in changed if f in contenders]
115 newfiles = [f for f in changed if f in contenders]
116 if not newfiles:
116 if not newfiles:
117 ui.status(_('no changes to record\n'))
117 ui.status(_('no changes to record\n'))
118 return 0
118 return 0
119
119
120 newandmodifiedfiles = set()
120 newandmodifiedfiles = set()
121 for h in chunks:
121 for h in chunks:
122 isnew = h.filename() in status.added
122 isnew = h.filename() in status.added
123 if ishunk(h) and isnew and not h in originalchunks:
123 if ishunk(h) and isnew and not h in originalchunks:
124 newandmodifiedfiles.add(h.filename())
124 newandmodifiedfiles.add(h.filename())
125
125
126 modified = set(status.modified)
126 modified = set(status.modified)
127
127
128 # 2. backup changed files, so we can restore them in the end
128 # 2. backup changed files, so we can restore them in the end
129
129
130 if backupall:
130 if backupall:
131 tobackup = changed
131 tobackup = changed
132 else:
132 else:
133 tobackup = [f for f in newfiles
133 tobackup = [f for f in newfiles
134 if f in modified or f in newandmodifiedfiles]
134 if f in modified or f in newandmodifiedfiles]
135
135
136 backups = {}
136 backups = {}
137 if tobackup:
137 if tobackup:
138 backupdir = repo.join('record-backups')
138 backupdir = repo.join('record-backups')
139 try:
139 try:
140 os.mkdir(backupdir)
140 os.mkdir(backupdir)
141 except OSError, err:
141 except OSError, err:
142 if err.errno != errno.EEXIST:
142 if err.errno != errno.EEXIST:
143 raise
143 raise
144 try:
144 try:
145 # backup continues
145 # backup continues
146 for f in tobackup:
146 for f in tobackup:
147 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
147 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
148 dir=backupdir)
148 dir=backupdir)
149 os.close(fd)
149 os.close(fd)
150 ui.debug('backup %r as %r\n' % (f, tmpname))
150 ui.debug('backup %r as %r\n' % (f, tmpname))
151 util.copyfile(repo.wjoin(f), tmpname)
151 util.copyfile(repo.wjoin(f), tmpname)
152 shutil.copystat(repo.wjoin(f), tmpname)
152 shutil.copystat(repo.wjoin(f), tmpname)
153 backups[f] = tmpname
153 backups[f] = tmpname
154
154
155 fp = cStringIO.StringIO()
155 fp = cStringIO.StringIO()
156 for c in chunks:
156 for c in chunks:
157 fname = c.filename()
157 fname = c.filename()
158 if fname in backups or fname in newandmodifiedfiles:
158 if fname in backups or fname in newandmodifiedfiles:
159 c.write(fp)
159 c.write(fp)
160 dopatch = fp.tell()
160 dopatch = fp.tell()
161 fp.seek(0)
161 fp.seek(0)
162
162
163 [os.unlink(c) for c in newandmodifiedfiles]
163 [os.unlink(c) for c in newandmodifiedfiles]
164
164
165 # 3a. apply filtered patch to clean repo (clean)
165 # 3a. apply filtered patch to clean repo (clean)
166 if backups:
166 if backups:
167 # Equivalent to hg.revert
167 # Equivalent to hg.revert
168 choices = lambda key: key in backups
168 choices = lambda key: key in backups
169 mergemod.update(repo, repo.dirstate.p1(),
169 mergemod.update(repo, repo.dirstate.p1(),
170 False, True, choices)
170 False, True, choices)
171
171
172 # 3b. (apply)
172 # 3b. (apply)
173 if dopatch:
173 if dopatch:
174 try:
174 try:
175 ui.debug('applying patch\n')
175 ui.debug('applying patch\n')
176 ui.debug(fp.getvalue())
176 ui.debug(fp.getvalue())
177 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
177 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
178 except patch.PatchError, err:
178 except patch.PatchError, err:
179 raise util.Abort(str(err))
179 raise util.Abort(str(err))
180 del fp
180 del fp
181
181
182 # 4. We prepared the working directory according to the filtered
182 # 4. We prepared the working directory according to the filtered
183 # patch. Now is the time to delegate the job to
183 # patch. Now is the time to delegate the job to
184 # commit/qrefresh or the like!
184 # commit/qrefresh or the like!
185
185
186 # Make all of the pathnames absolute.
186 # Make all of the pathnames absolute.
187 newfiles = [repo.wjoin(nf) for nf in newfiles]
187 newfiles = [repo.wjoin(nf) for nf in newfiles]
188 return commitfunc(ui, repo, *newfiles, **opts)
188 return commitfunc(ui, repo, *newfiles, **opts)
189 finally:
189 finally:
190 # 5. finally restore backed-up files
190 # 5. finally restore backed-up files
191 try:
191 try:
192 for realname, tmpname in backups.iteritems():
192 for realname, tmpname in backups.iteritems():
193 ui.debug('restoring %r to %r\n' % (tmpname, realname))
193 ui.debug('restoring %r to %r\n' % (tmpname, realname))
194 util.copyfile(tmpname, repo.wjoin(realname))
194 util.copyfile(tmpname, repo.wjoin(realname))
195 # Our calls to copystat() here and above are a
195 # Our calls to copystat() here and above are a
196 # hack to trick any editors that have f open into
196 # hack to trick any editors that have f open into
197 # thinking that we haven't modified them.
197 # thinking that we haven't modified them.
198 #
198 #
199 # Also note that this is racy, as an editor could
199 # Also note that this is racy, as an editor could
200 # notice the file's mtime before we've finished
200 # notice the file's mtime before we've finished
201 # writing it.
201 # writing it.
202 shutil.copystat(tmpname, repo.wjoin(realname))
202 shutil.copystat(tmpname, repo.wjoin(realname))
203 os.unlink(tmpname)
203 os.unlink(tmpname)
204 if tobackup:
204 if tobackup:
205 os.rmdir(backupdir)
205 os.rmdir(backupdir)
206 except OSError:
206 except OSError:
207 pass
207 pass
208
208
209 return commit(ui, repo, recordfunc, pats, opts)
209 return commit(ui, repo, recordfunc, pats, opts)
210
210
211 def findpossible(cmd, table, strict=False):
211 def findpossible(cmd, table, strict=False):
212 """
212 """
213 Return cmd -> (aliases, command table entry)
213 Return cmd -> (aliases, command table entry)
214 for each matching command.
214 for each matching command.
215 Return debug commands (or their aliases) only if no normal command matches.
215 Return debug commands (or their aliases) only if no normal command matches.
216 """
216 """
217 choice = {}
217 choice = {}
218 debugchoice = {}
218 debugchoice = {}
219
219
220 if cmd in table:
220 if cmd in table:
221 # short-circuit exact matches, "log" alias beats "^log|history"
221 # short-circuit exact matches, "log" alias beats "^log|history"
222 keys = [cmd]
222 keys = [cmd]
223 else:
223 else:
224 keys = table.keys()
224 keys = table.keys()
225
225
226 allcmds = []
226 allcmds = []
227 for e in keys:
227 for e in keys:
228 aliases = parsealiases(e)
228 aliases = parsealiases(e)
229 allcmds.extend(aliases)
229 allcmds.extend(aliases)
230 found = None
230 found = None
231 if cmd in aliases:
231 if cmd in aliases:
232 found = cmd
232 found = cmd
233 elif not strict:
233 elif not strict:
234 for a in aliases:
234 for a in aliases:
235 if a.startswith(cmd):
235 if a.startswith(cmd):
236 found = a
236 found = a
237 break
237 break
238 if found is not None:
238 if found is not None:
239 if aliases[0].startswith("debug") or found.startswith("debug"):
239 if aliases[0].startswith("debug") or found.startswith("debug"):
240 debugchoice[found] = (aliases, table[e])
240 debugchoice[found] = (aliases, table[e])
241 else:
241 else:
242 choice[found] = (aliases, table[e])
242 choice[found] = (aliases, table[e])
243
243
244 if not choice and debugchoice:
244 if not choice and debugchoice:
245 choice = debugchoice
245 choice = debugchoice
246
246
247 return choice, allcmds
247 return choice, allcmds
248
248
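# Illustrative only, with a hypothetical two-entry command table: how
# parsealiases() and findpossible() above resolve aliases and prefixes.
_table = {'^status|st': (None, [], ''), 'debugdata': (None, [], '')}
assert parsealiases('^status|st') == ['status', 'st']
_choice, _allcmds = findpossible('st', _table)
assert 'st' in _choice                  # exact alias match
_choice, _allcmds = findpossible('sta', _table)
assert 'status' in _choice              # unambiguous prefix match
_choice, _allcmds = findpossible('debugd', _table)
assert 'debugdata' in _choice           # debug commands match only as fallback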
249 def findcmd(cmd, table, strict=True):
249 def findcmd(cmd, table, strict=True):
250 """Return (aliases, command table entry) for command string."""
250 """Return (aliases, command table entry) for command string."""
251 choice, allcmds = findpossible(cmd, table, strict)
251 choice, allcmds = findpossible(cmd, table, strict)
252
252
253 if cmd in choice:
253 if cmd in choice:
254 return choice[cmd]
254 return choice[cmd]
255
255
256 if len(choice) > 1:
256 if len(choice) > 1:
257 clist = choice.keys()
257 clist = choice.keys()
258 clist.sort()
258 clist.sort()
259 raise error.AmbiguousCommand(cmd, clist)
259 raise error.AmbiguousCommand(cmd, clist)
260
260
261 if choice:
261 if choice:
262 return choice.values()[0]
262 return choice.values()[0]
263
263
264 raise error.UnknownCommand(cmd, allcmds)
264 raise error.UnknownCommand(cmd, allcmds)
265
265
266 def findrepo(p):
266 def findrepo(p):
267 while not os.path.isdir(os.path.join(p, ".hg")):
267 while not os.path.isdir(os.path.join(p, ".hg")):
268 oldp, p = p, os.path.dirname(p)
268 oldp, p = p, os.path.dirname(p)
269 if p == oldp:
269 if p == oldp:
270 return None
270 return None
271
271
272 return p
272 return p
273
273
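# Illustrative only, using throwaway temporary directories (os and tempfile
# are already imported at the top of this module): findrepo() walks upward
# from a path until it finds a directory that contains '.hg'.
_root = tempfile.mkdtemp()
os.mkdir(os.path.join(_root, '.hg'))
_nested = os.path.join(_root, 'a', 'b')
os.makedirs(_nested)
assert findrepo(_nested) == _root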
274 def bailifchanged(repo, merge=True):
274 def bailifchanged(repo, merge=True):
275 if merge and repo.dirstate.p2() != nullid:
275 if merge and repo.dirstate.p2() != nullid:
276 raise util.Abort(_('outstanding uncommitted merge'))
276 raise util.Abort(_('outstanding uncommitted merge'))
277 modified, added, removed, deleted = repo.status()[:4]
277 modified, added, removed, deleted = repo.status()[:4]
278 if modified or added or removed or deleted:
278 if modified or added or removed or deleted:
279 raise util.Abort(_('uncommitted changes'))
279 raise util.Abort(_('uncommitted changes'))
280 ctx = repo[None]
280 ctx = repo[None]
281 for s in sorted(ctx.substate):
281 for s in sorted(ctx.substate):
282 ctx.sub(s).bailifchanged()
282 ctx.sub(s).bailifchanged()
283
283
284 def logmessage(ui, opts):
284 def logmessage(ui, opts):
285 """ get the log message according to the -m and -l options """
285 """ get the log message according to the -m and -l options """
286 message = opts.get('message')
286 message = opts.get('message')
287 logfile = opts.get('logfile')
287 logfile = opts.get('logfile')
288
288
289 if message and logfile:
289 if message and logfile:
290 raise util.Abort(_('options --message and --logfile are mutually '
290 raise util.Abort(_('options --message and --logfile are mutually '
291 'exclusive'))
291 'exclusive'))
292 if not message and logfile:
292 if not message and logfile:
293 try:
293 try:
294 if logfile == '-':
294 if logfile == '-':
295 message = ui.fin.read()
295 message = ui.fin.read()
296 else:
296 else:
297 message = '\n'.join(util.readfile(logfile).splitlines())
297 message = '\n'.join(util.readfile(logfile).splitlines())
298 except IOError, inst:
298 except IOError, inst:
299 raise util.Abort(_("can't read commit message '%s': %s") %
299 raise util.Abort(_("can't read commit message '%s': %s") %
300 (logfile, inst.strerror))
300 (logfile, inst.strerror))
301 return message
301 return message
302
302
303 def mergeeditform(ctxorbool, baseformname):
303 def mergeeditform(ctxorbool, baseformname):
304 """return appropriate editform name (referencing a committemplate)
304 """return appropriate editform name (referencing a committemplate)
305
305
306 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
306 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
307 a merge is being committed.
307 a merge is being committed.
308
308
309 This returns baseformname with '.merge' appended if it is a merge,
309 This returns baseformname with '.merge' appended if it is a merge,
310 otherwise '.normal' is appended.
310 otherwise '.normal' is appended.
311 """
311 """
312 if isinstance(ctxorbool, bool):
312 if isinstance(ctxorbool, bool):
313 if ctxorbool:
313 if ctxorbool:
314 return baseformname + ".merge"
314 return baseformname + ".merge"
315 elif 1 < len(ctxorbool.parents()):
315 elif 1 < len(ctxorbool.parents()):
316 return baseformname + ".merge"
316 return baseformname + ".merge"
317
317
318 return baseformname + ".normal"
318 return baseformname + ".normal"
319
319
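# Illustrative sketch (not part of this file): the editform suffix selection
# above, reduced to a plain boolean "is this a merge?" question:
def _editform_sketch(ismerge, baseformname):
    return baseformname + ('.merge' if ismerge else '.normal')

# _editform_sketch(False, 'import') -> 'import.normal'
# _editform_sketch(True, 'commit')  -> 'commit.merge'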
320 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
320 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
321 editform='', **opts):
321 editform='', **opts):
322 """get appropriate commit message editor according to '--edit' option
322 """get appropriate commit message editor according to '--edit' option
323
323
324 'finishdesc' is a function to be called with the edited commit message
324 'finishdesc' is a function to be called with the edited commit message
325 (= 'description' of the new changeset) just after editing, but
325 (= 'description' of the new changeset) just after editing, but
326 before checking for emptiness. It should return the actual text to be
326 before checking for emptiness. It should return the actual text to be
327 stored in history. This allows the description to be changed before
327 stored in history. This allows the description to be changed before
328 it is stored.
328 it is stored.
329
329
330 'extramsg' is an extra message to be shown in the editor instead of
330 'extramsg' is an extra message to be shown in the editor instead of
331 the 'Leave message empty to abort commit' line. The 'HG: ' prefix and EOL
331 the 'Leave message empty to abort commit' line. The 'HG: ' prefix and EOL
332 are added automatically.
332 are added automatically.
333
333
334 'editform' is a dot-separated list of names, to distinguish
334 'editform' is a dot-separated list of names, to distinguish
335 the purpose of commit text editing.
335 the purpose of commit text editing.
336
336
337 'getcommiteditor' returns 'commitforceeditor' regardless of
337 'getcommiteditor' returns 'commitforceeditor' regardless of
338 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
338 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
339 they are specific to usage in MQ.
339 they are specific to usage in MQ.
340 """
340 """
341 if edit or finishdesc or extramsg:
341 if edit or finishdesc or extramsg:
342 return lambda r, c, s: commitforceeditor(r, c, s,
342 return lambda r, c, s: commitforceeditor(r, c, s,
343 finishdesc=finishdesc,
343 finishdesc=finishdesc,
344 extramsg=extramsg,
344 extramsg=extramsg,
345 editform=editform)
345 editform=editform)
346 elif editform:
346 elif editform:
347 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
347 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
348 else:
348 else:
349 return commiteditor
349 return commiteditor
350
350
351 def loglimit(opts):
351 def loglimit(opts):
352 """get the log limit according to option -l/--limit"""
352 """get the log limit according to option -l/--limit"""
353 limit = opts.get('limit')
353 limit = opts.get('limit')
354 if limit:
354 if limit:
355 try:
355 try:
356 limit = int(limit)
356 limit = int(limit)
357 except ValueError:
357 except ValueError:
358 raise util.Abort(_('limit must be a positive integer'))
358 raise util.Abort(_('limit must be a positive integer'))
359 if limit <= 0:
359 if limit <= 0:
360 raise util.Abort(_('limit must be positive'))
360 raise util.Abort(_('limit must be positive'))
361 else:
361 else:
362 limit = None
362 limit = None
363 return limit
363 return limit
364
364
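# Illustrative sketch (not part of this file): --limit parsing as done above,
# with the same two failure modes (non-integer, non-positive) and None
# meaning "no limit":
def _loglimit_sketch(value):
    if not value:
        return None
    try:
        limit = int(value)
    except ValueError:
        raise ValueError('limit must be a positive integer')
    if limit <= 0:
        raise ValueError('limit must be positive')
    return limit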
365 def makefilename(repo, pat, node, desc=None,
365 def makefilename(repo, pat, node, desc=None,
366 total=None, seqno=None, revwidth=None, pathname=None):
366 total=None, seqno=None, revwidth=None, pathname=None):
367 node_expander = {
367 node_expander = {
368 'H': lambda: hex(node),
368 'H': lambda: hex(node),
369 'R': lambda: str(repo.changelog.rev(node)),
369 'R': lambda: str(repo.changelog.rev(node)),
370 'h': lambda: short(node),
370 'h': lambda: short(node),
371 'm': lambda: re.sub('[^\w]', '_', str(desc))
371 'm': lambda: re.sub('[^\w]', '_', str(desc))
372 }
372 }
373 expander = {
373 expander = {
374 '%': lambda: '%',
374 '%': lambda: '%',
375 'b': lambda: os.path.basename(repo.root),
375 'b': lambda: os.path.basename(repo.root),
376 }
376 }
377
377
378 try:
378 try:
379 if node:
379 if node:
380 expander.update(node_expander)
380 expander.update(node_expander)
381 if node:
381 if node:
382 expander['r'] = (lambda:
382 expander['r'] = (lambda:
383 str(repo.changelog.rev(node)).zfill(revwidth or 0))
383 str(repo.changelog.rev(node)).zfill(revwidth or 0))
384 if total is not None:
384 if total is not None:
385 expander['N'] = lambda: str(total)
385 expander['N'] = lambda: str(total)
386 if seqno is not None:
386 if seqno is not None:
387 expander['n'] = lambda: str(seqno)
387 expander['n'] = lambda: str(seqno)
388 if total is not None and seqno is not None:
388 if total is not None and seqno is not None:
389 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
389 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
390 if pathname is not None:
390 if pathname is not None:
391 expander['s'] = lambda: os.path.basename(pathname)
391 expander['s'] = lambda: os.path.basename(pathname)
392 expander['d'] = lambda: os.path.dirname(pathname) or '.'
392 expander['d'] = lambda: os.path.dirname(pathname) or '.'
393 expander['p'] = lambda: pathname
393 expander['p'] = lambda: pathname
394
394
395 newname = []
395 newname = []
396 patlen = len(pat)
396 patlen = len(pat)
397 i = 0
397 i = 0
398 while i < patlen:
398 while i < patlen:
399 c = pat[i]
399 c = pat[i]
400 if c == '%':
400 if c == '%':
401 i += 1
401 i += 1
402 c = pat[i]
402 c = pat[i]
403 c = expander[c]()
403 c = expander[c]()
404 newname.append(c)
404 newname.append(c)
405 i += 1
405 i += 1
406 return ''.join(newname)
406 return ''.join(newname)
407 except KeyError, inst:
407 except KeyError, inst:
408 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
408 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
409 inst.args[0])
409 inst.args[0])
410
410
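# Illustrative sketch (not part of this file): makefilename() above walks the
# pattern character by character and replaces '%X' through a table of
# callables; an unknown spec surfaces as a KeyError.  A standalone version of
# that loop with a made-up expander table:
def _expand_pattern_sketch(pat, expander):
    out = []
    i = 0
    while i < len(pat):
        c = pat[i]
        if c == '%':
            i += 1
            c = expander[pat[i]]()  # KeyError here means an unknown format spec
        out.append(c)
        i += 1
    return ''.join(out)

# _expand_pattern_sketch('hg-%b-%n.patch',
#                        {'b': lambda: 'repo', 'n': lambda: '01'})
# -> 'hg-repo-01.patch'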
411 def makefileobj(repo, pat, node=None, desc=None, total=None,
411 def makefileobj(repo, pat, node=None, desc=None, total=None,
412 seqno=None, revwidth=None, mode='wb', modemap=None,
412 seqno=None, revwidth=None, mode='wb', modemap=None,
413 pathname=None):
413 pathname=None):
414
414
415 writable = mode not in ('r', 'rb')
415 writable = mode not in ('r', 'rb')
416
416
417 if not pat or pat == '-':
417 if not pat or pat == '-':
418 if writable:
418 if writable:
419 fp = repo.ui.fout
419 fp = repo.ui.fout
420 else:
420 else:
421 fp = repo.ui.fin
421 fp = repo.ui.fin
422 if util.safehasattr(fp, 'fileno'):
422 if util.safehasattr(fp, 'fileno'):
423 return os.fdopen(os.dup(fp.fileno()), mode)
423 return os.fdopen(os.dup(fp.fileno()), mode)
424 else:
424 else:
425 # if this fp can't be duped properly, return
425 # if this fp can't be duped properly, return
426 # a dummy object that can be closed
426 # a dummy object that can be closed
427 class wrappedfileobj(object):
427 class wrappedfileobj(object):
428 noop = lambda x: None
428 noop = lambda x: None
429 def __init__(self, f):
429 def __init__(self, f):
430 self.f = f
430 self.f = f
431 def __getattr__(self, attr):
431 def __getattr__(self, attr):
432 if attr == 'close':
432 if attr == 'close':
433 return self.noop
433 return self.noop
434 else:
434 else:
435 return getattr(self.f, attr)
435 return getattr(self.f, attr)
436
436
437 return wrappedfileobj(fp)
437 return wrappedfileobj(fp)
438 if util.safehasattr(pat, 'write') and writable:
438 if util.safehasattr(pat, 'write') and writable:
439 return pat
439 return pat
440 if util.safehasattr(pat, 'read') and 'r' in mode:
440 if util.safehasattr(pat, 'read') and 'r' in mode:
441 return pat
441 return pat
442 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
442 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
443 if modemap is not None:
443 if modemap is not None:
444 mode = modemap.get(fn, mode)
444 mode = modemap.get(fn, mode)
445 if mode == 'wb':
445 if mode == 'wb':
446 modemap[fn] = 'ab'
446 modemap[fn] = 'ab'
447 return open(fn, mode)
447 return open(fn, mode)
448
448
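# Illustrative sketch (not part of this file): the modemap bookkeeping above
# opens a given expanded filename in 'wb' the first time and 'ab' afterwards,
# so several revisions exported to the same name append rather than overwrite:
def _openmode_sketch(fn, modemap, default='wb'):
    mode = modemap.get(fn, default)
    if mode == 'wb':
        modemap[fn] = 'ab'  # the next open of the same name will append
    return mode

# modes = {}
# _openmode_sketch('out.patch', modes) -> 'wb'
# _openmode_sketch('out.patch', modes) -> 'ab'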
449 def openrevlog(repo, cmd, file_, opts):
449 def openrevlog(repo, cmd, file_, opts):
450 """opens the changelog, manifest, a filelog or a given revlog"""
450 """opens the changelog, manifest, a filelog or a given revlog"""
451 cl = opts['changelog']
451 cl = opts['changelog']
452 mf = opts['manifest']
452 mf = opts['manifest']
453 msg = None
453 msg = None
454 if cl and mf:
454 if cl and mf:
455 msg = _('cannot specify --changelog and --manifest at the same time')
455 msg = _('cannot specify --changelog and --manifest at the same time')
456 elif cl or mf:
456 elif cl or mf:
457 if file_:
457 if file_:
458 msg = _('cannot specify filename with --changelog or --manifest')
458 msg = _('cannot specify filename with --changelog or --manifest')
459 elif not repo:
459 elif not repo:
460 msg = _('cannot specify --changelog or --manifest '
460 msg = _('cannot specify --changelog or --manifest '
461 'without a repository')
461 'without a repository')
462 if msg:
462 if msg:
463 raise util.Abort(msg)
463 raise util.Abort(msg)
464
464
465 r = None
465 r = None
466 if repo:
466 if repo:
467 if cl:
467 if cl:
468 r = repo.unfiltered().changelog
468 r = repo.unfiltered().changelog
469 elif mf:
469 elif mf:
470 r = repo.manifest
470 r = repo.manifest
471 elif file_:
471 elif file_:
472 filelog = repo.file(file_)
472 filelog = repo.file(file_)
473 if len(filelog):
473 if len(filelog):
474 r = filelog
474 r = filelog
475 if not r:
475 if not r:
476 if not file_:
476 if not file_:
477 raise error.CommandError(cmd, _('invalid arguments'))
477 raise error.CommandError(cmd, _('invalid arguments'))
478 if not os.path.isfile(file_):
478 if not os.path.isfile(file_):
479 raise util.Abort(_("revlog '%s' not found") % file_)
479 raise util.Abort(_("revlog '%s' not found") % file_)
480 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
480 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
481 file_[:-2] + ".i")
481 file_[:-2] + ".i")
482 return r
482 return r
483
483
484 def copy(ui, repo, pats, opts, rename=False):
484 def copy(ui, repo, pats, opts, rename=False):
485 # called with the repo lock held
485 # called with the repo lock held
486 #
486 #
487 # hgsep => pathname that uses "/" to separate directories
487 # hgsep => pathname that uses "/" to separate directories
488 # ossep => pathname that uses os.sep to separate directories
488 # ossep => pathname that uses os.sep to separate directories
489 cwd = repo.getcwd()
489 cwd = repo.getcwd()
490 targets = {}
490 targets = {}
491 after = opts.get("after")
491 after = opts.get("after")
492 dryrun = opts.get("dry_run")
492 dryrun = opts.get("dry_run")
493 wctx = repo[None]
493 wctx = repo[None]
494
494
495 def walkpat(pat):
495 def walkpat(pat):
496 srcs = []
496 srcs = []
497 if after:
497 if after:
498 badstates = '?'
498 badstates = '?'
499 else:
499 else:
500 badstates = '?r'
500 badstates = '?r'
501 m = scmutil.match(repo[None], [pat], opts, globbed=True)
501 m = scmutil.match(repo[None], [pat], opts, globbed=True)
502 for abs in repo.walk(m):
502 for abs in repo.walk(m):
503 state = repo.dirstate[abs]
503 state = repo.dirstate[abs]
504 rel = m.rel(abs)
504 rel = m.rel(abs)
505 exact = m.exact(abs)
505 exact = m.exact(abs)
506 if state in badstates:
506 if state in badstates:
507 if exact and state == '?':
507 if exact and state == '?':
508 ui.warn(_('%s: not copying - file is not managed\n') % rel)
508 ui.warn(_('%s: not copying - file is not managed\n') % rel)
509 if exact and state == 'r':
509 if exact and state == 'r':
510 ui.warn(_('%s: not copying - file has been marked for'
510 ui.warn(_('%s: not copying - file has been marked for'
511 ' remove\n') % rel)
511 ' remove\n') % rel)
512 continue
512 continue
513 # abs: hgsep
513 # abs: hgsep
514 # rel: ossep
514 # rel: ossep
515 srcs.append((abs, rel, exact))
515 srcs.append((abs, rel, exact))
516 return srcs
516 return srcs
517
517
518 # abssrc: hgsep
518 # abssrc: hgsep
519 # relsrc: ossep
519 # relsrc: ossep
520 # otarget: ossep
520 # otarget: ossep
521 def copyfile(abssrc, relsrc, otarget, exact):
521 def copyfile(abssrc, relsrc, otarget, exact):
522 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
522 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
523 if '/' in abstarget:
523 if '/' in abstarget:
524 # We cannot normalize abstarget itself, this would prevent
524 # We cannot normalize abstarget itself, this would prevent
525 # case only renames, like a => A.
525 # case only renames, like a => A.
526 abspath, absname = abstarget.rsplit('/', 1)
526 abspath, absname = abstarget.rsplit('/', 1)
527 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
527 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
528 reltarget = repo.pathto(abstarget, cwd)
528 reltarget = repo.pathto(abstarget, cwd)
529 target = repo.wjoin(abstarget)
529 target = repo.wjoin(abstarget)
530 src = repo.wjoin(abssrc)
530 src = repo.wjoin(abssrc)
531 state = repo.dirstate[abstarget]
531 state = repo.dirstate[abstarget]
532
532
533 scmutil.checkportable(ui, abstarget)
533 scmutil.checkportable(ui, abstarget)
534
534
535 # check for collisions
535 # check for collisions
536 prevsrc = targets.get(abstarget)
536 prevsrc = targets.get(abstarget)
537 if prevsrc is not None:
537 if prevsrc is not None:
538 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
538 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
539 (reltarget, repo.pathto(abssrc, cwd),
539 (reltarget, repo.pathto(abssrc, cwd),
540 repo.pathto(prevsrc, cwd)))
540 repo.pathto(prevsrc, cwd)))
541 return
541 return
542
542
543 # check for overwrites
543 # check for overwrites
544 exists = os.path.lexists(target)
544 exists = os.path.lexists(target)
545 samefile = False
545 samefile = False
546 if exists and abssrc != abstarget:
546 if exists and abssrc != abstarget:
547 if (repo.dirstate.normalize(abssrc) ==
547 if (repo.dirstate.normalize(abssrc) ==
548 repo.dirstate.normalize(abstarget)):
548 repo.dirstate.normalize(abstarget)):
549 if not rename:
549 if not rename:
550 ui.warn(_("%s: can't copy - same file\n") % reltarget)
550 ui.warn(_("%s: can't copy - same file\n") % reltarget)
551 return
551 return
552 exists = False
552 exists = False
553 samefile = True
553 samefile = True
554
554
555 if not after and exists or after and state in 'mn':
555 if not after and exists or after and state in 'mn':
556 if not opts['force']:
556 if not opts['force']:
557 ui.warn(_('%s: not overwriting - file exists\n') %
557 ui.warn(_('%s: not overwriting - file exists\n') %
558 reltarget)
558 reltarget)
559 return
559 return
560
560
561 if after:
561 if after:
562 if not exists:
562 if not exists:
563 if rename:
563 if rename:
564 ui.warn(_('%s: not recording move - %s does not exist\n') %
564 ui.warn(_('%s: not recording move - %s does not exist\n') %
565 (relsrc, reltarget))
565 (relsrc, reltarget))
566 else:
566 else:
567 ui.warn(_('%s: not recording copy - %s does not exist\n') %
567 ui.warn(_('%s: not recording copy - %s does not exist\n') %
568 (relsrc, reltarget))
568 (relsrc, reltarget))
569 return
569 return
570 elif not dryrun:
570 elif not dryrun:
571 try:
571 try:
572 if exists:
572 if exists:
573 os.unlink(target)
573 os.unlink(target)
574 targetdir = os.path.dirname(target) or '.'
574 targetdir = os.path.dirname(target) or '.'
575 if not os.path.isdir(targetdir):
575 if not os.path.isdir(targetdir):
576 os.makedirs(targetdir)
576 os.makedirs(targetdir)
577 if samefile:
577 if samefile:
578 tmp = target + "~hgrename"
578 tmp = target + "~hgrename"
579 os.rename(src, tmp)
579 os.rename(src, tmp)
580 os.rename(tmp, target)
580 os.rename(tmp, target)
581 else:
581 else:
582 util.copyfile(src, target)
582 util.copyfile(src, target)
583 srcexists = True
583 srcexists = True
584 except IOError, inst:
584 except IOError, inst:
585 if inst.errno == errno.ENOENT:
585 if inst.errno == errno.ENOENT:
586 ui.warn(_('%s: deleted in working directory\n') % relsrc)
586 ui.warn(_('%s: deleted in working directory\n') % relsrc)
587 srcexists = False
587 srcexists = False
588 else:
588 else:
589 ui.warn(_('%s: cannot copy - %s\n') %
589 ui.warn(_('%s: cannot copy - %s\n') %
590 (relsrc, inst.strerror))
590 (relsrc, inst.strerror))
591 return True # report a failure
591 return True # report a failure
592
592
593 if ui.verbose or not exact:
593 if ui.verbose or not exact:
594 if rename:
594 if rename:
595 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
595 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
596 else:
596 else:
597 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
597 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
598
598
599 targets[abstarget] = abssrc
599 targets[abstarget] = abssrc
600
600
601 # fix up dirstate
601 # fix up dirstate
602 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
602 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
603 dryrun=dryrun, cwd=cwd)
603 dryrun=dryrun, cwd=cwd)
604 if rename and not dryrun:
604 if rename and not dryrun:
605 if not after and srcexists and not samefile:
605 if not after and srcexists and not samefile:
606 util.unlinkpath(repo.wjoin(abssrc))
606 util.unlinkpath(repo.wjoin(abssrc))
607 wctx.forget([abssrc])
607 wctx.forget([abssrc])
608
608
609 # pat: ossep
609 # pat: ossep
610 # dest ossep
610 # dest ossep
611 # srcs: list of (hgsep, hgsep, ossep, bool)
611 # srcs: list of (hgsep, hgsep, ossep, bool)
612 # return: function that takes hgsep and returns ossep
612 # return: function that takes hgsep and returns ossep
613 def targetpathfn(pat, dest, srcs):
613 def targetpathfn(pat, dest, srcs):
614 if os.path.isdir(pat):
614 if os.path.isdir(pat):
615 abspfx = pathutil.canonpath(repo.root, cwd, pat)
615 abspfx = pathutil.canonpath(repo.root, cwd, pat)
616 abspfx = util.localpath(abspfx)
616 abspfx = util.localpath(abspfx)
617 if destdirexists:
617 if destdirexists:
618 striplen = len(os.path.split(abspfx)[0])
618 striplen = len(os.path.split(abspfx)[0])
619 else:
619 else:
620 striplen = len(abspfx)
620 striplen = len(abspfx)
621 if striplen:
621 if striplen:
622 striplen += len(os.sep)
622 striplen += len(os.sep)
623 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
623 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
624 elif destdirexists:
624 elif destdirexists:
625 res = lambda p: os.path.join(dest,
625 res = lambda p: os.path.join(dest,
626 os.path.basename(util.localpath(p)))
626 os.path.basename(util.localpath(p)))
627 else:
627 else:
628 res = lambda p: dest
628 res = lambda p: dest
629 return res
629 return res
630
630
631 # pat: ossep
631 # pat: ossep
632 # dest ossep
632 # dest ossep
633 # srcs: list of (hgsep, hgsep, ossep, bool)
633 # srcs: list of (hgsep, hgsep, ossep, bool)
634 # return: function that takes hgsep and returns ossep
634 # return: function that takes hgsep and returns ossep
635 def targetpathafterfn(pat, dest, srcs):
635 def targetpathafterfn(pat, dest, srcs):
636 if matchmod.patkind(pat):
636 if matchmod.patkind(pat):
637 # a mercurial pattern
637 # a mercurial pattern
638 res = lambda p: os.path.join(dest,
638 res = lambda p: os.path.join(dest,
639 os.path.basename(util.localpath(p)))
639 os.path.basename(util.localpath(p)))
640 else:
640 else:
641 abspfx = pathutil.canonpath(repo.root, cwd, pat)
641 abspfx = pathutil.canonpath(repo.root, cwd, pat)
642 if len(abspfx) < len(srcs[0][0]):
642 if len(abspfx) < len(srcs[0][0]):
643 # A directory. Either the target path contains the last
643 # A directory. Either the target path contains the last
644 # component of the source path or it does not.
644 # component of the source path or it does not.
645 def evalpath(striplen):
645 def evalpath(striplen):
646 score = 0
646 score = 0
647 for s in srcs:
647 for s in srcs:
648 t = os.path.join(dest, util.localpath(s[0])[striplen:])
648 t = os.path.join(dest, util.localpath(s[0])[striplen:])
649 if os.path.lexists(t):
649 if os.path.lexists(t):
650 score += 1
650 score += 1
651 return score
651 return score
652
652
653 abspfx = util.localpath(abspfx)
653 abspfx = util.localpath(abspfx)
654 striplen = len(abspfx)
654 striplen = len(abspfx)
655 if striplen:
655 if striplen:
656 striplen += len(os.sep)
656 striplen += len(os.sep)
657 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
657 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
658 score = evalpath(striplen)
658 score = evalpath(striplen)
659 striplen1 = len(os.path.split(abspfx)[0])
659 striplen1 = len(os.path.split(abspfx)[0])
660 if striplen1:
660 if striplen1:
661 striplen1 += len(os.sep)
661 striplen1 += len(os.sep)
662 if evalpath(striplen1) > score:
662 if evalpath(striplen1) > score:
663 striplen = striplen1
663 striplen = striplen1
664 res = lambda p: os.path.join(dest,
664 res = lambda p: os.path.join(dest,
665 util.localpath(p)[striplen:])
665 util.localpath(p)[striplen:])
666 else:
666 else:
667 # a file
667 # a file
668 if destdirexists:
668 if destdirexists:
669 res = lambda p: os.path.join(dest,
669 res = lambda p: os.path.join(dest,
670 os.path.basename(util.localpath(p)))
670 os.path.basename(util.localpath(p)))
671 else:
671 else:
672 res = lambda p: dest
672 res = lambda p: dest
673 return res
673 return res
674
674
675 pats = scmutil.expandpats(pats)
675 pats = scmutil.expandpats(pats)
676 if not pats:
676 if not pats:
677 raise util.Abort(_('no source or destination specified'))
677 raise util.Abort(_('no source or destination specified'))
678 if len(pats) == 1:
678 if len(pats) == 1:
679 raise util.Abort(_('no destination specified'))
679 raise util.Abort(_('no destination specified'))
680 dest = pats.pop()
680 dest = pats.pop()
681 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
681 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
682 if not destdirexists:
682 if not destdirexists:
683 if len(pats) > 1 or matchmod.patkind(pats[0]):
683 if len(pats) > 1 or matchmod.patkind(pats[0]):
684 raise util.Abort(_('with multiple sources, destination must be an '
684 raise util.Abort(_('with multiple sources, destination must be an '
685 'existing directory'))
685 'existing directory'))
686 if util.endswithsep(dest):
686 if util.endswithsep(dest):
687 raise util.Abort(_('destination %s is not a directory') % dest)
687 raise util.Abort(_('destination %s is not a directory') % dest)
688
688
689 tfn = targetpathfn
689 tfn = targetpathfn
690 if after:
690 if after:
691 tfn = targetpathafterfn
691 tfn = targetpathafterfn
692 copylist = []
692 copylist = []
693 for pat in pats:
693 for pat in pats:
694 srcs = walkpat(pat)
694 srcs = walkpat(pat)
695 if not srcs:
695 if not srcs:
696 continue
696 continue
697 copylist.append((tfn(pat, dest, srcs), srcs))
697 copylist.append((tfn(pat, dest, srcs), srcs))
698 if not copylist:
698 if not copylist:
699 raise util.Abort(_('no files to copy'))
699 raise util.Abort(_('no files to copy'))
700
700
701 errors = 0
701 errors = 0
702 for targetpath, srcs in copylist:
702 for targetpath, srcs in copylist:
703 for abssrc, relsrc, exact in srcs:
703 for abssrc, relsrc, exact in srcs:
704 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
704 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
705 errors += 1
705 errors += 1
706
706
707 if errors:
707 if errors:
708 ui.warn(_('(consider using --after)\n'))
708 ui.warn(_('(consider using --after)\n'))
709
709
710 return errors != 0
710 return errors != 0
711
711
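# Illustrative sketch (not part of this file): the striplen computation in
# targetpathfn() above decides how much of a source path to drop before
# joining the remainder to the destination -- keep the last path component
# when the destination directory already exists, drop the whole prefix
# otherwise.  Standalone version (results shown for POSIX separators):
import os.path

def _targetpath_sketch(srcdir, dest, destdirexists, srcfile):
    if destdirexists:
        striplen = len(os.path.split(srcdir)[0])
    else:
        striplen = len(srcdir)
    if striplen:
        striplen += len(os.sep)
    return os.path.join(dest, srcfile[striplen:])

# _targetpath_sketch('a/b', 'dst', True,  'a/b/f.txt') -> 'dst/b/f.txt'
# _targetpath_sketch('a/b', 'dst', False, 'a/b/f.txt') -> 'dst/f.txt'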
712 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
712 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
713 runargs=None, appendpid=False):
713 runargs=None, appendpid=False):
714 '''Run a command as a service.'''
714 '''Run a command as a service.'''
715
715
716 def writepid(pid):
716 def writepid(pid):
717 if opts['pid_file']:
717 if opts['pid_file']:
718 if appendpid:
718 if appendpid:
719 mode = 'a'
719 mode = 'a'
720 else:
720 else:
721 mode = 'w'
721 mode = 'w'
722 fp = open(opts['pid_file'], mode)
722 fp = open(opts['pid_file'], mode)
723 fp.write(str(pid) + '\n')
723 fp.write(str(pid) + '\n')
724 fp.close()
724 fp.close()
725
725
726 if opts['daemon'] and not opts['daemon_pipefds']:
726 if opts['daemon'] and not opts['daemon_pipefds']:
727 # Signal child process startup with file removal
727 # Signal child process startup with file removal
728 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
728 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
729 os.close(lockfd)
729 os.close(lockfd)
730 try:
730 try:
731 if not runargs:
731 if not runargs:
732 runargs = util.hgcmd() + sys.argv[1:]
732 runargs = util.hgcmd() + sys.argv[1:]
733 runargs.append('--daemon-pipefds=%s' % lockpath)
733 runargs.append('--daemon-pipefds=%s' % lockpath)
734 # Don't pass --cwd to the child process, because we've already
734 # Don't pass --cwd to the child process, because we've already
735 # changed directory.
735 # changed directory.
736 for i in xrange(1, len(runargs)):
736 for i in xrange(1, len(runargs)):
737 if runargs[i].startswith('--cwd='):
737 if runargs[i].startswith('--cwd='):
738 del runargs[i]
738 del runargs[i]
739 break
739 break
740 elif runargs[i].startswith('--cwd'):
740 elif runargs[i].startswith('--cwd'):
741 del runargs[i:i + 2]
741 del runargs[i:i + 2]
742 break
742 break
743 def condfn():
743 def condfn():
744 return not os.path.exists(lockpath)
744 return not os.path.exists(lockpath)
745 pid = util.rundetached(runargs, condfn)
745 pid = util.rundetached(runargs, condfn)
746 if pid < 0:
746 if pid < 0:
747 raise util.Abort(_('child process failed to start'))
747 raise util.Abort(_('child process failed to start'))
748 writepid(pid)
748 writepid(pid)
749 finally:
749 finally:
750 try:
750 try:
751 os.unlink(lockpath)
751 os.unlink(lockpath)
752 except OSError, e:
752 except OSError, e:
753 if e.errno != errno.ENOENT:
753 if e.errno != errno.ENOENT:
754 raise
754 raise
755 if parentfn:
755 if parentfn:
756 return parentfn(pid)
756 return parentfn(pid)
757 else:
757 else:
758 return
758 return
759
759
760 if initfn:
760 if initfn:
761 initfn()
761 initfn()
762
762
763 if not opts['daemon']:
763 if not opts['daemon']:
764 writepid(os.getpid())
764 writepid(os.getpid())
765
765
766 if opts['daemon_pipefds']:
766 if opts['daemon_pipefds']:
767 lockpath = opts['daemon_pipefds']
767 lockpath = opts['daemon_pipefds']
768 try:
768 try:
769 os.setsid()
769 os.setsid()
770 except AttributeError:
770 except AttributeError:
771 pass
771 pass
772 os.unlink(lockpath)
772 os.unlink(lockpath)
773 util.hidewindow()
773 util.hidewindow()
774 sys.stdout.flush()
774 sys.stdout.flush()
775 sys.stderr.flush()
775 sys.stderr.flush()
776
776
777 nullfd = os.open(os.devnull, os.O_RDWR)
777 nullfd = os.open(os.devnull, os.O_RDWR)
778 logfilefd = nullfd
778 logfilefd = nullfd
779 if logfile:
779 if logfile:
780 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
780 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
781 os.dup2(nullfd, 0)
781 os.dup2(nullfd, 0)
782 os.dup2(logfilefd, 1)
782 os.dup2(logfilefd, 1)
783 os.dup2(logfilefd, 2)
783 os.dup2(logfilefd, 2)
784 if nullfd not in (0, 1, 2):
784 if nullfd not in (0, 1, 2):
785 os.close(nullfd)
785 os.close(nullfd)
786 if logfile and logfilefd not in (0, 1, 2):
786 if logfile and logfilefd not in (0, 1, 2):
787 os.close(logfilefd)
787 os.close(logfilefd)
788
788
789 if runfn:
789 if runfn:
790 return runfn()
790 return runfn()
791
791
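# Illustrative sketch (not part of this file): before re-spawning itself with
# --daemon-pipefds, service() above drops any --cwd argument because the
# parent has already changed directory; both the '--cwd=DIR' and '--cwd DIR'
# spellings are handled.  Standalone version of that scrub:
def _strip_cwd_sketch(args):
    args = list(args)
    for i in range(1, len(args)):
        if args[i].startswith('--cwd='):
            del args[i]
            break
        elif args[i].startswith('--cwd'):
            del args[i:i + 2]
            break
    return args

# _strip_cwd_sketch(['hg', 'serve', '--cwd', '/tmp', '-d'])
# -> ['hg', 'serve', '-d']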
792 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
792 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
793 """Utility function used by commands.import to import a single patch
793 """Utility function used by commands.import to import a single patch
794
794
795 This function is explicitly defined here so that the evolve extension can
795 This function is explicitly defined here so that the evolve extension can
796 wrap this part of the import logic.
796 wrap this part of the import logic.
797
797
798 The API is currently a bit ugly because it is a simple code translation from
798 The API is currently a bit ugly because it is a simple code translation from
799 the import command. Feel free to make it better.
799 the import command. Feel free to make it better.
800
800
801 :hunk: a patch (as a binary string)
801 :hunk: a patch (as a binary string)
802 :parents: nodes that will be parent of the created commit
802 :parents: nodes that will be parent of the created commit
803 :opts: the full dict of options passed to the import command
803 :opts: the full dict of options passed to the import command
804 :msgs: list to save the commit message to.
804 :msgs: list to save the commit message to.
805 (used in case we need to save it when failing)
805 (used in case we need to save it when failing)
806 :updatefunc: a function that updates a repo to a given node
806 :updatefunc: a function that updates a repo to a given node
807 updatefunc(<repo>, <node>)
807 updatefunc(<repo>, <node>)
808 """
808 """
809 tmpname, message, user, date, branch, nodeid, p1, p2 = \
809 tmpname, message, user, date, branch, nodeid, p1, p2 = \
810 patch.extract(ui, hunk)
810 patch.extract(ui, hunk)
811
811
812 update = not opts.get('bypass')
812 update = not opts.get('bypass')
813 strip = opts["strip"]
813 strip = opts["strip"]
814 prefix = opts["prefix"]
814 prefix = opts["prefix"]
815 sim = float(opts.get('similarity') or 0)
815 sim = float(opts.get('similarity') or 0)
816 if not tmpname:
816 if not tmpname:
817 return (None, None, False)
817 return (None, None, False)
818 msg = _('applied to working directory')
818 msg = _('applied to working directory')
819
819
820 rejects = False
820 rejects = False
821
821
822 try:
822 try:
823 cmdline_message = logmessage(ui, opts)
823 cmdline_message = logmessage(ui, opts)
824 if cmdline_message:
824 if cmdline_message:
825 # pickup the cmdline msg
825 # pickup the cmdline msg
826 message = cmdline_message
826 message = cmdline_message
827 elif message:
827 elif message:
828 # pickup the patch msg
828 # pickup the patch msg
829 message = message.strip()
829 message = message.strip()
830 else:
830 else:
831 # launch the editor
831 # launch the editor
832 message = None
832 message = None
833 ui.debug('message:\n%s\n' % message)
833 ui.debug('message:\n%s\n' % message)
834
834
835 if len(parents) == 1:
835 if len(parents) == 1:
836 parents.append(repo[nullid])
836 parents.append(repo[nullid])
837 if opts.get('exact'):
837 if opts.get('exact'):
838 if not nodeid or not p1:
838 if not nodeid or not p1:
839 raise util.Abort(_('not a Mercurial patch'))
839 raise util.Abort(_('not a Mercurial patch'))
840 p1 = repo[p1]
840 p1 = repo[p1]
841 p2 = repo[p2 or nullid]
841 p2 = repo[p2 or nullid]
842 elif p2:
842 elif p2:
843 try:
843 try:
844 p1 = repo[p1]
844 p1 = repo[p1]
845 p2 = repo[p2]
845 p2 = repo[p2]
846 # Without any options, consider p2 only if the
846 # Without any options, consider p2 only if the
847 # patch is being applied on top of the recorded
847 # patch is being applied on top of the recorded
848 # first parent.
848 # first parent.
849 if p1 != parents[0]:
849 if p1 != parents[0]:
850 p1 = parents[0]
850 p1 = parents[0]
851 p2 = repo[nullid]
851 p2 = repo[nullid]
852 except error.RepoError:
852 except error.RepoError:
853 p1, p2 = parents
853 p1, p2 = parents
854 if p2.node() == nullid:
854 if p2.node() == nullid:
855 ui.warn(_("warning: import the patch as a normal revision\n"
855 ui.warn(_("warning: import the patch as a normal revision\n"
856 "(use --exact to import the patch as a merge)\n"))
856 "(use --exact to import the patch as a merge)\n"))
857 else:
857 else:
858 p1, p2 = parents
858 p1, p2 = parents
859
859
860 n = None
860 n = None
861 if update:
861 if update:
862 repo.dirstate.beginparentchange()
862 repo.dirstate.beginparentchange()
863 if p1 != parents[0]:
863 if p1 != parents[0]:
864 updatefunc(repo, p1.node())
864 updatefunc(repo, p1.node())
865 if p2 != parents[1]:
865 if p2 != parents[1]:
866 repo.setparents(p1.node(), p2.node())
866 repo.setparents(p1.node(), p2.node())
867
867
868 if opts.get('exact') or opts.get('import_branch'):
868 if opts.get('exact') or opts.get('import_branch'):
869 repo.dirstate.setbranch(branch or 'default')
869 repo.dirstate.setbranch(branch or 'default')
870
870
871 partial = opts.get('partial', False)
871 partial = opts.get('partial', False)
872 files = set()
872 files = set()
873 try:
873 try:
874 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
874 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
875 files=files, eolmode=None, similarity=sim / 100.0)
875 files=files, eolmode=None, similarity=sim / 100.0)
876 except patch.PatchError, e:
876 except patch.PatchError, e:
877 if not partial:
877 if not partial:
878 raise util.Abort(str(e))
878 raise util.Abort(str(e))
879 if partial:
879 if partial:
880 rejects = True
880 rejects = True
881
881
882 files = list(files)
882 files = list(files)
883 if opts.get('no_commit'):
883 if opts.get('no_commit'):
884 if message:
884 if message:
885 msgs.append(message)
885 msgs.append(message)
886 else:
886 else:
887 if opts.get('exact') or p2:
887 if opts.get('exact') or p2:
888 # If you got here, you either used --force and know what
888 # If you got here, you either used --force and know what
889 # you are doing, or used --exact or a merge patch while
889 # you are doing, or used --exact or a merge patch while
890 # being updated to its first parent.
890 # being updated to its first parent.
891 m = None
891 m = None
892 else:
892 else:
893 m = scmutil.matchfiles(repo, files or [])
893 m = scmutil.matchfiles(repo, files or [])
894 editform = mergeeditform(repo[None], 'import.normal')
894 editform = mergeeditform(repo[None], 'import.normal')
895 if opts.get('exact'):
895 if opts.get('exact'):
896 editor = None
896 editor = None
897 else:
897 else:
898 editor = getcommiteditor(editform=editform, **opts)
898 editor = getcommiteditor(editform=editform, **opts)
899 n = repo.commit(message, opts.get('user') or user,
899 n = repo.commit(message, opts.get('user') or user,
900 opts.get('date') or date, match=m,
900 opts.get('date') or date, match=m,
901 editor=editor, force=partial)
901 editor=editor, force=partial)
902 repo.dirstate.endparentchange()
902 repo.dirstate.endparentchange()
903 else:
903 else:
904 if opts.get('exact') or opts.get('import_branch'):
904 if opts.get('exact') or opts.get('import_branch'):
905 branch = branch or 'default'
905 branch = branch or 'default'
906 else:
906 else:
907 branch = p1.branch()
907 branch = p1.branch()
908 store = patch.filestore()
908 store = patch.filestore()
909 try:
909 try:
910 files = set()
910 files = set()
911 try:
911 try:
912 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
912 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
913 files, eolmode=None)
913 files, eolmode=None)
914 except patch.PatchError, e:
914 except patch.PatchError, e:
915 raise util.Abort(str(e))
915 raise util.Abort(str(e))
916 if opts.get('exact'):
916 if opts.get('exact'):
917 editor = None
917 editor = None
918 else:
918 else:
919 editor = getcommiteditor(editform='import.bypass')
919 editor = getcommiteditor(editform='import.bypass')
920 memctx = context.makememctx(repo, (p1.node(), p2.node()),
920 memctx = context.makememctx(repo, (p1.node(), p2.node()),
921 message,
921 message,
922 opts.get('user') or user,
922 opts.get('user') or user,
923 opts.get('date') or date,
923 opts.get('date') or date,
924 branch, files, store,
924 branch, files, store,
925 editor=editor)
925 editor=editor)
926 n = memctx.commit()
926 n = memctx.commit()
927 finally:
927 finally:
928 store.close()
928 store.close()
929 if opts.get('exact') and opts.get('no_commit'):
929 if opts.get('exact') and opts.get('no_commit'):
930 # --exact with --no-commit is still useful in that it does merge
930 # --exact with --no-commit is still useful in that it does merge
931 # and branch bits
931 # and branch bits
932 ui.warn(_("warning: can't check exact import with --no-commit\n"))
932 ui.warn(_("warning: can't check exact import with --no-commit\n"))
933 elif opts.get('exact') and hex(n) != nodeid:
933 elif opts.get('exact') and hex(n) != nodeid:
934 raise util.Abort(_('patch is damaged or loses information'))
934 raise util.Abort(_('patch is damaged or loses information'))
935 if n:
935 if n:
936 # i18n: refers to a short changeset id
936 # i18n: refers to a short changeset id
937 msg = _('created %s') % short(n)
937 msg = _('created %s') % short(n)
938 return (msg, n, rejects)
938 return (msg, n, rejects)
939 finally:
939 finally:
940 os.unlink(tmpname)
940 os.unlink(tmpname)
941
941
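# Illustrative sketch (not part of this file): tryimportone() above picks the
# commit message with this precedence -- an explicit -m/-l message wins, then
# the message embedded in the patch, and None means "launch the editor":
def _importmessage_sketch(cmdline_message, patch_message):
    if cmdline_message:
        return cmdline_message
    if patch_message:
        return patch_message.strip()
    return None  # caller will fall back to an interactive editor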
942 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
942 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
943 opts=None):
943 opts=None):
944 '''export changesets as hg patches.'''
944 '''export changesets as hg patches.'''
945
945
946 total = len(revs)
946 total = len(revs)
947 revwidth = max([len(str(rev)) for rev in revs])
947 revwidth = max([len(str(rev)) for rev in revs])
948 filemode = {}
948 filemode = {}
949
949
950 def single(rev, seqno, fp):
950 def single(rev, seqno, fp):
951 ctx = repo[rev]
951 ctx = repo[rev]
952 node = ctx.node()
952 node = ctx.node()
953 parents = [p.node() for p in ctx.parents() if p]
953 parents = [p.node() for p in ctx.parents() if p]
954 branch = ctx.branch()
954 branch = ctx.branch()
955 if switch_parent:
955 if switch_parent:
956 parents.reverse()
956 parents.reverse()
957
957
958 if parents:
958 if parents:
959 prev = parents[0]
959 prev = parents[0]
960 else:
960 else:
961 prev = nullid
961 prev = nullid
962
962
963 shouldclose = False
963 shouldclose = False
964 if not fp and len(template) > 0:
964 if not fp and len(template) > 0:
965 desc_lines = ctx.description().rstrip().split('\n')
965 desc_lines = ctx.description().rstrip().split('\n')
966 desc = desc_lines[0] # Commit always has a first line.
966 desc = desc_lines[0] # Commit always has a first line.
967 fp = makefileobj(repo, template, node, desc=desc, total=total,
967 fp = makefileobj(repo, template, node, desc=desc, total=total,
968 seqno=seqno, revwidth=revwidth, mode='wb',
968 seqno=seqno, revwidth=revwidth, mode='wb',
969 modemap=filemode)
969 modemap=filemode)
970 if fp != template:
970 if fp != template:
971 shouldclose = True
971 shouldclose = True
972 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
972 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
973 repo.ui.note("%s\n" % fp.name)
973 repo.ui.note("%s\n" % fp.name)
974
974
975 if not fp:
975 if not fp:
976 write = repo.ui.write
976 write = repo.ui.write
977 else:
977 else:
978 def write(s, **kw):
978 def write(s, **kw):
979 fp.write(s)
979 fp.write(s)
980
980
981 write("# HG changeset patch\n")
981 write("# HG changeset patch\n")
982 write("# User %s\n" % ctx.user())
982 write("# User %s\n" % ctx.user())
983 write("# Date %d %d\n" % ctx.date())
983 write("# Date %d %d\n" % ctx.date())
984 write("# %s\n" % util.datestr(ctx.date()))
984 write("# %s\n" % util.datestr(ctx.date()))
985 if branch and branch != 'default':
985 if branch and branch != 'default':
986 write("# Branch %s\n" % branch)
986 write("# Branch %s\n" % branch)
987 write("# Node ID %s\n" % hex(node))
987 write("# Node ID %s\n" % hex(node))
988 write("# Parent %s\n" % hex(prev))
988 write("# Parent %s\n" % hex(prev))
989 if len(parents) > 1:
989 if len(parents) > 1:
990 write("# Parent %s\n" % hex(parents[1]))
990 write("# Parent %s\n" % hex(parents[1]))
991 write(ctx.description().rstrip())
991 write(ctx.description().rstrip())
992 write("\n\n")
992 write("\n\n")
993
993
994 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
994 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
995 write(chunk, label=label)
995 write(chunk, label=label)
996
996
997 if shouldclose:
997 if shouldclose:
998 fp.close()
998 fp.close()
999
999
1000 for seqno, rev in enumerate(revs):
1000 for seqno, rev in enumerate(revs):
1001 single(rev, seqno + 1, fp)
1001 single(rev, seqno + 1, fp)
1002
1002
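# Illustrative sketch (not part of this file): the "# HG changeset patch"
# header emitted by single() above, assembled from plain values (hex node ids
# as strings, 'date' as a (unixtime, tzoffset) pair) instead of a changectx:
def _patchheader_sketch(user, date, datestr, branch, node, parents):
    lines = ['# HG changeset patch',
             '# User %s' % user,
             '# Date %d %d' % date,
             '# %s' % datestr]
    if branch and branch != 'default':
        lines.append('# Branch %s' % branch)
    lines.append('# Node ID %s' % node)
    lines.append('# Parent %s' % (parents[0] if parents else '0' * 40))
    if len(parents) > 1:
        lines.append('# Parent %s' % parents[1])
    return '\n'.join(lines)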
1003 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1003 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1004 changes=None, stat=False, fp=None, prefix='',
1004 changes=None, stat=False, fp=None, prefix='',
1005 root='', listsubrepos=False):
1005 root='', listsubrepos=False):
1006 '''show diff or diffstat.'''
1006 '''show diff or diffstat.'''
1007 if fp is None:
1007 if fp is None:
1008 write = ui.write
1008 write = ui.write
1009 else:
1009 else:
1010 def write(s, **kw):
1010 def write(s, **kw):
1011 fp.write(s)
1011 fp.write(s)
1012
1012
1013 if root:
1013 if root:
1014 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1014 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1015 else:
1015 else:
1016 relroot = ''
1016 relroot = ''
1017 if relroot != '':
1017 if relroot != '':
1018 # XXX relative roots currently don't work if the root is within a
1018 # XXX relative roots currently don't work if the root is within a
1019 # subrepo
1019 # subrepo
1020 uirelroot = match.uipath(relroot)
1020 uirelroot = match.uipath(relroot)
1021 relroot += '/'
1021 relroot += '/'
1022 for matchroot in match.files():
1022 for matchroot in match.files():
1023 if not matchroot.startswith(relroot):
1023 if not matchroot.startswith(relroot):
1024 ui.warn(_('warning: %s not inside relative root %s\n') % (
1024 ui.warn(_('warning: %s not inside relative root %s\n') % (
1025 match.uipath(matchroot), uirelroot))
1025 match.uipath(matchroot), uirelroot))
1026
1026
1027 if stat:
1027 if stat:
1028 diffopts = diffopts.copy(context=0)
1028 diffopts = diffopts.copy(context=0)
1029 width = 80
1029 width = 80
1030 if not ui.plain():
1030 if not ui.plain():
1031 width = ui.termwidth()
1031 width = ui.termwidth()
1032 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1032 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1033 prefix=prefix, relroot=relroot)
1033 prefix=prefix, relroot=relroot)
1034 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1034 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1035 width=width,
1035 width=width,
1036 git=diffopts.git):
1036 git=diffopts.git):
1037 write(chunk, label=label)
1037 write(chunk, label=label)
1038 else:
1038 else:
1039 for chunk, label in patch.diffui(repo, node1, node2, match,
1039 for chunk, label in patch.diffui(repo, node1, node2, match,
1040 changes, diffopts, prefix=prefix,
1040 changes, diffopts, prefix=prefix,
1041 relroot=relroot):
1041 relroot=relroot):
1042 write(chunk, label=label)
1042 write(chunk, label=label)
1043
1043
1044 if listsubrepos:
1044 if listsubrepos:
1045 ctx1 = repo[node1]
1045 ctx1 = repo[node1]
1046 ctx2 = repo[node2]
1046 ctx2 = repo[node2]
1047 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1047 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1048 tempnode2 = node2
1048 tempnode2 = node2
1049 try:
1049 try:
1050 if node2 is not None:
1050 if node2 is not None:
1051 tempnode2 = ctx2.substate[subpath][1]
1051 tempnode2 = ctx2.substate[subpath][1]
1052 except KeyError:
1052 except KeyError:
1053 # A subrepo that existed in node1 was deleted between node1 and
1053 # A subrepo that existed in node1 was deleted between node1 and
1054 # node2 (inclusive). Thus, ctx2's substate won't contain that
1054 # node2 (inclusive). Thus, ctx2's substate won't contain that
1055 # subpath. The best we can do is to ignore it.
1055 # subpath. The best we can do is to ignore it.
1056 tempnode2 = None
1056 tempnode2 = None
1057 submatch = matchmod.narrowmatcher(subpath, match)
1057 submatch = matchmod.narrowmatcher(subpath, match)
1058 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1058 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1059 stat=stat, fp=fp, prefix=prefix)
1059 stat=stat, fp=fp, prefix=prefix)
1060
1060
1061 class changeset_printer(object):
1061 class changeset_printer(object):
1062 '''show changeset information when templating not requested.'''
1062 '''show changeset information when templating not requested.'''
1063
1063
1064 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1064 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1065 self.ui = ui
1065 self.ui = ui
1066 self.repo = repo
1066 self.repo = repo
1067 self.buffered = buffered
1067 self.buffered = buffered
1068 self.matchfn = matchfn
1068 self.matchfn = matchfn
1069 self.diffopts = diffopts
1069 self.diffopts = diffopts
1070 self.header = {}
1070 self.header = {}
1071 self.hunk = {}
1071 self.hunk = {}
1072 self.lastheader = None
1072 self.lastheader = None
1073 self.footer = None
1073 self.footer = None
1074
1074
1075 def flush(self, rev):
1075 def flush(self, rev):
1076 if rev in self.header:
1076 if rev in self.header:
1077 h = self.header[rev]
1077 h = self.header[rev]
1078 if h != self.lastheader:
1078 if h != self.lastheader:
1079 self.lastheader = h
1079 self.lastheader = h
1080 self.ui.write(h)
1080 self.ui.write(h)
1081 del self.header[rev]
1081 del self.header[rev]
1082 if rev in self.hunk:
1082 if rev in self.hunk:
1083 self.ui.write(self.hunk[rev])
1083 self.ui.write(self.hunk[rev])
1084 del self.hunk[rev]
1084 del self.hunk[rev]
1085 return 1
1085 return 1
1086 return 0
1086 return 0
1087
1087
1088 def close(self):
1088 def close(self):
1089 if self.footer:
1089 if self.footer:
1090 self.ui.write(self.footer)
1090 self.ui.write(self.footer)
1091
1091
1092 def show(self, ctx, copies=None, matchfn=None, **props):
1092 def show(self, ctx, copies=None, matchfn=None, **props):
1093 if self.buffered:
1093 if self.buffered:
1094 self.ui.pushbuffer()
1094 self.ui.pushbuffer()
1095 self._show(ctx, copies, matchfn, props)
1095 self._show(ctx, copies, matchfn, props)
1096 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
1096 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
1097 else:
1097 else:
1098 self._show(ctx, copies, matchfn, props)
1098 self._show(ctx, copies, matchfn, props)
1099
1099
1100 def _show(self, ctx, copies, matchfn, props):
1100 def _show(self, ctx, copies, matchfn, props):
1101 '''show a single changeset or file revision'''
1101 '''show a single changeset or file revision'''
1102 changenode = ctx.node()
1102 changenode = ctx.node()
1103 rev = ctx.rev()
1103 rev = ctx.rev()
1104
1104
1105 if self.ui.quiet:
1105 if self.ui.quiet:
1106 self.ui.write("%d:%s\n" % (rev, short(changenode)),
1106 self.ui.write("%d:%s\n" % (rev, short(changenode)),
1107 label='log.node')
1107 label='log.node')
1108 return
1108 return
1109
1109
1110 date = util.datestr(ctx.date())
1110 date = util.datestr(ctx.date())
1111
1111
1112 if self.ui.debugflag:
1112 if self.ui.debugflag:
1113 hexfunc = hex
1113 hexfunc = hex
1114 else:
1114 else:
1115 hexfunc = short
1115 hexfunc = short
1116
1116
1117 # i18n: column positioning for "hg log"
1117 # i18n: column positioning for "hg log"
1118 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
1118 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
1119 label='log.changeset changeset.%s' % ctx.phasestr())
1119 label='log.changeset changeset.%s' % ctx.phasestr())
1120
1120
1121 # branches are shown first before any other names due to backwards
1121 # branches are shown first before any other names due to backwards
1122 # compatibility
1122 # compatibility
1123 branch = ctx.branch()
1123 branch = ctx.branch()
1124 # don't show the default branch name
1124 # don't show the default branch name
1125 if branch != 'default':
1125 if branch != 'default':
1126 # i18n: column positioning for "hg log"
1126 # i18n: column positioning for "hg log"
1127 self.ui.write(_("branch: %s\n") % branch,
1127 self.ui.write(_("branch: %s\n") % branch,
1128 label='log.branch')
1128 label='log.branch')
1129
1129
1130 for name, ns in self.repo.names.iteritems():
1130 for name, ns in self.repo.names.iteritems():
1131 # the 'branches' namespace has special logic, handled above, so here we
1131 # the 'branches' namespace has special logic, handled above, so here we
1132 # just skip it
1132 # just skip it
1133 if name == 'branches':
1133 if name == 'branches':
1134 continue
1134 continue
1135 # we will use the templatename as the color name since those two
1135 # we will use the templatename as the color name since those two
1136 # should be the same
1136 # should be the same
1137 for name in ns.names(self.repo, changenode):
1137 for name in ns.names(self.repo, changenode):
1138 self.ui.write(ns.logfmt % name,
1138 self.ui.write(ns.logfmt % name,
1139 label='log.%s' % ns.colorname)
1139 label='log.%s' % ns.colorname)
1140 if self.ui.debugflag:
1140 if self.ui.debugflag:
1141 # i18n: column positioning for "hg log"
1141 # i18n: column positioning for "hg log"
1142 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
1142 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
1143 label='log.phase')
1143 label='log.phase')
1144 for pctx in self._meaningful_parentrevs(ctx):
1144 for pctx in self._meaningful_parentrevs(ctx):
1145 label = 'log.parent changeset.%s' % pctx.phasestr()
1145 label = 'log.parent changeset.%s' % pctx.phasestr()
1146 # i18n: column positioning for "hg log"
1146 # i18n: column positioning for "hg log"
1147 self.ui.write(_("parent: %d:%s\n")
1147 self.ui.write(_("parent: %d:%s\n")
1148 % (pctx.rev(), hexfunc(pctx.node())),
1148 % (pctx.rev(), hexfunc(pctx.node())),
1149 label=label)
1149 label=label)
1150
1150
1151 if self.ui.debugflag:
1151 if self.ui.debugflag:
1152 mnode = ctx.manifestnode()
1152 mnode = ctx.manifestnode()
1153 # i18n: column positioning for "hg log"
1153 # i18n: column positioning for "hg log"
1154 self.ui.write(_("manifest: %d:%s\n") %
1154 self.ui.write(_("manifest: %d:%s\n") %
1155 (self.repo.manifest.rev(mnode), hex(mnode)),
1155 (self.repo.manifest.rev(mnode), hex(mnode)),
1156 label='ui.debug log.manifest')
1156 label='ui.debug log.manifest')
1157 # i18n: column positioning for "hg log"
1157 # i18n: column positioning for "hg log"
1158 self.ui.write(_("user: %s\n") % ctx.user(),
1158 self.ui.write(_("user: %s\n") % ctx.user(),
1159 label='log.user')
1159 label='log.user')
1160 # i18n: column positioning for "hg log"
1160 # i18n: column positioning for "hg log"
1161 self.ui.write(_("date: %s\n") % date,
1161 self.ui.write(_("date: %s\n") % date,
1162 label='log.date')
1162 label='log.date')
1163
1163
1164 if self.ui.debugflag:
1164 if self.ui.debugflag:
1165 files = ctx.p1().status(ctx)[:3]
1165 files = ctx.p1().status(ctx)[:3]
1166 for key, value in zip([# i18n: column positioning for "hg log"
1166 for key, value in zip([# i18n: column positioning for "hg log"
1167 _("files:"),
1167 _("files:"),
1168 # i18n: column positioning for "hg log"
1168 # i18n: column positioning for "hg log"
1169 _("files+:"),
1169 _("files+:"),
1170 # i18n: column positioning for "hg log"
1170 # i18n: column positioning for "hg log"
1171 _("files-:")], files):
1171 _("files-:")], files):
1172 if value:
1172 if value:
1173 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1173 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1174 label='ui.debug log.files')
1174 label='ui.debug log.files')
1175 elif ctx.files() and self.ui.verbose:
1175 elif ctx.files() and self.ui.verbose:
1176 # i18n: column positioning for "hg log"
1176 # i18n: column positioning for "hg log"
1177 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1177 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1178 label='ui.note log.files')
1178 label='ui.note log.files')
1179 if copies and self.ui.verbose:
1179 if copies and self.ui.verbose:
1180 copies = ['%s (%s)' % c for c in copies]
1180 copies = ['%s (%s)' % c for c in copies]
1181 # i18n: column positioning for "hg log"
1181 # i18n: column positioning for "hg log"
1182 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1182 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1183 label='ui.note log.copies')
1183 label='ui.note log.copies')
1184
1184
1185 extra = ctx.extra()
1185 extra = ctx.extra()
1186 if extra and self.ui.debugflag:
1186 if extra and self.ui.debugflag:
1187 for key, value in sorted(extra.items()):
1187 for key, value in sorted(extra.items()):
1188 # i18n: column positioning for "hg log"
1188 # i18n: column positioning for "hg log"
1189 self.ui.write(_("extra: %s=%s\n")
1189 self.ui.write(_("extra: %s=%s\n")
1190 % (key, value.encode('string_escape')),
1190 % (key, value.encode('string_escape')),
1191 label='ui.debug log.extra')
1191 label='ui.debug log.extra')
1192
1192
1193 description = ctx.description().strip()
1193 description = ctx.description().strip()
1194 if description:
1194 if description:
1195 if self.ui.verbose:
1195 if self.ui.verbose:
1196 self.ui.write(_("description:\n"),
1196 self.ui.write(_("description:\n"),
1197 label='ui.note log.description')
1197 label='ui.note log.description')
1198 self.ui.write(description,
1198 self.ui.write(description,
1199 label='ui.note log.description')
1199 label='ui.note log.description')
1200 self.ui.write("\n\n")
1200 self.ui.write("\n\n")
1201 else:
1201 else:
1202 # i18n: column positioning for "hg log"
1202 # i18n: column positioning for "hg log"
1203 self.ui.write(_("summary: %s\n") %
1203 self.ui.write(_("summary: %s\n") %
1204 description.splitlines()[0],
1204 description.splitlines()[0],
1205 label='log.summary')
1205 label='log.summary')
1206 self.ui.write("\n")
1206 self.ui.write("\n")
1207
1207
1208 self.showpatch(changenode, matchfn)
1208 self.showpatch(changenode, matchfn)
1209
1209
1210 def showpatch(self, node, matchfn):
1210 def showpatch(self, node, matchfn):
1211 if not matchfn:
1211 if not matchfn:
1212 matchfn = self.matchfn
1212 matchfn = self.matchfn
1213 if matchfn:
1213 if matchfn:
1214 stat = self.diffopts.get('stat')
1214 stat = self.diffopts.get('stat')
1215 diff = self.diffopts.get('patch')
1215 diff = self.diffopts.get('patch')
1216 diffopts = patch.diffallopts(self.ui, self.diffopts)
1216 diffopts = patch.diffallopts(self.ui, self.diffopts)
1217 prev = self.repo.changelog.parents(node)[0]
1217 prev = self.repo.changelog.parents(node)[0]
1218 if stat:
1218 if stat:
1219 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1219 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1220 match=matchfn, stat=True)
1220 match=matchfn, stat=True)
1221 if diff:
1221 if diff:
1222 if stat:
1222 if stat:
1223 self.ui.write("\n")
1223 self.ui.write("\n")
1224 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1224 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1225 match=matchfn, stat=False)
1225 match=matchfn, stat=False)
1226 self.ui.write("\n")
1226 self.ui.write("\n")
1227
1227
1228 def _meaningful_parentrevs(self, ctx):
1228 def _meaningful_parentrevs(self, ctx):
1229 """Return list of meaningful (or all if debug) parentrevs for rev.
1229 """Return list of meaningful (or all if debug) parentrevs for rev.
1230
1230
1231 For merges (two non-nullrev revisions) both parents are meaningful.
1231 For merges (two non-nullrev revisions) both parents are meaningful.
1232 Otherwise the first parent revision is considered meaningful if it
1232 Otherwise the first parent revision is considered meaningful if it
1233 is not the preceding revision.
1233 is not the preceding revision.
1234 """
1234 """
1235 parents = ctx.parents()
1235 parents = ctx.parents()
1236 if len(parents) > 1:
1236 if len(parents) > 1:
1237 return parents
1237 return parents
1238 if self.ui.debugflag:
1238 if self.ui.debugflag:
1239 return [parents[0], self.repo['null']]
1239 return [parents[0], self.repo['null']]
1240 if parents[0].rev() >= ctx.rev() - 1:
1240 if parents[0].rev() >= ctx.rev() - 1:
1241 return []
1241 return []
1242 return parents
1242 return parents
1243
1243
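# Illustrative sketch (not part of this file): the "meaningful parents" rule
# above, restated over plain revision numbers -- a single parent is only
# worth printing when it is not simply the immediately preceding revision:
def _meaningful_parents_sketch(rev, parentrevs, debug=False):
    if len(parentrevs) > 1:  # a merge: both parents matter
        return parentrevs
    if debug:                # debug output always shows the null parent too
        return [parentrevs[0], -1]  # -1 standing in for the null revision
    if parentrevs[0] >= rev - 1:  # linear history: nothing interesting
        return []
    return parentrevs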
1244 class jsonchangeset(changeset_printer):
1244 class jsonchangeset(changeset_printer):
1245 '''format changeset information.'''
1245 '''format changeset information.'''
1246
1246
1247 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1247 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1248 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1248 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1249 self.cache = {}
1249 self.cache = {}
1250 self._first = True
1250 self._first = True
1251
1251
1252 def close(self):
1252 def close(self):
1253 if not self._first:
1253 if not self._first:
1254 self.ui.write("\n]\n")
1254 self.ui.write("\n]\n")
1255 else:
1255 else:
1256 self.ui.write("[]\n")
1256 self.ui.write("[]\n")
1257
1257
1258 def _show(self, ctx, copies, matchfn, props):
1258 def _show(self, ctx, copies, matchfn, props):
1259 '''show a single changeset or file revision'''
1259 '''show a single changeset or file revision'''
1260 hexnode = hex(ctx.node())
1260 hexnode = hex(ctx.node())
1261 rev = ctx.rev()
1261 rev = ctx.rev()
1262 j = encoding.jsonescape
1262 j = encoding.jsonescape
1263
1263
1264 if self._first:
1264 if self._first:
1265 self.ui.write("[\n {")
1265 self.ui.write("[\n {")
1266 self._first = False
1266 self._first = False
1267 else:
1267 else:
1268 self.ui.write(",\n {")
1268 self.ui.write(",\n {")
1269
1269
1270 if self.ui.quiet:
1270 if self.ui.quiet:
1271 self.ui.write('\n "rev": %d' % rev)
1271 self.ui.write('\n "rev": %d' % rev)
1272 self.ui.write(',\n "node": "%s"' % hexnode)
1272 self.ui.write(',\n "node": "%s"' % hexnode)
1273 self.ui.write('\n }')
1273 self.ui.write('\n }')
1274 return
1274 return
1275
1275
1276 self.ui.write('\n "rev": %d' % rev)
1276 self.ui.write('\n "rev": %d' % rev)
1277 self.ui.write(',\n "node": "%s"' % hexnode)
1277 self.ui.write(',\n "node": "%s"' % hexnode)
1278 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1278 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1279 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1279 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1280 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1280 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1281 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1281 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1282 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1282 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1283
1283
1284 self.ui.write(',\n "bookmarks": [%s]' %
1284 self.ui.write(',\n "bookmarks": [%s]' %
1285 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1285 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1286 self.ui.write(',\n "tags": [%s]' %
1286 self.ui.write(',\n "tags": [%s]' %
1287 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1287 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1288 self.ui.write(',\n "parents": [%s]' %
1288 self.ui.write(',\n "parents": [%s]' %
1289 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1289 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1290
1290
1291 if self.ui.debugflag:
1291 if self.ui.debugflag:
1292 self.ui.write(',\n "manifest": "%s"' % hex(ctx.manifestnode()))
1292 self.ui.write(',\n "manifest": "%s"' % hex(ctx.manifestnode()))
1293
1293
1294 self.ui.write(',\n "extra": {%s}' %
1294 self.ui.write(',\n "extra": {%s}' %
1295 ", ".join('"%s": "%s"' % (j(k), j(v))
1295 ", ".join('"%s": "%s"' % (j(k), j(v))
1296 for k, v in ctx.extra().items()))
1296 for k, v in ctx.extra().items()))
1297
1297
1298 files = ctx.p1().status(ctx)
1298 files = ctx.p1().status(ctx)
1299 self.ui.write(',\n "modified": [%s]' %
1299 self.ui.write(',\n "modified": [%s]' %
1300 ", ".join('"%s"' % j(f) for f in files[0]))
1300 ", ".join('"%s"' % j(f) for f in files[0]))
1301 self.ui.write(',\n "added": [%s]' %
1301 self.ui.write(',\n "added": [%s]' %
1302 ", ".join('"%s"' % j(f) for f in files[1]))
1302 ", ".join('"%s"' % j(f) for f in files[1]))
1303 self.ui.write(',\n "removed": [%s]' %
1303 self.ui.write(',\n "removed": [%s]' %
1304 ", ".join('"%s"' % j(f) for f in files[2]))
1304 ", ".join('"%s"' % j(f) for f in files[2]))
1305
1305
1306 elif self.ui.verbose:
1306 elif self.ui.verbose:
1307 self.ui.write(',\n "files": [%s]' %
1307 self.ui.write(',\n "files": [%s]' %
1308 ", ".join('"%s"' % j(f) for f in ctx.files()))
1308 ", ".join('"%s"' % j(f) for f in ctx.files()))
1309
1309
1310 if copies:
1310 if copies:
1311 self.ui.write(',\n "copies": {%s}' %
1311 self.ui.write(',\n "copies": {%s}' %
1312 ", ".join('"%s": "%s"' % (j(k), j(v))
1312 ", ".join('"%s": "%s"' % (j(k), j(v))
1313 for k, v in copies))
1313 for k, v in copies))
1314
1314
1315 matchfn = self.matchfn
1315 matchfn = self.matchfn
1316 if matchfn:
1316 if matchfn:
1317 stat = self.diffopts.get('stat')
1317 stat = self.diffopts.get('stat')
1318 diff = self.diffopts.get('patch')
1318 diff = self.diffopts.get('patch')
1319 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1319 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1320 node, prev = ctx.node(), ctx.p1().node()
1320 node, prev = ctx.node(), ctx.p1().node()
1321 if stat:
1321 if stat:
1322 self.ui.pushbuffer()
1322 self.ui.pushbuffer()
1323 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1323 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1324 match=matchfn, stat=True)
1324 match=matchfn, stat=True)
1325 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1325 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1326 if diff:
1326 if diff:
1327 self.ui.pushbuffer()
1327 self.ui.pushbuffer()
1328 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1328 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1329 match=matchfn, stat=False)
1329 match=matchfn, stat=False)
1330 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1330 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1331
1331
1332 self.ui.write("\n }")
1332 self.ui.write("\n }")
1333
1333
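For orientation, a hedged sketch of what the quiet branch of _show() above emits per entry (the non-quiet branches add "branch", "phase", "user", "date", "desc", "bookmarks", "tags", "parents" and, with --verbose/--debug, file lists, "extra" and diff data):

    [
     {
      "rev": 0,
      "node": "<40-character hex changeset id>"
     }
    ]
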
1334 class changeset_templater(changeset_printer):
1334 class changeset_templater(changeset_printer):
1335 '''format changeset information.'''
1335 '''format changeset information.'''
1336
1336
1337 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1337 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1338 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1338 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1339 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1339 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1340 defaulttempl = {
1340 defaulttempl = {
1341 'parent': '{rev}:{node|formatnode} ',
1341 'parent': '{rev}:{node|formatnode} ',
1342 'manifest': '{rev}:{node|formatnode}',
1342 'manifest': '{rev}:{node|formatnode}',
1343 'file_copy': '{name} ({source})',
1343 'file_copy': '{name} ({source})',
1344 'extra': '{key}={value|stringescape}'
1344 'extra': '{key}={value|stringescape}'
1345 }
1345 }
1346 # filecopy is preserved for compatibility reasons
1346 # filecopy is preserved for compatibility reasons
1347 defaulttempl['filecopy'] = defaulttempl['file_copy']
1347 defaulttempl['filecopy'] = defaulttempl['file_copy']
1348 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1348 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1349 cache=defaulttempl)
1349 cache=defaulttempl)
1350 if tmpl:
1350 if tmpl:
1351 self.t.cache['changeset'] = tmpl
1351 self.t.cache['changeset'] = tmpl
1352
1352
1353 self.cache = {}
1353 self.cache = {}
1354
1354
1355 def _show(self, ctx, copies, matchfn, props):
1355 def _show(self, ctx, copies, matchfn, props):
1356 '''show a single changeset or file revision'''
1356 '''show a single changeset or file revision'''
1357
1357
1358 showlist = templatekw.showlist
1358 showlist = templatekw.showlist
1359
1359
1360 # showparents() behaviour depends on ui trace level which
1360 # showparents() behaviour depends on ui trace level which
1361 # causes unexpected behaviours at templating level and makes
1361 # causes unexpected behaviours at templating level and makes
1362 # it harder to extract into a standalone function. Its
1362 # it harder to extract into a standalone function. Its
1363 # behaviour cannot be changed so leave it here for now.
1363 # behaviour cannot be changed so leave it here for now.
1364 def showparents(**args):
1364 def showparents(**args):
1365 ctx = args['ctx']
1365 ctx = args['ctx']
1366 parents = [[('rev', p.rev()),
1366 parents = [[('rev', p.rev()),
1367 ('node', p.hex()),
1367 ('node', p.hex()),
1368 ('phase', p.phasestr())]
1368 ('phase', p.phasestr())]
1369 for p in self._meaningful_parentrevs(ctx)]
1369 for p in self._meaningful_parentrevs(ctx)]
1370 return showlist('parent', parents, **args)
1370 return showlist('parent', parents, **args)
1371
1371
1372 props = props.copy()
1372 props = props.copy()
1373 props.update(templatekw.keywords)
1373 props.update(templatekw.keywords)
1374 props['parents'] = showparents
1374 props['parents'] = showparents
1375 props['templ'] = self.t
1375 props['templ'] = self.t
1376 props['ctx'] = ctx
1376 props['ctx'] = ctx
1377 props['repo'] = self.repo
1377 props['repo'] = self.repo
1378 props['revcache'] = {'copies': copies}
1378 props['revcache'] = {'copies': copies}
1379 props['cache'] = self.cache
1379 props['cache'] = self.cache
1380
1380
1381 # find correct templates for current mode
1381 # find correct templates for current mode
1382
1382
1383 tmplmodes = [
1383 tmplmodes = [
1384 (True, None),
1384 (True, None),
1385 (self.ui.verbose, 'verbose'),
1385 (self.ui.verbose, 'verbose'),
1386 (self.ui.quiet, 'quiet'),
1386 (self.ui.quiet, 'quiet'),
1387 (self.ui.debugflag, 'debug'),
1387 (self.ui.debugflag, 'debug'),
1388 ]
1388 ]
1389
1389
1390 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1390 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1391 for mode, postfix in tmplmodes:
1391 for mode, postfix in tmplmodes:
1392 for type in types:
1392 for type in types:
1393 cur = postfix and ('%s_%s' % (type, postfix)) or type
1393 cur = postfix and ('%s_%s' % (type, postfix)) or type
1394 if mode and cur in self.t:
1394 if mode and cur in self.t:
1395 types[type] = cur
1395 types[type] = cur
1396
1396
1397 try:
1397 try:
1398
1398
1399 # write header
1399 # write header
1400 if types['header']:
1400 if types['header']:
1401 h = templater.stringify(self.t(types['header'], **props))
1401 h = templater.stringify(self.t(types['header'], **props))
1402 if self.buffered:
1402 if self.buffered:
1403 self.header[ctx.rev()] = h
1403 self.header[ctx.rev()] = h
1404 else:
1404 else:
1405 if self.lastheader != h:
1405 if self.lastheader != h:
1406 self.lastheader = h
1406 self.lastheader = h
1407 self.ui.write(h)
1407 self.ui.write(h)
1408
1408
1409 # write changeset metadata, then patch if requested
1409 # write changeset metadata, then patch if requested
1410 key = types['changeset']
1410 key = types['changeset']
1411 self.ui.write(templater.stringify(self.t(key, **props)))
1411 self.ui.write(templater.stringify(self.t(key, **props)))
1412 self.showpatch(ctx.node(), matchfn)
1412 self.showpatch(ctx.node(), matchfn)
1413
1413
1414 if types['footer']:
1414 if types['footer']:
1415 if not self.footer:
1415 if not self.footer:
1416 self.footer = templater.stringify(self.t(types['footer'],
1416 self.footer = templater.stringify(self.t(types['footer'],
1417 **props))
1417 **props))
1418
1418
1419 except KeyError, inst:
1419 except KeyError, inst:
1420 msg = _("%s: no key named '%s'")
1420 msg = _("%s: no key named '%s'")
1421 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1421 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1422 except SyntaxError, inst:
1422 except SyntaxError, inst:
1423 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1423 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1424
1424
1425 def gettemplate(ui, tmpl, style):
1425 def gettemplate(ui, tmpl, style):
1426 """
1426 """
1427 Find the template matching the given template spec or style.
1427 Find the template matching the given template spec or style.
1428 """
1428 """
1429
1429
1430 # ui settings
1430 # ui settings
1431 if not tmpl and not style: # templates are stronger than styles
1431 if not tmpl and not style: # templates are stronger than styles
1432 tmpl = ui.config('ui', 'logtemplate')
1432 tmpl = ui.config('ui', 'logtemplate')
1433 if tmpl:
1433 if tmpl:
1434 try:
1434 try:
1435 tmpl = templater.parsestring(tmpl)
1435 tmpl = templater.parsestring(tmpl)
1436 except SyntaxError:
1436 except SyntaxError:
1437 tmpl = templater.parsestring(tmpl, quoted=False)
1437 tmpl = templater.parsestring(tmpl, quoted=False)
1438 return tmpl, None
1438 return tmpl, None
1439 else:
1439 else:
1440 style = util.expandpath(ui.config('ui', 'style', ''))
1440 style = util.expandpath(ui.config('ui', 'style', ''))
1441
1441
1442 if not tmpl and style:
1442 if not tmpl and style:
1443 mapfile = style
1443 mapfile = style
1444 if not os.path.split(mapfile)[0]:
1444 if not os.path.split(mapfile)[0]:
1445 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1445 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1446 or templater.templatepath(mapfile))
1446 or templater.templatepath(mapfile))
1447 if mapname:
1447 if mapname:
1448 mapfile = mapname
1448 mapfile = mapname
1449 return None, mapfile
1449 return None, mapfile
1450
1450
1451 if not tmpl:
1451 if not tmpl:
1452 return None, None
1452 return None, None
1453
1453
1454 # looks like a literal template?
1454 # looks like a literal template?
1455 if '{' in tmpl:
1455 if '{' in tmpl:
1456 return tmpl, None
1456 return tmpl, None
1457
1457
1458 # perhaps a stock style?
1458 # perhaps a stock style?
1459 if not os.path.split(tmpl)[0]:
1459 if not os.path.split(tmpl)[0]:
1460 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1460 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1461 or templater.templatepath(tmpl))
1461 or templater.templatepath(tmpl))
1462 if mapname and os.path.isfile(mapname):
1462 if mapname and os.path.isfile(mapname):
1463 return None, mapname
1463 return None, mapname
1464
1464
1465 # perhaps it's a reference to [templates]
1465 # perhaps it's a reference to [templates]
1466 t = ui.config('templates', tmpl)
1466 t = ui.config('templates', tmpl)
1467 if t:
1467 if t:
1468 try:
1468 try:
1469 tmpl = templater.parsestring(t)
1469 tmpl = templater.parsestring(t)
1470 except SyntaxError:
1470 except SyntaxError:
1471 tmpl = templater.parsestring(t, quoted=False)
1471 tmpl = templater.parsestring(t, quoted=False)
1472 return tmpl, None
1472 return tmpl, None
1473
1473
1474 if tmpl == 'list':
1474 if tmpl == 'list':
1475 ui.write(_("available styles: %s\n") % templater.stylelist())
1475 ui.write(_("available styles: %s\n") % templater.stylelist())
1476 raise util.Abort(_("specify a template"))
1476 raise util.Abort(_("specify a template"))
1477
1477
1478 # perhaps it's a path to a map or a template
1478 # perhaps it's a path to a map or a template
1479 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1479 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1480 # is it a mapfile for a style?
1480 # is it a mapfile for a style?
1481 if os.path.basename(tmpl).startswith("map-"):
1481 if os.path.basename(tmpl).startswith("map-"):
1482 return None, os.path.realpath(tmpl)
1482 return None, os.path.realpath(tmpl)
1483 tmpl = open(tmpl).read()
1483 tmpl = open(tmpl).read()
1484 return tmpl, None
1484 return tmpl, None
1485
1485
1486 # constant string?
1486 # constant string?
1487 return tmpl, None
1487 return tmpl, None
1488
1488
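A hedged usage sketch of the lookup order above, assuming a Mercurial tree of this vintage where the module is importable as mercurial.cmdutil: a spec containing '{' is taken as a literal template, while a bare name is resolved against the stock map-cmdline styles.

    from mercurial import ui as uimod, cmdutil

    u = uimod.ui()
    # literal template: returned as-is, no map file
    print(cmdutil.gettemplate(u, '{rev}:{node|short}\n', None))
    # stock style name: returns (None, path to templates/map-cmdline.compact)
    print(cmdutil.gettemplate(u, None, 'compact'))
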
1489 def show_changeset(ui, repo, opts, buffered=False):
1489 def show_changeset(ui, repo, opts, buffered=False):
1490 """show one changeset using template or regular display.
1490 """show one changeset using template or regular display.
1491
1491
1492 Display format will be the first non-empty hit of:
1492 Display format will be the first non-empty hit of:
1493 1. option 'template'
1493 1. option 'template'
1494 2. option 'style'
1494 2. option 'style'
1495 3. [ui] setting 'logtemplate'
1495 3. [ui] setting 'logtemplate'
1496 4. [ui] setting 'style'
1496 4. [ui] setting 'style'
1497 If all of these values are either unset or the empty string,
1497 If all of these values are either unset or the empty string,
1498 regular display via changeset_printer() is done.
1498 regular display via changeset_printer() is done.
1499 """
1499 """
1500 # options
1500 # options
1501 matchfn = None
1501 matchfn = None
1502 if opts.get('patch') or opts.get('stat'):
1502 if opts.get('patch') or opts.get('stat'):
1503 matchfn = scmutil.matchall(repo)
1503 matchfn = scmutil.matchall(repo)
1504
1504
1505 if opts.get('template') == 'json':
1505 if opts.get('template') == 'json':
1506 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1506 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1507
1507
1508 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1508 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1509
1509
1510 if not tmpl and not mapfile:
1510 if not tmpl and not mapfile:
1511 return changeset_printer(ui, repo, matchfn, opts, buffered)
1511 return changeset_printer(ui, repo, matchfn, opts, buffered)
1512
1512
1513 try:
1513 try:
1514 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1514 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1515 buffered)
1515 buffered)
1516 except SyntaxError, inst:
1516 except SyntaxError, inst:
1517 raise util.Abort(inst.args[0])
1517 raise util.Abort(inst.args[0])
1518 return t
1518 return t
1519
1519
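A minimal, hedged example of driving the factory above (the repository path and template string are illustrative): show_changeset() picks the appropriate printer, and callers render revisions through its show() and close() methods.

    from mercurial import ui as uimod, hg, cmdutil

    u = uimod.ui()
    repo = hg.repository(u, '.')
    displayer = cmdutil.show_changeset(u, repo,
                                       {'template': '{rev}: {desc|firstline}\n'})
    displayer.show(repo['tip'])
    displayer.close()
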
1520 def showmarker(ui, marker):
1520 def showmarker(ui, marker):
1521 """utility function to display obsolescence marker in a readable way
1521 """utility function to display obsolescence marker in a readable way
1522
1522
1523 To be used by debug function."""
1523 To be used by debug function."""
1524 ui.write(hex(marker.precnode()))
1524 ui.write(hex(marker.precnode()))
1525 for repl in marker.succnodes():
1525 for repl in marker.succnodes():
1526 ui.write(' ')
1526 ui.write(' ')
1527 ui.write(hex(repl))
1527 ui.write(hex(repl))
1528 ui.write(' %X ' % marker.flags())
1528 ui.write(' %X ' % marker.flags())
1529 parents = marker.parentnodes()
1529 parents = marker.parentnodes()
1530 if parents is not None:
1530 if parents is not None:
1531 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1531 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1532 ui.write('(%s) ' % util.datestr(marker.date()))
1532 ui.write('(%s) ' % util.datestr(marker.date()))
1533 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1533 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1534 sorted(marker.metadata().items())
1534 sorted(marker.metadata().items())
1535 if t[0] != 'date')))
1535 if t[0] != 'date')))
1536 ui.write('\n')
1536 ui.write('\n')
1537
1537
1538 def finddate(ui, repo, date):
1538 def finddate(ui, repo, date):
1539 """Find the tipmost changeset that matches the given date spec"""
1539 """Find the tipmost changeset that matches the given date spec"""
1540
1540
1541 df = util.matchdate(date)
1541 df = util.matchdate(date)
1542 m = scmutil.matchall(repo)
1542 m = scmutil.matchall(repo)
1543 results = {}
1543 results = {}
1544
1544
1545 def prep(ctx, fns):
1545 def prep(ctx, fns):
1546 d = ctx.date()
1546 d = ctx.date()
1547 if df(d[0]):
1547 if df(d[0]):
1548 results[ctx.rev()] = d
1548 results[ctx.rev()] = d
1549
1549
1550 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1550 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1551 rev = ctx.rev()
1551 rev = ctx.rev()
1552 if rev in results:
1552 if rev in results:
1553 ui.status(_("found revision %s from %s\n") %
1553 ui.status(_("found revision %s from %s\n") %
1554 (rev, util.datestr(results[rev])))
1554 (rev, util.datestr(results[rev])))
1555 return str(rev)
1555 return str(rev)
1556
1556
1557 raise util.Abort(_("revision matching date not found"))
1557 raise util.Abort(_("revision matching date not found"))
1558
1558
1559 def increasingwindows(windowsize=8, sizelimit=512):
1559 def increasingwindows(windowsize=8, sizelimit=512):
1560 while True:
1560 while True:
1561 yield windowsize
1561 yield windowsize
1562 if windowsize < sizelimit:
1562 if windowsize < sizelimit:
1563 windowsize *= 2
1563 windowsize *= 2
1564
1564
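A quick, hedged illustration of the generator above: window sizes double from 8 until they hit the 512 cap and then stay there.

    from itertools import islice
    from mercurial import cmdutil

    print(list(islice(cmdutil.increasingwindows(), 9)))
    # -> [8, 16, 32, 64, 128, 256, 512, 512, 512]
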
1565 class FileWalkError(Exception):
1565 class FileWalkError(Exception):
1566 pass
1566 pass
1567
1567
1568 def walkfilerevs(repo, match, follow, revs, fncache):
1568 def walkfilerevs(repo, match, follow, revs, fncache):
1569 '''Walks the file history for the matched files.
1569 '''Walks the file history for the matched files.
1570
1570
1571 Returns the changeset revs that are involved in the file history.
1571 Returns the changeset revs that are involved in the file history.
1572
1572
1573 Raises FileWalkError if the file history can't be walked using
1573 Raises FileWalkError if the file history can't be walked using
1574 filelogs alone.
1574 filelogs alone.
1575 '''
1575 '''
1576 wanted = set()
1576 wanted = set()
1577 copies = []
1577 copies = []
1578 minrev, maxrev = min(revs), max(revs)
1578 minrev, maxrev = min(revs), max(revs)
1579 def filerevgen(filelog, last):
1579 def filerevgen(filelog, last):
1580 """
1580 """
1581 Only files, no patterns. Check the history of each file.
1581 Only files, no patterns. Check the history of each file.
1582
1582
1583 Examines filelog entries within minrev, maxrev linkrev range
1583 Examines filelog entries within minrev, maxrev linkrev range
1584 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1584 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1585 tuples in backwards order
1585 tuples in backwards order
1586 """
1586 """
1587 cl_count = len(repo)
1587 cl_count = len(repo)
1588 revs = []
1588 revs = []
1589 for j in xrange(0, last + 1):
1589 for j in xrange(0, last + 1):
1590 linkrev = filelog.linkrev(j)
1590 linkrev = filelog.linkrev(j)
1591 if linkrev < minrev:
1591 if linkrev < minrev:
1592 continue
1592 continue
1593 # only yield revs for which we have the changelog; this can
1593 # only yield revs for which we have the changelog; this can
1594 # happen while doing "hg log" during a pull or commit
1594 # happen while doing "hg log" during a pull or commit
1595 if linkrev >= cl_count:
1595 if linkrev >= cl_count:
1596 break
1596 break
1597
1597
1598 parentlinkrevs = []
1598 parentlinkrevs = []
1599 for p in filelog.parentrevs(j):
1599 for p in filelog.parentrevs(j):
1600 if p != nullrev:
1600 if p != nullrev:
1601 parentlinkrevs.append(filelog.linkrev(p))
1601 parentlinkrevs.append(filelog.linkrev(p))
1602 n = filelog.node(j)
1602 n = filelog.node(j)
1603 revs.append((linkrev, parentlinkrevs,
1603 revs.append((linkrev, parentlinkrevs,
1604 follow and filelog.renamed(n)))
1604 follow and filelog.renamed(n)))
1605
1605
1606 return reversed(revs)
1606 return reversed(revs)
1607 def iterfiles():
1607 def iterfiles():
1608 pctx = repo['.']
1608 pctx = repo['.']
1609 for filename in match.files():
1609 for filename in match.files():
1610 if follow:
1610 if follow:
1611 if filename not in pctx:
1611 if filename not in pctx:
1612 raise util.Abort(_('cannot follow file not in parent '
1612 raise util.Abort(_('cannot follow file not in parent '
1613 'revision: "%s"') % filename)
1613 'revision: "%s"') % filename)
1614 yield filename, pctx[filename].filenode()
1614 yield filename, pctx[filename].filenode()
1615 else:
1615 else:
1616 yield filename, None
1616 yield filename, None
1617 for filename_node in copies:
1617 for filename_node in copies:
1618 yield filename_node
1618 yield filename_node
1619
1619
1620 for file_, node in iterfiles():
1620 for file_, node in iterfiles():
1621 filelog = repo.file(file_)
1621 filelog = repo.file(file_)
1622 if not len(filelog):
1622 if not len(filelog):
1623 if node is None:
1623 if node is None:
1624 # A zero count may be a directory or deleted file, so
1624 # A zero count may be a directory or deleted file, so
1625 # try to find matching entries on the slow path.
1625 # try to find matching entries on the slow path.
1626 if follow:
1626 if follow:
1627 raise util.Abort(
1627 raise util.Abort(
1628 _('cannot follow nonexistent file: "%s"') % file_)
1628 _('cannot follow nonexistent file: "%s"') % file_)
1629 raise FileWalkError("Cannot walk via filelog")
1629 raise FileWalkError("Cannot walk via filelog")
1630 else:
1630 else:
1631 continue
1631 continue
1632
1632
1633 if node is None:
1633 if node is None:
1634 last = len(filelog) - 1
1634 last = len(filelog) - 1
1635 else:
1635 else:
1636 last = filelog.rev(node)
1636 last = filelog.rev(node)
1637
1637
1638 # keep track of all ancestors of the file
1638 # keep track of all ancestors of the file
1639 ancestors = set([filelog.linkrev(last)])
1639 ancestors = set([filelog.linkrev(last)])
1640
1640
1641 # iterate from latest to oldest revision
1641 # iterate from latest to oldest revision
1642 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1642 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1643 if not follow:
1643 if not follow:
1644 if rev > maxrev:
1644 if rev > maxrev:
1645 continue
1645 continue
1646 else:
1646 else:
1647 # Note that last might not be the first interesting
1647 # Note that last might not be the first interesting
1648 # rev to us:
1648 # rev to us:
1649 # if the file has been changed after maxrev, we'll
1649 # if the file has been changed after maxrev, we'll
1650 # have linkrev(last) > maxrev, and we still need
1650 # have linkrev(last) > maxrev, and we still need
1651 # to explore the file graph
1651 # to explore the file graph
1652 if rev not in ancestors:
1652 if rev not in ancestors:
1653 continue
1653 continue
1654 # XXX insert 1327 fix here
1654 # XXX insert 1327 fix here
1655 if flparentlinkrevs:
1655 if flparentlinkrevs:
1656 ancestors.update(flparentlinkrevs)
1656 ancestors.update(flparentlinkrevs)
1657
1657
1658 fncache.setdefault(rev, []).append(file_)
1658 fncache.setdefault(rev, []).append(file_)
1659 wanted.add(rev)
1659 wanted.add(rev)
1660 if copied:
1660 if copied:
1661 copies.append(copied)
1661 copies.append(copied)
1662
1662
1663 return wanted
1663 return wanted
1664
1664
1665 class _followfilter(object):
1665 class _followfilter(object):
1666 def __init__(self, repo, onlyfirst=False):
1666 def __init__(self, repo, onlyfirst=False):
1667 self.repo = repo
1667 self.repo = repo
1668 self.startrev = nullrev
1668 self.startrev = nullrev
1669 self.roots = set()
1669 self.roots = set()
1670 self.onlyfirst = onlyfirst
1670 self.onlyfirst = onlyfirst
1671
1671
1672 def match(self, rev):
1672 def match(self, rev):
1673 def realparents(rev):
1673 def realparents(rev):
1674 if self.onlyfirst:
1674 if self.onlyfirst:
1675 return self.repo.changelog.parentrevs(rev)[0:1]
1675 return self.repo.changelog.parentrevs(rev)[0:1]
1676 else:
1676 else:
1677 return filter(lambda x: x != nullrev,
1677 return filter(lambda x: x != nullrev,
1678 self.repo.changelog.parentrevs(rev))
1678 self.repo.changelog.parentrevs(rev))
1679
1679
1680 if self.startrev == nullrev:
1680 if self.startrev == nullrev:
1681 self.startrev = rev
1681 self.startrev = rev
1682 return True
1682 return True
1683
1683
1684 if rev > self.startrev:
1684 if rev > self.startrev:
1685 # forward: all descendants
1685 # forward: all descendants
1686 if not self.roots:
1686 if not self.roots:
1687 self.roots.add(self.startrev)
1687 self.roots.add(self.startrev)
1688 for parent in realparents(rev):
1688 for parent in realparents(rev):
1689 if parent in self.roots:
1689 if parent in self.roots:
1690 self.roots.add(rev)
1690 self.roots.add(rev)
1691 return True
1691 return True
1692 else:
1692 else:
1693 # backwards: all parents
1693 # backwards: all parents
1694 if not self.roots:
1694 if not self.roots:
1695 self.roots.update(realparents(self.startrev))
1695 self.roots.update(realparents(self.startrev))
1696 if rev in self.roots:
1696 if rev in self.roots:
1697 self.roots.remove(rev)
1697 self.roots.remove(rev)
1698 self.roots.update(realparents(rev))
1698 self.roots.update(realparents(rev))
1699 return True
1699 return True
1700
1700
1701 return False
1701 return False
1702
1702
1703 def walkchangerevs(repo, match, opts, prepare):
1703 def walkchangerevs(repo, match, opts, prepare):
1704 '''Iterate over files and the revs in which they changed.
1704 '''Iterate over files and the revs in which they changed.
1705
1705
1706 Callers most commonly need to iterate backwards over the history
1706 Callers most commonly need to iterate backwards over the history
1707 in which they are interested. Doing so has awful (quadratic-looking)
1707 in which they are interested. Doing so has awful (quadratic-looking)
1708 performance, so we use iterators in a "windowed" way.
1708 performance, so we use iterators in a "windowed" way.
1709
1709
1710 We walk a window of revisions in the desired order. Within the
1710 We walk a window of revisions in the desired order. Within the
1711 window, we first walk forwards to gather data, then in the desired
1711 window, we first walk forwards to gather data, then in the desired
1712 order (usually backwards) to display it.
1712 order (usually backwards) to display it.
1713
1713
1714 This function returns an iterator yielding contexts. Before
1714 This function returns an iterator yielding contexts. Before
1715 yielding each context, the iterator will first call the prepare
1715 yielding each context, the iterator will first call the prepare
1716 function on each context in the window in forward order.'''
1716 function on each context in the window in forward order.'''
1717
1717
1718 follow = opts.get('follow') or opts.get('follow_first')
1718 follow = opts.get('follow') or opts.get('follow_first')
1719 revs = _logrevs(repo, opts)
1719 revs = _logrevs(repo, opts)
1720 if not revs:
1720 if not revs:
1721 return []
1721 return []
1722 wanted = set()
1722 wanted = set()
1723 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1723 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1724 fncache = {}
1724 fncache = {}
1725 change = repo.changectx
1725 change = repo.changectx
1726
1726
1727 # First step is to fill wanted, the set of revisions that we want to yield.
1727 # First step is to fill wanted, the set of revisions that we want to yield.
1728 # When it does not induce extra cost, we also fill fncache for revisions in
1728 # When it does not induce extra cost, we also fill fncache for revisions in
1729 # wanted: a cache of filenames that were changed (ctx.files()) and that
1729 # wanted: a cache of filenames that were changed (ctx.files()) and that
1730 # match the file filtering conditions.
1730 # match the file filtering conditions.
1731
1731
1732 if match.always():
1732 if match.always():
1733 # No files, no patterns. Display all revs.
1733 # No files, no patterns. Display all revs.
1734 wanted = revs
1734 wanted = revs
1735
1735
1736 if not slowpath and match.files():
1736 if not slowpath and match.files():
1737 # We only have to read through the filelog to find wanted revisions
1737 # We only have to read through the filelog to find wanted revisions
1738
1738
1739 try:
1739 try:
1740 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1740 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1741 except FileWalkError:
1741 except FileWalkError:
1742 slowpath = True
1742 slowpath = True
1743
1743
1744 # We decided to fall back to the slowpath because at least one
1744 # We decided to fall back to the slowpath because at least one
1745 # of the paths was not a file. Check to see if at least one of them
1745 # of the paths was not a file. Check to see if at least one of them
1746 # existed in history, otherwise simply return
1746 # existed in history, otherwise simply return
1747 for path in match.files():
1747 for path in match.files():
1748 if path == '.' or path in repo.store:
1748 if path == '.' or path in repo.store:
1749 break
1749 break
1750 else:
1750 else:
1751 return []
1751 return []
1752
1752
1753 if slowpath:
1753 if slowpath:
1754 # We have to read the changelog to match filenames against
1754 # We have to read the changelog to match filenames against
1755 # changed files
1755 # changed files
1756
1756
1757 if follow:
1757 if follow:
1758 raise util.Abort(_('can only follow copies/renames for explicit '
1758 raise util.Abort(_('can only follow copies/renames for explicit '
1759 'filenames'))
1759 'filenames'))
1760
1760
1761 # The slow path checks files modified in every changeset.
1761 # The slow path checks files modified in every changeset.
1762 # This is really slow on large repos, so compute the set lazily.
1762 # This is really slow on large repos, so compute the set lazily.
1763 class lazywantedset(object):
1763 class lazywantedset(object):
1764 def __init__(self):
1764 def __init__(self):
1765 self.set = set()
1765 self.set = set()
1766 self.revs = set(revs)
1766 self.revs = set(revs)
1767
1767
1768 # No need to worry about locality here because it will be accessed
1768 # No need to worry about locality here because it will be accessed
1769 # in the same order as the increasing window below.
1769 # in the same order as the increasing window below.
1770 def __contains__(self, value):
1770 def __contains__(self, value):
1771 if value in self.set:
1771 if value in self.set:
1772 return True
1772 return True
1773 elif not value in self.revs:
1773 elif not value in self.revs:
1774 return False
1774 return False
1775 else:
1775 else:
1776 self.revs.discard(value)
1776 self.revs.discard(value)
1777 ctx = change(value)
1777 ctx = change(value)
1778 matches = filter(match, ctx.files())
1778 matches = filter(match, ctx.files())
1779 if matches:
1779 if matches:
1780 fncache[value] = matches
1780 fncache[value] = matches
1781 self.set.add(value)
1781 self.set.add(value)
1782 return True
1782 return True
1783 return False
1783 return False
1784
1784
1785 def discard(self, value):
1785 def discard(self, value):
1786 self.revs.discard(value)
1786 self.revs.discard(value)
1787 self.set.discard(value)
1787 self.set.discard(value)
1788
1788
1789 wanted = lazywantedset()
1789 wanted = lazywantedset()
1790
1790
1791 # it might be worthwhile to do this in the iterator if the rev range
1791 # it might be worthwhile to do this in the iterator if the rev range
1792 # is descending and the prune args are all within that range
1792 # is descending and the prune args are all within that range
1793 for rev in opts.get('prune', ()):
1793 for rev in opts.get('prune', ()):
1794 rev = repo[rev].rev()
1794 rev = repo[rev].rev()
1795 ff = _followfilter(repo)
1795 ff = _followfilter(repo)
1796 stop = min(revs[0], revs[-1])
1796 stop = min(revs[0], revs[-1])
1797 for x in xrange(rev, stop - 1, -1):
1797 for x in xrange(rev, stop - 1, -1):
1798 if ff.match(x):
1798 if ff.match(x):
1799 wanted = wanted - [x]
1799 wanted = wanted - [x]
1800
1800
1801 # Now that wanted is correctly initialized, we can iterate over the
1801 # Now that wanted is correctly initialized, we can iterate over the
1802 # revision range, yielding only revisions in wanted.
1802 # revision range, yielding only revisions in wanted.
1803 def iterate():
1803 def iterate():
1804 if follow and not match.files():
1804 if follow and not match.files():
1805 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1805 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1806 def want(rev):
1806 def want(rev):
1807 return ff.match(rev) and rev in wanted
1807 return ff.match(rev) and rev in wanted
1808 else:
1808 else:
1809 def want(rev):
1809 def want(rev):
1810 return rev in wanted
1810 return rev in wanted
1811
1811
1812 it = iter(revs)
1812 it = iter(revs)
1813 stopiteration = False
1813 stopiteration = False
1814 for windowsize in increasingwindows():
1814 for windowsize in increasingwindows():
1815 nrevs = []
1815 nrevs = []
1816 for i in xrange(windowsize):
1816 for i in xrange(windowsize):
1817 try:
1817 try:
1818 rev = it.next()
1818 rev = it.next()
1819 if want(rev):
1819 if want(rev):
1820 nrevs.append(rev)
1820 nrevs.append(rev)
1821 except (StopIteration):
1821 except (StopIteration):
1822 stopiteration = True
1822 stopiteration = True
1823 break
1823 break
1824 for rev in sorted(nrevs):
1824 for rev in sorted(nrevs):
1825 fns = fncache.get(rev)
1825 fns = fncache.get(rev)
1826 ctx = change(rev)
1826 ctx = change(rev)
1827 if not fns:
1827 if not fns:
1828 def fns_generator():
1828 def fns_generator():
1829 for f in ctx.files():
1829 for f in ctx.files():
1830 if match(f):
1830 if match(f):
1831 yield f
1831 yield f
1832 fns = fns_generator()
1832 fns = fns_generator()
1833 prepare(ctx, fns)
1833 prepare(ctx, fns)
1834 for rev in nrevs:
1834 for rev in nrevs:
1835 yield change(rev)
1835 yield change(rev)
1836
1836
1837 if stopiteration:
1837 if stopiteration:
1838 break
1838 break
1839
1839
1840 return iterate()
1840 return iterate()
1841
1841
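A hedged usage sketch mirroring the finddate() caller earlier in this file: pass a matcher, an opts dict and a prepare callback; prepare() sees each context in forward order within a window before the iterator yields contexts in the requested order.

    from mercurial import ui as uimod, hg, scmutil, cmdutil

    u = uimod.ui()
    repo = hg.repository(u, '.')
    m = scmutil.matchall(repo)

    def prep(ctx, fns):
        pass  # inspect ctx/fns here; called before ctx is yielded below

    for ctx in cmdutil.walkchangerevs(repo, m, {'rev': None}, prep):
        u.write('%d:%s\n' % (ctx.rev(), ctx))
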
1842 def _makefollowlogfilematcher(repo, files, followfirst):
1842 def _makefollowlogfilematcher(repo, files, followfirst):
1843 # When displaying a revision with --patch --follow FILE, we have
1843 # When displaying a revision with --patch --follow FILE, we have
1844 # to know which file of the revision must be diffed. With
1844 # to know which file of the revision must be diffed. With
1845 # --follow, we want the names of the ancestors of FILE in the
1845 # --follow, we want the names of the ancestors of FILE in the
1846 # revision, stored in "fcache". "fcache" is populated by
1846 # revision, stored in "fcache". "fcache" is populated by
1847 # reproducing the graph traversal already done by --follow revset
1847 # reproducing the graph traversal already done by --follow revset
1848 # and relating linkrevs to file names (which is not "correct" but
1848 # and relating linkrevs to file names (which is not "correct" but
1849 # good enough).
1849 # good enough).
1850 fcache = {}
1850 fcache = {}
1851 fcacheready = [False]
1851 fcacheready = [False]
1852 pctx = repo['.']
1852 pctx = repo['.']
1853
1853
1854 def populate():
1854 def populate():
1855 for fn in files:
1855 for fn in files:
1856 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1856 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1857 for c in i:
1857 for c in i:
1858 fcache.setdefault(c.linkrev(), set()).add(c.path())
1858 fcache.setdefault(c.linkrev(), set()).add(c.path())
1859
1859
1860 def filematcher(rev):
1860 def filematcher(rev):
1861 if not fcacheready[0]:
1861 if not fcacheready[0]:
1862 # Lazy initialization
1862 # Lazy initialization
1863 fcacheready[0] = True
1863 fcacheready[0] = True
1864 populate()
1864 populate()
1865 return scmutil.matchfiles(repo, fcache.get(rev, []))
1865 return scmutil.matchfiles(repo, fcache.get(rev, []))
1866
1866
1867 return filematcher
1867 return filematcher
1868
1868
1869 def _makenofollowlogfilematcher(repo, pats, opts):
1869 def _makenofollowlogfilematcher(repo, pats, opts):
1870 '''hook for extensions to override the filematcher for non-follow cases'''
1870 '''hook for extensions to override the filematcher for non-follow cases'''
1871 return None
1871 return None
1872
1872
1873 def _makelogrevset(repo, pats, opts, revs):
1873 def _makelogrevset(repo, pats, opts, revs):
1874 """Return (expr, filematcher) where expr is a revset string built
1874 """Return (expr, filematcher) where expr is a revset string built
1875 from log options and file patterns or None. If --stat or --patch
1875 from log options and file patterns or None. If --stat or --patch
1876 are not passed, filematcher is None. Otherwise it is a callable
1876 are not passed, filematcher is None. Otherwise it is a callable
1877 taking a revision number and returning a match object filtering
1877 taking a revision number and returning a match object filtering
1878 the files to be detailed when displaying the revision.
1878 the files to be detailed when displaying the revision.
1879 """
1879 """
1880 opt2revset = {
1880 opt2revset = {
1881 'no_merges': ('not merge()', None),
1881 'no_merges': ('not merge()', None),
1882 'only_merges': ('merge()', None),
1882 'only_merges': ('merge()', None),
1883 '_ancestors': ('ancestors(%(val)s)', None),
1883 '_ancestors': ('ancestors(%(val)s)', None),
1884 '_fancestors': ('_firstancestors(%(val)s)', None),
1884 '_fancestors': ('_firstancestors(%(val)s)', None),
1885 '_descendants': ('descendants(%(val)s)', None),
1885 '_descendants': ('descendants(%(val)s)', None),
1886 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1886 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1887 '_matchfiles': ('_matchfiles(%(val)s)', None),
1887 '_matchfiles': ('_matchfiles(%(val)s)', None),
1888 'date': ('date(%(val)r)', None),
1888 'date': ('date(%(val)r)', None),
1889 'branch': ('branch(%(val)r)', ' or '),
1889 'branch': ('branch(%(val)r)', ' or '),
1890 '_patslog': ('filelog(%(val)r)', ' or '),
1890 '_patslog': ('filelog(%(val)r)', ' or '),
1891 '_patsfollow': ('follow(%(val)r)', ' or '),
1891 '_patsfollow': ('follow(%(val)r)', ' or '),
1892 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1892 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1893 'keyword': ('keyword(%(val)r)', ' or '),
1893 'keyword': ('keyword(%(val)r)', ' or '),
1894 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1894 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1895 'user': ('user(%(val)r)', ' or '),
1895 'user': ('user(%(val)r)', ' or '),
1896 }
1896 }
1897
1897
1898 opts = dict(opts)
1898 opts = dict(opts)
1899 # follow or not follow?
1899 # follow or not follow?
1900 follow = opts.get('follow') or opts.get('follow_first')
1900 follow = opts.get('follow') or opts.get('follow_first')
1901 if opts.get('follow_first'):
1901 if opts.get('follow_first'):
1902 followfirst = 1
1902 followfirst = 1
1903 else:
1903 else:
1904 followfirst = 0
1904 followfirst = 0
1905 # --follow with FILE behaviour depends on revs...
1905 # --follow with FILE behaviour depends on revs...
1906 it = iter(revs)
1906 it = iter(revs)
1907 startrev = it.next()
1907 startrev = it.next()
1908 try:
1908 try:
1909 followdescendants = startrev < it.next()
1909 followdescendants = startrev < it.next()
1910 except (StopIteration):
1910 except (StopIteration):
1911 followdescendants = False
1911 followdescendants = False
1912
1912
1913 # branch and only_branch are really aliases and must be handled at
1913 # branch and only_branch are really aliases and must be handled at
1914 # the same time
1914 # the same time
1915 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1915 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1916 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1916 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1917 # pats/include/exclude are passed to match.match() directly in
1917 # pats/include/exclude are passed to match.match() directly in
1918 # _matchfiles() revset but walkchangerevs() builds its matcher with
1918 # _matchfiles() revset but walkchangerevs() builds its matcher with
1919 # scmutil.match(). The difference is input pats are globbed on
1919 # scmutil.match(). The difference is input pats are globbed on
1920 # platforms without shell expansion (windows).
1920 # platforms without shell expansion (windows).
1921 pctx = repo[None]
1921 wctx = repo[None]
1922 match, pats = scmutil.matchandpats(pctx, pats, opts)
1922 match, pats = scmutil.matchandpats(wctx, pats, opts)
1923 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1923 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1924 if not slowpath:
1924 if not slowpath:
1925 for f in match.files():
1925 for f in match.files():
1926 if follow and f not in pctx:
1926 if follow and f not in wctx:
1927 # If the file exists, it may be a directory, so let it
1927 # If the file exists, it may be a directory, so let it
1928 # take the slow path.
1928 # take the slow path.
1929 if os.path.exists(repo.wjoin(f)):
1929 if os.path.exists(repo.wjoin(f)):
1930 slowpath = True
1930 slowpath = True
1931 continue
1931 continue
1932 else:
1932 else:
1933 raise util.Abort(_('cannot follow file not in parent '
1933 raise util.Abort(_('cannot follow file not in parent '
1934 'revision: "%s"') % f)
1934 'revision: "%s"') % f)
1935 filelog = repo.file(f)
1935 filelog = repo.file(f)
1936 if not filelog:
1936 if not filelog:
1937 # A zero count may be a directory or deleted file, so
1937 # A zero count may be a directory or deleted file, so
1938 # try to find matching entries on the slow path.
1938 # try to find matching entries on the slow path.
1939 if follow:
1939 if follow:
1940 raise util.Abort(
1940 raise util.Abort(
1941 _('cannot follow nonexistent file: "%s"') % f)
1941 _('cannot follow nonexistent file: "%s"') % f)
1942 slowpath = True
1942 slowpath = True
1943
1943
1944 # We decided to fall back to the slowpath because at least one
1944 # We decided to fall back to the slowpath because at least one
1945 # of the paths was not a file. Check to see if at least one of them
1945 # of the paths was not a file. Check to see if at least one of them
1946 # existed in history - in that case, we'll continue down the
1946 # existed in history - in that case, we'll continue down the
1947 # slowpath; otherwise, we can turn off the slowpath
1947 # slowpath; otherwise, we can turn off the slowpath
1948 if slowpath:
1948 if slowpath:
1949 for path in match.files():
1949 for path in match.files():
1950 if path == '.' or path in repo.store:
1950 if path == '.' or path in repo.store:
1951 break
1951 break
1952 else:
1952 else:
1953 slowpath = False
1953 slowpath = False
1954
1954
1955 fpats = ('_patsfollow', '_patsfollowfirst')
1955 fpats = ('_patsfollow', '_patsfollowfirst')
1956 fnopats = (('_ancestors', '_fancestors'),
1956 fnopats = (('_ancestors', '_fancestors'),
1957 ('_descendants', '_fdescendants'))
1957 ('_descendants', '_fdescendants'))
1958 if slowpath:
1958 if slowpath:
1959 # See walkchangerevs() slow path.
1959 # See walkchangerevs() slow path.
1960 #
1960 #
1961 # pats/include/exclude cannot be represented as separate
1961 # pats/include/exclude cannot be represented as separate
1962 # revset expressions as their filtering logic applies at file
1962 # revset expressions as their filtering logic applies at file
1963 # level. For instance "-I a -X a" matches a revision touching
1963 # level. For instance "-I a -X a" matches a revision touching
1964 # "a" and "b" while "file(a) and not file(b)" does
1964 # "a" and "b" while "file(a) and not file(b)" does
1965 # not. Besides, filesets are evaluated against the working
1965 # not. Besides, filesets are evaluated against the working
1966 # directory.
1966 # directory.
1967 matchargs = ['r:', 'd:relpath']
1967 matchargs = ['r:', 'd:relpath']
1968 for p in pats:
1968 for p in pats:
1969 matchargs.append('p:' + p)
1969 matchargs.append('p:' + p)
1970 for p in opts.get('include', []):
1970 for p in opts.get('include', []):
1971 matchargs.append('i:' + p)
1971 matchargs.append('i:' + p)
1972 for p in opts.get('exclude', []):
1972 for p in opts.get('exclude', []):
1973 matchargs.append('x:' + p)
1973 matchargs.append('x:' + p)
1974 matchargs = ','.join(('%r' % p) for p in matchargs)
1974 matchargs = ','.join(('%r' % p) for p in matchargs)
1975 opts['_matchfiles'] = matchargs
1975 opts['_matchfiles'] = matchargs
1976 if follow:
1976 if follow:
1977 opts[fnopats[0][followfirst]] = '.'
1977 opts[fnopats[0][followfirst]] = '.'
1978 else:
1978 else:
1979 if follow:
1979 if follow:
1980 if pats:
1980 if pats:
1981 # follow() revset interprets its file argument as a
1981 # follow() revset interprets its file argument as a
1982 # manifest entry, so use match.files(), not pats.
1982 # manifest entry, so use match.files(), not pats.
1983 opts[fpats[followfirst]] = list(match.files())
1983 opts[fpats[followfirst]] = list(match.files())
1984 else:
1984 else:
1985 op = fnopats[followdescendants][followfirst]
1985 op = fnopats[followdescendants][followfirst]
1986 opts[op] = 'rev(%d)' % startrev
1986 opts[op] = 'rev(%d)' % startrev
1987 else:
1987 else:
1988 opts['_patslog'] = list(pats)
1988 opts['_patslog'] = list(pats)
1989
1989
1990 filematcher = None
1990 filematcher = None
1991 if opts.get('patch') or opts.get('stat'):
1991 if opts.get('patch') or opts.get('stat'):
1992 # When following files, track renames via a special matcher.
1992 # When following files, track renames via a special matcher.
1993 # If we're forced to take the slowpath it means we're following
1993 # If we're forced to take the slowpath it means we're following
1994 # at least one pattern/directory, so don't bother with rename tracking.
1994 # at least one pattern/directory, so don't bother with rename tracking.
1995 if follow and not match.always() and not slowpath:
1995 if follow and not match.always() and not slowpath:
1996 # _makefollowlogfilematcher expects its files argument to be
1996 # _makefollowlogfilematcher expects its files argument to be
1997 # relative to the repo root, so use match.files(), not pats.
1997 # relative to the repo root, so use match.files(), not pats.
1998 filematcher = _makefollowlogfilematcher(repo, match.files(),
1998 filematcher = _makefollowlogfilematcher(repo, match.files(),
1999 followfirst)
1999 followfirst)
2000 else:
2000 else:
2001 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2001 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2002 if filematcher is None:
2002 if filematcher is None:
2003 filematcher = lambda rev: match
2003 filematcher = lambda rev: match
2004
2004
2005 expr = []
2005 expr = []
2006 for op, val in sorted(opts.iteritems()):
2006 for op, val in sorted(opts.iteritems()):
2007 if not val:
2007 if not val:
2008 continue
2008 continue
2009 if op not in opt2revset:
2009 if op not in opt2revset:
2010 continue
2010 continue
2011 revop, andor = opt2revset[op]
2011 revop, andor = opt2revset[op]
2012 if '%(val)' not in revop:
2012 if '%(val)' not in revop:
2013 expr.append(revop)
2013 expr.append(revop)
2014 else:
2014 else:
2015 if not isinstance(val, list):
2015 if not isinstance(val, list):
2016 e = revop % {'val': val}
2016 e = revop % {'val': val}
2017 else:
2017 else:
2018 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2018 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2019 expr.append(e)
2019 expr.append(e)
2020
2020
2021 if expr:
2021 if expr:
2022 expr = '(' + ' and '.join(expr) + ')'
2022 expr = '(' + ' and '.join(expr) + ')'
2023 else:
2023 else:
2024 expr = None
2024 expr = None
2025 return expr, filematcher
2025 return expr, filematcher
2026
2026
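A hedged worked example of the expression assembly above, with option names as the log command would pass them and no file patterns (so '_patslog' stays empty):

    opts = {'user': ['alice'], 'keyword': ['fix'], 'no_merges': True}
    # sorted(opts) visits 'keyword', 'no_merges', 'user', so the resulting
    # revset string is roughly:
    #   "((keyword('fix')) and not merge() and (user('alice')))"
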
2027 def _logrevs(repo, opts):
2027 def _logrevs(repo, opts):
2028 # Default --rev value depends on --follow but --follow behaviour
2028 # Default --rev value depends on --follow but --follow behaviour
2029 # depends on revisions resolved from --rev...
2029 # depends on revisions resolved from --rev...
2030 follow = opts.get('follow') or opts.get('follow_first')
2030 follow = opts.get('follow') or opts.get('follow_first')
2031 if opts.get('rev'):
2031 if opts.get('rev'):
2032 revs = scmutil.revrange(repo, opts['rev'])
2032 revs = scmutil.revrange(repo, opts['rev'])
2033 elif follow and repo.dirstate.p1() == nullid:
2033 elif follow and repo.dirstate.p1() == nullid:
2034 revs = revset.baseset()
2034 revs = revset.baseset()
2035 elif follow:
2035 elif follow:
2036 revs = repo.revs('reverse(:.)')
2036 revs = repo.revs('reverse(:.)')
2037 else:
2037 else:
2038 revs = revset.spanset(repo)
2038 revs = revset.spanset(repo)
2039 revs.reverse()
2039 revs.reverse()
2040 return revs
2040 return revs
2041
2041
2042 def getgraphlogrevs(repo, pats, opts):
2042 def getgraphlogrevs(repo, pats, opts):
2043 """Return (revs, expr, filematcher) where revs is an iterable of
2043 """Return (revs, expr, filematcher) where revs is an iterable of
2044 revision numbers, expr is a revset string built from log options
2044 revision numbers, expr is a revset string built from log options
2045 and file patterns or None, and used to filter 'revs'. If --stat or
2045 and file patterns or None, and used to filter 'revs'. If --stat or
2046 --patch are not passed, filematcher is None. Otherwise it is a
2046 --patch are not passed, filematcher is None. Otherwise it is a
2047 callable taking a revision number and returning a match object
2047 callable taking a revision number and returning a match object
2048 filtering the files to be detailed when displaying the revision.
2048 filtering the files to be detailed when displaying the revision.
2049 """
2049 """
2050 limit = loglimit(opts)
2050 limit = loglimit(opts)
2051 revs = _logrevs(repo, opts)
2051 revs = _logrevs(repo, opts)
2052 if not revs:
2052 if not revs:
2053 return revset.baseset(), None, None
2053 return revset.baseset(), None, None
2054 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2054 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2055 if opts.get('rev'):
2055 if opts.get('rev'):
2056 # User-specified revs might be unsorted, but don't sort before
2056 # User-specified revs might be unsorted, but don't sort before
2057 # _makelogrevset because it might depend on the order of revs
2057 # _makelogrevset because it might depend on the order of revs
2058 revs.sort(reverse=True)
2058 revs.sort(reverse=True)
2059 if expr:
2059 if expr:
2060 # Revset matchers often operate faster on revisions in changelog
2060 # Revset matchers often operate faster on revisions in changelog
2061 # order, because most filters deal with the changelog.
2061 # order, because most filters deal with the changelog.
2062 revs.reverse()
2062 revs.reverse()
2063 matcher = revset.match(repo.ui, expr)
2063 matcher = revset.match(repo.ui, expr)
2064 # Revset matches can reorder revisions. "A or B" typically returns
2064 # Revset matches can reorder revisions. "A or B" typically returns
2065 # the revision matching A then the revision matching B. Sort
2065 # the revision matching A then the revision matching B. Sort
2066 # again to fix that.
2066 # again to fix that.
2067 revs = matcher(repo, revs)
2067 revs = matcher(repo, revs)
2068 revs.sort(reverse=True)
2068 revs.sort(reverse=True)
2069 if limit is not None:
2069 if limit is not None:
2070 limitedrevs = []
2070 limitedrevs = []
2071 for idx, rev in enumerate(revs):
2071 for idx, rev in enumerate(revs):
2072 if idx >= limit:
2072 if idx >= limit:
2073 break
2073 break
2074 limitedrevs.append(rev)
2074 limitedrevs.append(rev)
2075 revs = revset.baseset(limitedrevs)
2075 revs = revset.baseset(limitedrevs)
2076
2076
2077 return revs, expr, filematcher
2077 return revs, expr, filematcher
2078
2078
2079 def getlogrevs(repo, pats, opts):
2079 def getlogrevs(repo, pats, opts):
2080 """Return (revs, expr, filematcher) where revs is an iterable of
2080 """Return (revs, expr, filematcher) where revs is an iterable of
2081 revision numbers, expr is a revset string built from log options
2081 revision numbers, expr is a revset string built from log options
2082 and file patterns or None, and used to filter 'revs'. If --stat or
2082 and file patterns or None, and used to filter 'revs'. If --stat or
2083 --patch are not passed, filematcher is None. Otherwise it is a
2083 --patch are not passed, filematcher is None. Otherwise it is a
2084 callable taking a revision number and returning a match object
2084 callable taking a revision number and returning a match object
2085 filtering the files to be detailed when displaying the revision.
2085 filtering the files to be detailed when displaying the revision.
2086 """
2086 """
2087 limit = loglimit(opts)
2087 limit = loglimit(opts)
2088 revs = _logrevs(repo, opts)
2088 revs = _logrevs(repo, opts)
2089 if not revs:
2089 if not revs:
2090 return revset.baseset([]), None, None
2090 return revset.baseset([]), None, None
2091 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2091 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2092 if expr:
2092 if expr:
2093 # Revset matchers often operate faster on revisions in changelog
2093 # Revset matchers often operate faster on revisions in changelog
2094 # order, because most filters deal with the changelog.
2094 # order, because most filters deal with the changelog.
2095 if not opts.get('rev'):
2095 if not opts.get('rev'):
2096 revs.reverse()
2096 revs.reverse()
2097 matcher = revset.match(repo.ui, expr)
2097 matcher = revset.match(repo.ui, expr)
2098 # Revset matches can reorder revisions. "A or B" typically returns
2098 # Revset matches can reorder revisions. "A or B" typically returns
2099 # the revision matching A then the revision matching B. Sort
2099 # the revision matching A then the revision matching B. Sort
2100 # again to fix that.
2100 # again to fix that.
2101 revs = matcher(repo, revs)
2101 revs = matcher(repo, revs)
2102 if not opts.get('rev'):
2102 if not opts.get('rev'):
2103 revs.sort(reverse=True)
2103 revs.sort(reverse=True)
2104 if limit is not None:
2104 if limit is not None:
2105 count = 0
2105 count = 0
2106 limitedrevs = []
2106 limitedrevs = []
2107 it = iter(revs)
2107 it = iter(revs)
2108 while count < limit:
2108 while count < limit:
2109 try:
2109 try:
2110 limitedrevs.append(it.next())
2110 limitedrevs.append(it.next())
2111 except (StopIteration):
2111 except (StopIteration):
2112 break
2112 break
2113 count += 1
2113 count += 1
2114 revs = revset.baseset(limitedrevs)
2114 revs = revset.baseset(limitedrevs)
2115
2115
2116 return revs, expr, filematcher
2116 return revs, expr, filematcher
2117
2117
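# The count/StopIteration loop above takes at most `limit` revisions without
# materializing the rest. A standalone equivalent using itertools (an
# illustration, not part of cmdutil.py):
import itertools

def _limit_revs(revs, limit):
    # limit=None means "no limit", matching loglimit()'s convention
    if limit is None:
        return list(revs)
    return list(itertools.islice(revs, limit))

# Example: _limit_revs(iter([9, 8, 7, 6]), 2) returns [9, 8].
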
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()

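# displaygraph() picks the node character with a first-match-wins chain
# ('@' working parent, 'x' obsolete, '_' closes branch, 'o' otherwise). A
# tiny standalone sketch of that selection (illustrative names only):
def _graphchar(is_parent, is_obsolete, closes_branch):
    if is_parent:
        return '@'
    if is_obsolete:
        return 'x'
    if closes_branch:
        return '_'
    return 'o'

# Example: _graphchar(False, True, False) returns 'x'.
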
def graphlog(ui, repo, *pats, **opts):
    # Parameters are identical to log command ones
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    displaygraph(ui, revdag, displayer, showparents,
                 graphmod.asciiedges, getrenamed, filematcher)

def checkunsupportedgraphflags(pats, opts):
    for op in ["newest_first"]:
        if op in opts and opts[op]:
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % op.replace("_", "-"))

def graphrevs(repo, nodes, opts):
    limit = loglimit(opts)
    nodes.reverse()
    if limit is not None:
        nodes = nodes[:limit]
    return graphmod.nodes(repo, nodes)

def add(ui, repo, match, prefix, explicitonly, **opts):
    join = lambda f: os.path.join(prefix, f)
    bad = []
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in wctx.walk(match):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad

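# add() and forget() both wrap the matcher's bad() callback so that every
# rejected path is collected while the original callback still runs. A
# standalone sketch of that wrapping pattern (hypothetical names):
def _collecting_bad(original_bad, collected):
    def bad(path, msg):
        collected.append(path)
        original_bad(path, msg)
    return bad

# Example:
#   problems = []
#   quiet = lambda path, msg: None
#   cb = _collecting_bad(quiet, problems)
#   cb('missing.txt', 'no such file')   # problems == ['missing.txt']
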
def forget(ui, repo, match, prefix, explicitonly):
    join = lambda f: os.path.join(prefix, f)
    bad = []
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot

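# The tail of forget() splits the candidates into files the working context
# refused (still "bad") and files actually forgotten. A standalone sketch of
# that partition (illustration only, simplified from the code above):
def _partition_forget(candidates, rejected):
    rejected = set(rejected)
    bad = [f for f in candidates if f in rejected]
    forgot = [f for f in candidates if f not in rejected]
    return bad, forgot

# Example: _partition_forget(['a', 'b', 'c'], ['b']) returns (['b'], ['a', 'c']).
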
def files(ui, ctx, m, fm, fmt, subrepos):
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret

def remove(ui, repo, m, prefix, after, force, subrepos):
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    for subpath in sorted(wctx.substate):
        def matchessubrepo(matcher, subpath):
            if matcher.exact(subpath):
                return True
            for f in matcher.files():
                if f.startswith(subpath):
                    return True
            return False

        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    # warn about failure to delete explicit files/dirs
    deleteddirs = scmutil.dirs(deleted)
    for f in m.files():
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath):
                    return True
            return False

        isdir = f in deleteddirs or f in wctx.dirs()
        if f in repo.dirstate or isdir or f == '.' or insubrepo():
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        list = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(list):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()

    return ret

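# remove() chooses which files to drop from tracking in three modes: --force
# takes everything, --after only takes files already missing from disk, and
# the default refuses modified and added files. A standalone sketch of that
# selection, returning (files to remove, files to warn about):
def _select_removals(modified, added, deleted, clean, force, after):
    if force:
        return modified + deleted + clean + added, []
    if after:
        return deleted, modified + added + clean
    return deleted + clean, modified + added

# Example (default mode): _select_removals(['m'], ['a'], ['d'], ['c'],
# False, False) returns (['d', 'c'], ['m', 'a']).
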
def cat(ui, repo, ctx, matcher, prefix, **opts):
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx._changeset[0]
        if mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    bad = matcher.bad

    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        bad(path, msg)

    matcher.bad = badfn

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    matcher.bad = bad

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err

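# cat() swaps in a bad() callback that stays silent for paths living inside a
# subrepository, so those are handled by the subrepo pass instead of being
# warned about. A standalone sketch of that suppression (hypothetical names):
def _ignore_subrepo_paths(original_bad, subrepo_prefixes):
    def bad(path, msg):
        for prefix in subrepo_prefixes:
            if path.startswith(prefix):
                return
        original_bad(path, msg)
    return bad

# Example: with prefixes ['vendor/'], 'vendor/lib.py' is skipped silently
# while 'missing.txt' still reaches the original callback.
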
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        if scmutil.addremove(repo, matcher, "", opts) != 0:
            raise util.Abort(
                _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)

def amend(ui, repo, commitfunc, old, extra, pats, opts):
    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('amend')
        try:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            currentbookmark = repo._bookmarkcurrent
            try:
                repo._bookmarkcurrent = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarkcurrent = currentbookmark
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were
            # no changes, the parent of the working directory, as the version
            # of the files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                if old.p2():
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This is not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        marks[bm] = newid
                    marks.write()
            # commit the whole amend process
            createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
            if createmarkers and newid != old.node():
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
            tr.close()
        finally:
            tr.release()
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        if newid is None:
            repo.dirstate.invalidate()
        lockmod.release(lock, wlock)
    return newid

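# amend() prunes files that the intermediate commit reverted back to their
# state in `base`, via samefile() above. A standalone sketch of the same idea
# over plain dicts standing in for manifests (content only, no flags):
def _prune_unchanged(files, new_contents, base_contents):
    def same(f):
        if f in new_contents:
            return base_contents.get(f) == new_contents[f]
        return f not in base_contents
    return [f for f in files if not same(f)]

# Example: _prune_unchanged(['a', 'b', 'c'], {'a': 1, 'b': 2},
# {'a': 1, 'b': 3}) returns ['b'] -- only 'b' really changed.
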
def commiteditor(repo, ctx, subs, editform=''):
    if ctx.description():
        return ctx.description()
    return commitforceeditor(repo, ctx, subs, editform=editform)

def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform=''):
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)
    text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text

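# commitforceeditor() strips editor boilerplate with the regular expression
# above: every line starting with 'HG:' is removed. A standalone sketch:
import re

def _strip_hg_lines(text):
    return re.sub("(?m)^HG:.*(\n|$)", "", text)

# Example: _strip_hg_lines("summary\nHG: user: alice\n") returns "summary\n".
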
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    try:
        t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])

    for k, v in repo.ui.configitems('committemplate'):
        if k != 'changeset':
            t.t.cache[k] = v

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()

def buildcommittext(repo, ctx, subs, extramsg):
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(_("HG: Enter commit message."
                      " Lines beginning with 'HG:' are removed."))
    edittext.append("HG: %s" % extramsg)
    edittext.append("HG: --")
    edittext.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        edittext.append(_("HG: branch merge"))
    if ctx.branch():
        edittext.append(_("HG: branch '%s'") % ctx.branch())
    if bookmarks.iscurrent(repo):
        edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
    edittext.extend([_("HG: subrepo %s") % s for s in subs])
    edittext.extend([_("HG: added %s") % f for f in added])
    edittext.extend([_("HG: changed %s") % f for f in modified])
    edittext.extend([_("HG: removed %s") % f for f in removed])
    if not added and not modified and not removed:
        edittext.append(_("HG: no files changed"))
    edittext.append("")

    return "\n".join(edittext)

def commitstatus(repo, node, branch, bheads=None, opts={}):
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))

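# revert(), below, drives its per-file decisions from a dispatch table of
# (file set, action, backup policy) rows; the first row containing the file
# wins. A standalone sketch of that lookup (illustrative data only):
def _lookup_disptable(path, disptable):
    for fileset, action, backup_policy in disptable:
        if path in fileset:
            return action, backup_policy
    return None, None

# Example:
#   table = (
#       ({'a.txt'}, 'revert', 'backup'),
#       ({'b.txt'}, 'forget', 'discard'),
#   )
#   _lookup_disptable('b.txt', table) returns ('forget', 'discard').
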
def revert(ui, repo, ctx, parents, *pats, **opts):
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2
    if node == parent:
        pmf = mf
    else:
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    wlock = repo.wlock()
    try:
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            m.bad = lambda x, y: False
            for abs in repo.walk(m):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            m.bad = badfn
            for abs in ctx.walk(m):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all files in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted files
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something other than the parent. This
        # will slightly alter the behavior of revert (doing a backup or not,
        # delete or just forget, etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take removes between the working copy and the target
            # into account
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinguish between dirstate removes and the others
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified files apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, so we need to post-process the result
        if p2 != nullid:
            if pmf is None:
                # only need parent manifest in the merge case,
                # so do not read by default
                pmf = repo[parent].manifest()
            mergeadd = dsmodified - set(pmf)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between files to forget and the others
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists it may need to be backed up.
        # Making the distinction at this stage helps keep the backup logic
        # simpler.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # actions to be actually performed by revert
        # (<list of files>, <message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constants" that convey the backup strategy.
        # All are set to `discard` if `no-backup` is set, to avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backup = check = discard

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that result in files changed on disk
            # Modified compared to target, no local change
            (modified,      actions['revert'],   discard),
            # Modified compared to target, but local file is deleted
            (deleted,       actions['revert'],   discard),
            # Modified compared to target, local change
            (dsmodified,    actions['revert'],   backup),
            # Added since target
            (added,         actions['remove'],   discard),
            # Added in working directory
            (dsadded,       actions['forget'],   discard),
            # Added since target, have local modification
            (modadded,      backupanddel,        backup),
            # Added since target but file is missing in working directory
            (deladded,      actions['drop'],     discard),
            # Removed since target, before working copy parent
            (removed,       actions['add'],      discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk,      actions['add'],      check),
            # Removed since target, marked as such in working copy parent
            (dsremoved,     actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk,    actions['undelete'], check),
            ## the following sets do not result in any file changes
            # File with no modification
            (clean,         actions['noop'],     discard),
            # Existing file, not tracked anywhere
            (unknown,       actions['unknown'],  discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touched on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the
            # working directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup and (backup <= dobackup
                                     or wctx[abs].cmp(ctx[abs])):
                        bakname = "%s.orig" % rel
                        ui.note(_('saving current version of %s as %s\n') %
                                (rel, bakname))
                        if not opts.get('dry_run'):
                            if interactive:
                                util.copyfile(target, bakname)
3021 else:
3021 else:
3022 util.rename(target, bakname)
3022 util.rename(target, bakname)
3023 if ui.verbose or not exact:
3023 if ui.verbose or not exact:
3024 if not isinstance(msg, basestring):
3024 if not isinstance(msg, basestring):
3025 msg = msg(abs)
3025 msg = msg(abs)
3026 ui.status(msg % rel)
3026 ui.status(msg % rel)
3027 elif exact:
3027 elif exact:
3028 ui.warn(msg % rel)
3028 ui.warn(msg % rel)
3029 break
3029 break
3030
3030
3031 if not opts.get('dry_run'):
3031 if not opts.get('dry_run'):
3032 needdata = ('revert', 'add', 'undelete')
3032 needdata = ('revert', 'add', 'undelete')
3033 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3033 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3034 _performrevert(repo, parents, ctx, actions, interactive)
3034 _performrevert(repo, parents, ctx, actions, interactive)
3035
3035
3036 if targetsubs:
3036 if targetsubs:
3037 # Revert the subrepos on the revert list
3037 # Revert the subrepos on the revert list
3038 for sub in targetsubs:
3038 for sub in targetsubs:
3039 try:
3039 try:
3040 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3040 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3041 except KeyError:
3041 except KeyError:
3042 raise util.Abort("subrepository '%s' does not exist in %s!"
3042 raise util.Abort("subrepository '%s' does not exist in %s!"
3043 % (sub, short(ctx.node())))
3043 % (sub, short(ctx.node())))
3044 finally:
3044 finally:
3045 wlock.release()
3045 wlock.release()
3046
3046
3047 def _revertprefetch(repo, ctx, *files):
3047 def _revertprefetch(repo, ctx, *files):
3048 """Let extension changing the storage layer prefetch content"""
3048 """Let extension changing the storage layer prefetch content"""
3049 pass
3049 pass
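
# Illustrative sketch (not part of this changeset): an extension that keeps
# file data outside the local store could wrap _revertprefetch() to batch-fetch
# everything revert is about to write. extensions.wrapfunction() is real
# Mercurial API; 'myext' and fetchmany() are hypothetical names.
#
#     from mercurial import cmdutil, extensions
#
#     def _prefetch(orig, repo, ctx, *files):
#         fetchmany(repo, ctx, [f for filelist in files for f in filelist])
#         return orig(repo, ctx, *files)
#
#     def uisetup(ui):
#         extensions.wrapfunction(cmdutil, '_revertprefetch', _prefetch)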

def _performrevert(repo, parents, ctx, actions, interactive=False):
    """function that actually performs all the actions computed for revert

    This is an independent function to let extensions plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.
    """
    parent, p2 = parents
    node = ctx.node()
    def checkout(f):
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        util.unlinkpath(repo.wjoin(f))
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, {})
        diff = patch.diff(repo, None, ctx.node(), m)
        originalchunks = patch.parsepatch(diff)
        try:
            chunks = recordfilter(repo.ui, originalchunks)
        except patch.PatchError, err:
            raise util.Abort(_('error parsing patch: %s') % err)

        # Apply changes
        fp = cStringIO.StringIO()
        for c in chunks:
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError, err:
                raise util.Abort(str(err))
        del fp

        for f in actions['revert'][0]:
            if normal:
                normal(f)

    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)

def command(table):
    """Returns a function object to be used as a decorator for making commands.

    This function receives a command table as its argument. The table should
    be a dict.

    The returned function can be used as a decorator for adding commands
    to that command table. This function accepts multiple arguments to define
    a command.

    The first argument is the command name.

    The options argument is an iterable of tuples defining command arguments.
    See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.

    The synopsis argument defines a short, one line summary of how to use the
    command. This shows up in the help output.

    The norepo argument defines whether the command does not require a
    local repository. Most commands operate against a repository, thus the
    default is False.

    The optionalrepo argument defines whether the command optionally requires
    a local repository.

    The inferrepo argument defines whether to try to find a repository from the
    command line arguments. If True, arguments will be examined for potential
    repository locations. See ``findrepo()``. If a repository is found, it
    will be used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            if synopsis:
                table[name] = func, list(options), synopsis
            else:
                table[name] = func, list(options)

            if norepo:
                # Avoid import cycle.
                import commands
                commands.norepo += ' %s' % ' '.join(parsealiases(name))

            if optionalrepo:
                import commands
                commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))

            if inferrepo:
                import commands
                commands.inferrepo += ' %s' % ' '.join(parsealiases(name))

            return func
        return decorator

    return cmd
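
# Illustrative usage sketch (hypothetical extension code, not part of this
# module): an extension builds its own command table and calls command() on
# it to obtain the decorator. The 'hello' command, its option and synopsis
# below are invented for the example.
#
#     cmdtable = {}
#     command = cmdutil.command(cmdtable)
#
#     @command('hello', [('g', 'greeting', 'Hello', _('greeting to use'))],
#              _('hg hello [-g TEXT] NAME'), norepo=True)
#     def hello(ui, name, **opts):
#         ui.write("%s, %s!\n" % (opts['greeting'], name))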

# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is the "missing" attribute of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return a tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
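
# Illustrative sketch (hypothetical extension code): a hook is registered by
# calling .add(source, hook) on the relevant util.hooks instance, typically
# from the extension's uisetup(). 'myext' and the hook body are invented.
#
#     def _summaryhook(ui, repo):
#         ui.note('myext: summary hook ran\n')
#
#     def uisetup(ui):
#         cmdutil.summaryhooks.add('myext', _summaryhook)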

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
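
# Illustrative sketch (hypothetical): an extension that keeps its own
# multistep state file can append an entry here so checkunfinished() and
# clearunfinished() below know about it. 'mystate' and the messages are
# invented for the example.
#
#     cmdutil.unfinishedstates.append(
#         ('mystate', False, False, _('my operation in progress'),
#          _("use 'hg myop --continue' or 'hg myop --abort'")))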

def checkunfinished(repo, commit=False):
    '''Look for an unfinished multistep operation, like graft, and abort
    if found. It's probably good to check this right before
    bailifchanged().
    '''
    for f, clearable, allowcommit, msg, hint in unfinishedstates:
        if commit and allowcommit:
            continue
        if repo.vfs.exists(f):
            raise util.Abort(msg, hint=hint)

def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.
    '''
    for f, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(f):
            raise util.Abort(msg, hint=hint)
    for f, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(f):
            util.unlink(repo.join(f))
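
# Illustrative usage sketch: a command that starts a new multistep operation
# would typically guard itself with checkunfinished() before touching the
# working directory, while an update-like command calls clearunfinished()
# first so clearable state (e.g. an interrupted update) does not block it.
# 'mycommand' is a hypothetical caller.
#
#     def mycommand(ui, repo, *pats, **opts):
#         cmdutil.checkunfinished(repo)
#         cmdutil.bailifchanged(repo)
#         # ... perform the multistep operation ...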