forget: use vfs instead of os.path + match.rel() for filesystem checks
Matt Harbison
r23673:69cd91d0 default
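The summary line above describes the direction of the changeset: filesystem checks in the forget path that previously joined working-directory paths by hand and went through os.path are routed through the repository's working-directory vfs instead. The following is a minimal sketch of that pattern, assuming the standard Mercurial wvfs API (repo.wvfs.exists()); the helper names are hypothetical and the code is an illustration, not an excerpt from this diff:

# Hypothetical sketch only -- not part of the changeset shown below.
import os

def exists_via_ospath(repo, f):
    # older style: join the working-directory path manually, then ask os.path
    return os.path.exists(repo.wjoin(f))

def exists_via_wvfs(repo, f):
    # vfs style: let the repo's working-directory vfs resolve and check f
    return repo.wvfs.exists(f)

Going through the vfs centralizes path handling in the repo object instead of re-deriving absolute paths at each call site. The hunk below is unchanged context from hgext/largefiles/overrides.py at this revision.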
@@ -1,1302 +1,1302 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''

import os
import copy

from mercurial import hg, util, cmdutil, scmutil, match as match_, \
        archival, pathutil, revset
from mercurial.i18n import _
from mercurial.node import hex

import lfutil
import lfcommands
import basestore

# -- Utility functions: commonly/repeatedly needed functionality ---------------

def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = filter(lfile, m._files)
    m._fmap = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m

def composenormalfilematcher(match, manifest):
    m = copy.copy(match)
    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                              manifest)
    m._files = filter(notlfile, m._files)
    m._fmap = set(m._files)
    m._always = False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m

def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        match = oldmatch(ctx, pats, opts, globbed, default)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)

def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ on runtime! Not thread safe!'''
    oldmatch = scmutil.match
    setattr(f, 'oldmatch', oldmatch)
    scmutil.match = f
    return oldmatch

def restorematchfn():
    '''restores scmutil.match to what it was before installmatchfn
    was called. no-op if scmutil.match is its original function.

    Note that n calls to installmatchfn will require n calls to
    restore the original matchfn.'''
    scmutil.match = getattr(scmutil.match, 'oldmatch')

def installmatchandpatsfn(f):
    oldmatchandpats = scmutil.matchandpats
    setattr(f, 'oldmatchandpats', oldmatchandpats)
    scmutil.matchandpats = f
    return oldmatchandpats

def restorematchandpatsfn():
    '''restores scmutil.matchandpats to what it was before
    installmatchandpatsfn was called. No-op if scmutil.matchandpats
    is its original function.

    Note that n calls to installmatchandpatsfn will require n calls
    to restore the original matchfn.'''
    scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
                                   scmutil.matchandpats)

def addlargefiles(ui, repo, matcher, **opts):
    large = opts.pop('large', None)
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = copy.copy(matcher)
    m.bad = lambda x, y: None
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            wfile = repo.wjoin(f)

            # In case the file was removed previously, but not committed
            # (issue3507)
            if not os.path.exists(wfile):
                continue

            abovemin = (lfsize and
                        os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % m.rel(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]
    finally:
        wlock.release()
    return bad

def removelargefiles(ui, repo, isaddremove, *pats, **opts):
    after = opts.get('after')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = composelargefilematcher(scmutil.match(repo[None], pats, opts),
                                repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if isaddremove:
                ui.status(_('removing %s\n') % f)
            elif ui.verbose or not m.exact(f):
                ui.status(_('removing %s\n') % m.rel(f))

            if not opts.get('dry_run'):
                if not after:
                    util.unlinkpath(repo.wjoin(f), ignoremissing=True)
                lfdirstate.remove(f)

        if opts.get('dry_run'):
            return result

        lfdirstate.write()
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(remove)
    finally:
        wlock.release()

    return result

# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    return lfutil.splitstandin(path) or path

# -- Wrappers: modify existing commands --------------------------------

# Add works by going through the files that the user wanted to add and
# checking if they should be added as largefiles. Then it makes a new
# matcher which matches only the normal files and runs the original
# version of add.
def overrideadd(orig, ui, repo, *pats, **opts):
    normal = opts.pop('normal')
    if normal:
        if opts.get('large'):
            raise util.Abort(_('--normal cannot be used with --large'))
        return orig(ui, repo, *pats, **opts)
    matcher = scmutil.match(repo[None], pats, opts)
    bad = addlargefiles(ui, repo, matcher, **opts)
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()

    return (result == 1 or bad) and 1 or 0

def overrideremove(orig, ui, repo, *pats, **opts):
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    return removelargefiles(ui, repo, False, *pats, **opts) or result

def overridestatusfn(orig, repo, rev2, **opts):
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False

def overridestatus(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

def overridedirty(orig, repo, ignoreupdate=False):
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False

def overridelog(orig, ui, repo, *pats, **opts):
    def overridematchandpats(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)
        # TODO: handling of patterns in both cases below
        if m._cwd:
            if os.path.isabs(m._cwd):
                # TODO: handle largefile magic when invoked from other cwd
                return matchandpats
            back = (m._cwd.count('/') + 1) * '../'
            pats.update(back + lfutil.standin(m._cwd + '/' + f) for f in p)
        else:
            pats.update(lfutil.standin(f) for f in p)

        for i in range(0, len(m._files)):
            standin = lfutil.standin(m._files[i])
            if standin in repo[ctx.node()]:
                m._files[i] = standin
            elif m._files[i] not in repo[ctx.node()]:
                m._files.append(standin)
                pats.add(standin)

        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn

        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts):
        pctx = repo[None]
        match, pats = oldmatchandpats(pctx, pats, opts)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)

def overrideverify(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result

def overridedebugstate(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    if large:
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)

# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding repo.update)
# will get the new files. Filemerge is also overridden so that the merge
# will merge standins correctly.
def overrideupdate(orig, ui, repo, *pats, **opts):
    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            unsure, s = lfdirstate.status(
                match_.always(repo.root, repo.getcwd()),
                [], False, False, False)

            mod = len(s.modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted changes'))
        return orig(ui, repo, *pats, **opts)
    finally:
        wlock.release()

# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)

# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             partial, acceptremote, followcopies):
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, partial, acceptremote,
        followcopies)

    if overwrite:
        return actions, diverge, renamedelete

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in actions:
        splitstandin = f and lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in lfiles:
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete

def mergerecordupdates(orig, repo, actions, branchmerge):
    if 'lfmr' in actions:
        # this should be executed before 'orig', to execute 'remove'
        # before all other actions
        for lfile, args, msg in actions['lfmr']:
            repo.dirstate.remove(lfile)

    return orig(repo, actions, branchmerge)


# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca, labels=None):
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcd, fco, fca, labels=labels)

    ahash = fca.data().strip().lower()
    dhash = fcd.data().strip().lower()
    ohash = fco.data().strip().lower()
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
             (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0

# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        try:
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                       (f in manifest) and
                                       origmatchfn(lfutil.splitstandin(f)) or
                                       None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                        # The file is gone, but this deletes any empty parent
                        # directories as a side-effect.
                        util.unlinkpath(repo.wjoin(srclfile), True)
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))

                    lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result

# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in s.deleted:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(ctx, pats=[], opts={}, globbed=False,
                default='relpath'):
            match = oldmatch(ctx, pats, opts, globbed, default)
            m = copy.copy(match)
            def tostandin(f):
                if lfutil.standin(f) in ctx:
                    return lfutil.standin(f)
                elif lfutil.standin(f) in repo[None]:
                    return None
                return f
            m._files = [tostandin(f) for f in m._files]
            m._files = [f for f in m._files if f is not None]
            m._fmap = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                if lfutil.isstandin(f):
                    return (origmatchfn(lfutil.splitstandin(f)) and
                            (f in repo[None] or f in ctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

    finally:
        wlock.release()

# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result

def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise util.Abort(_("pulled() only available in --lfrev"))
    return revset.baseset([r for r in subset if r >= firstpulled])

def overrideclone(orig, ui, source, dest=None, **opts):
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            d)

    return orig(ui, source, dest, **opts)

def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            if missing != 0:
                return None

    return result

def overriderebase(orig, ui, repo, **opts):
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()

def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix=None, mtime=None, subrepos=None):
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
863 tags += 'latesttagdistance: %s\n' % dist
863 tags += 'latesttagdistance: %s\n' % dist
864
864
865 return base + tags
865 return base + tags
866
866
867 write('.hg_archival.txt', 0644, False, metadata)
867 write('.hg_archival.txt', 0644, False, metadata)
868
868
869 for f in ctx:
869 for f in ctx:
870 ff = ctx.flags(f)
870 ff = ctx.flags(f)
871 getdata = ctx[f].data
871 getdata = ctx[f].data
872 if lfutil.isstandin(f):
872 if lfutil.isstandin(f):
873 path = lfutil.findfile(repo, getdata().strip())
873 path = lfutil.findfile(repo, getdata().strip())
874 if path is None:
874 if path is None:
875 raise util.Abort(
875 raise util.Abort(
876 _('largefile %s not found in repo store or system cache')
876 _('largefile %s not found in repo store or system cache')
877 % lfutil.splitstandin(f))
877 % lfutil.splitstandin(f))
878 f = lfutil.splitstandin(f)
878 f = lfutil.splitstandin(f)
879
879
880 def getdatafn():
880 def getdatafn():
881 fd = None
881 fd = None
882 try:
882 try:
883 fd = open(path, 'rb')
883 fd = open(path, 'rb')
884 return fd.read()
884 return fd.read()
885 finally:
885 finally:
886 if fd:
886 if fd:
887 fd.close()
887 fd.close()
888
888
889 getdata = getdatafn
889 getdata = getdatafn
890 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
890 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
891
891
892 if subrepos:
892 if subrepos:
893 for subpath in sorted(ctx.substate):
893 for subpath in sorted(ctx.substate):
894 sub = ctx.sub(subpath)
894 sub = ctx.sub(subpath)
895 submatch = match_.narrowmatcher(subpath, matchfn)
895 submatch = match_.narrowmatcher(subpath, matchfn)
896 sub.archive(archiver, prefix, submatch)
896 sub.archive(archiver, prefix, submatch)
897
897
898 archiver.done()
898 archiver.done()
899
899
900 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
900 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None):
901 repo._get(repo._state + ('hg',))
901 repo._get(repo._state + ('hg',))
902 rev = repo._state[1]
902 rev = repo._state[1]
903 ctx = repo._repo[rev]
903 ctx = repo._repo[rev]
904
904
905 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
905 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
906
906
907 def write(name, mode, islink, getdata):
907 def write(name, mode, islink, getdata):
908 # At this point, the standin has been replaced with the largefile name,
908 # At this point, the standin has been replaced with the largefile name,
909 # so the normal matcher works here without the lfutil variants.
909 # so the normal matcher works here without the lfutil variants.
910 if match and not match(f):
910 if match and not match(f):
911 return
911 return
912 data = getdata()
912 data = getdata()
913
913
914 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
914 archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
915
915
916 for f in ctx:
916 for f in ctx:
917 ff = ctx.flags(f)
917 ff = ctx.flags(f)
918 getdata = ctx[f].data
918 getdata = ctx[f].data
919 if lfutil.isstandin(f):
919 if lfutil.isstandin(f):
920 path = lfutil.findfile(repo._repo, getdata().strip())
920 path = lfutil.findfile(repo._repo, getdata().strip())
921 if path is None:
921 if path is None:
922 raise util.Abort(
922 raise util.Abort(
923 _('largefile %s not found in repo store or system cache')
923 _('largefile %s not found in repo store or system cache')
924 % lfutil.splitstandin(f))
924 % lfutil.splitstandin(f))
925 f = lfutil.splitstandin(f)
925 f = lfutil.splitstandin(f)
926
926
927 def getdatafn():
927 def getdatafn():
928 fd = None
928 fd = None
929 try:
929 try:
930 fd = open(os.path.join(prefix, path), 'rb')
930 fd = open(os.path.join(prefix, path), 'rb')
931 return fd.read()
931 return fd.read()
932 finally:
932 finally:
933 if fd:
933 if fd:
934 fd.close()
934 fd.close()
935
935
936 getdata = getdatafn
936 getdata = getdatafn
937
937
938 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
938 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
939
939
940 for subpath in sorted(ctx.substate):
940 for subpath in sorted(ctx.substate):
941 sub = ctx.sub(subpath)
941 sub = ctx.sub(subpath)
942 submatch = match_.narrowmatcher(subpath, match)
942 submatch = match_.narrowmatcher(subpath, match)
943 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
943 sub.archive(archiver, os.path.join(prefix, repo._path) + '/', submatch)
944
944
945 # If a largefile is modified, the change is not reflected in its
945 # If a largefile is modified, the change is not reflected in its
946 # standin until a commit. cmdutil.bailifchanged() raises an exception
946 # standin until a commit. cmdutil.bailifchanged() raises an exception
947 # if the repo has uncommitted changes. Wrap it to also check if
947 # if the repo has uncommitted changes. Wrap it to also check if
948 # largefiles were changed. This is used by bisect, backout and fetch.
948 # largefiles were changed. This is used by bisect, backout and fetch.
949 def overridebailifchanged(orig, repo):
949 def overridebailifchanged(orig, repo):
950 orig(repo)
950 orig(repo)
951 repo.lfstatus = True
951 repo.lfstatus = True
952 s = repo.status()
952 s = repo.status()
953 repo.lfstatus = False
953 repo.lfstatus = False
954 if s.modified or s.added or s.removed or s.deleted:
954 if s.modified or s.added or s.removed or s.deleted:
955 raise util.Abort(_('uncommitted changes'))
955 raise util.Abort(_('uncommitted changes'))
956
956
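# Illustrative sketch of how such an override is typically installed (an
# assumption about the extension's setup code, which is not shown in this file):
#
#     from mercurial import extensions, cmdutil
#     extensions.wrapfunction(cmdutil, 'bailifchanged', overridebailifchanged)
#
# Once wrapped, every caller of cmdutil.bailifchanged() (bisect, backout,
# fetch, ...) also triggers the largefiles status check above.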
957 def overrideforget(orig, ui, repo, *pats, **opts):
957 def overrideforget(orig, ui, repo, *pats, **opts):
958 installnormalfilesmatchfn(repo[None].manifest())
958 installnormalfilesmatchfn(repo[None].manifest())
959 result = orig(ui, repo, *pats, **opts)
959 result = orig(ui, repo, *pats, **opts)
960 restorematchfn()
960 restorematchfn()
961 m = composelargefilematcher(scmutil.match(repo[None], pats, opts),
961 m = composelargefilematcher(scmutil.match(repo[None], pats, opts),
962 repo[None].manifest())
962 repo[None].manifest())
963
963
964 try:
964 try:
965 repo.lfstatus = True
965 repo.lfstatus = True
966 s = repo.status(match=m, clean=True)
966 s = repo.status(match=m, clean=True)
967 finally:
967 finally:
968 repo.lfstatus = False
968 repo.lfstatus = False
969 forget = sorted(s.modified + s.added + s.deleted + s.clean)
969 forget = sorted(s.modified + s.added + s.deleted + s.clean)
970 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
970 forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
971
971
972 for f in forget:
972 for f in forget:
973 if lfutil.standin(f) not in repo.dirstate and not \
973 if lfutil.standin(f) not in repo.dirstate and not \
974 os.path.isdir(m.rel(lfutil.standin(f))):
974 repo.wvfs.isdir(lfutil.standin(f)):
975 ui.warn(_('not removing %s: file is already untracked\n')
975 ui.warn(_('not removing %s: file is already untracked\n')
976 % m.rel(f))
976 % m.rel(f))
977 result = 1
977 result = 1
978
978
979 for f in forget:
979 for f in forget:
980 if ui.verbose or not m.exact(f):
980 if ui.verbose or not m.exact(f):
981 ui.status(_('removing %s\n') % m.rel(f))
981 ui.status(_('removing %s\n') % m.rel(f))
982
982
983 # Need to lock because standin files are deleted then removed from the
983 # Need to lock because standin files are deleted then removed from the
984 # repository and we could race in-between.
984 # repository and we could race in-between.
985 wlock = repo.wlock()
985 wlock = repo.wlock()
986 try:
986 try:
987 lfdirstate = lfutil.openlfdirstate(ui, repo)
987 lfdirstate = lfutil.openlfdirstate(ui, repo)
988 for f in forget:
988 for f in forget:
989 if lfdirstate[f] == 'a':
989 if lfdirstate[f] == 'a':
990 lfdirstate.drop(f)
990 lfdirstate.drop(f)
991 else:
991 else:
992 lfdirstate.remove(f)
992 lfdirstate.remove(f)
993 lfdirstate.write()
993 lfdirstate.write()
994 standins = [lfutil.standin(f) for f in forget]
994 standins = [lfutil.standin(f) for f in forget]
995 for f in standins:
995 for f in standins:
996 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
996 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
997 repo[None].forget(standins)
997 repo[None].forget(standins)
998 finally:
998 finally:
999 wlock.release()
999 wlock.release()
1000
1000
1001 return result
1001 return result
1002
1002
1003 def _getoutgoings(repo, other, missing, addfunc):
1003 def _getoutgoings(repo, other, missing, addfunc):
1004 """get pairs of filename and largefile hash in outgoing revisions
1004 """get pairs of filename and largefile hash in outgoing revisions
1005 in 'missing'.
1005 in 'missing'.
1006
1006
1007 largefiles already existing on 'other' repository are ignored.
1007 largefiles already existing on 'other' repository are ignored.
1008
1008
1009 'addfunc' is invoked with each unique pair of filename and
1009 'addfunc' is invoked with each unique pair of filename and
1010 largefile hash value.
1010 largefile hash value.
1011 """
1011 """
1012 knowns = set()
1012 knowns = set()
1013 lfhashes = set()
1013 lfhashes = set()
1014 def dedup(fn, lfhash):
1014 def dedup(fn, lfhash):
1015 k = (fn, lfhash)
1015 k = (fn, lfhash)
1016 if k not in knowns:
1016 if k not in knowns:
1017 knowns.add(k)
1017 knowns.add(k)
1018 lfhashes.add(lfhash)
1018 lfhashes.add(lfhash)
1019 lfutil.getlfilestoupload(repo, missing, dedup)
1019 lfutil.getlfilestoupload(repo, missing, dedup)
1020 if lfhashes:
1020 if lfhashes:
1021 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1021 lfexists = basestore._openstore(repo, other).exists(lfhashes)
1022 for fn, lfhash in knowns:
1022 for fn, lfhash in knowns:
1023 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1023 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1024 addfunc(fn, lfhash)
1024 addfunc(fn, lfhash)
1025
1025
1026 def outgoinghook(ui, repo, other, opts, missing):
1026 def outgoinghook(ui, repo, other, opts, missing):
1027 if opts.pop('large', None):
1027 if opts.pop('large', None):
1028 lfhashes = set()
1028 lfhashes = set()
1029 if ui.debugflag:
1029 if ui.debugflag:
1030 toupload = {}
1030 toupload = {}
1031 def addfunc(fn, lfhash):
1031 def addfunc(fn, lfhash):
1032 if fn not in toupload:
1032 if fn not in toupload:
1033 toupload[fn] = []
1033 toupload[fn] = []
1034 toupload[fn].append(lfhash)
1034 toupload[fn].append(lfhash)
1035 lfhashes.add(lfhash)
1035 lfhashes.add(lfhash)
1036 def showhashes(fn):
1036 def showhashes(fn):
1037 for lfhash in sorted(toupload[fn]):
1037 for lfhash in sorted(toupload[fn]):
1038 ui.debug(' %s\n' % (lfhash))
1038 ui.debug(' %s\n' % (lfhash))
1039 else:
1039 else:
1040 toupload = set()
1040 toupload = set()
1041 def addfunc(fn, lfhash):
1041 def addfunc(fn, lfhash):
1042 toupload.add(fn)
1042 toupload.add(fn)
1043 lfhashes.add(lfhash)
1043 lfhashes.add(lfhash)
1044 def showhashes(fn):
1044 def showhashes(fn):
1045 pass
1045 pass
1046 _getoutgoings(repo, other, missing, addfunc)
1046 _getoutgoings(repo, other, missing, addfunc)
1047
1047
1048 if not toupload:
1048 if not toupload:
1049 ui.status(_('largefiles: no files to upload\n'))
1049 ui.status(_('largefiles: no files to upload\n'))
1050 else:
1050 else:
1051 ui.status(_('largefiles to upload (%d entities):\n')
1051 ui.status(_('largefiles to upload (%d entities):\n')
1052 % (len(lfhashes)))
1052 % (len(lfhashes)))
1053 for file in sorted(toupload):
1053 for file in sorted(toupload):
1054 ui.status(lfutil.splitstandin(file) + '\n')
1054 ui.status(lfutil.splitstandin(file) + '\n')
1055 showhashes(file)
1055 showhashes(file)
1056 ui.status('\n')
1056 ui.status('\n')
1057
1057
1058 def summaryremotehook(ui, repo, opts, changes):
1058 def summaryremotehook(ui, repo, opts, changes):
1059 largeopt = opts.get('large', False)
1059 largeopt = opts.get('large', False)
1060 if changes is None:
1060 if changes is None:
1061 if largeopt:
1061 if largeopt:
1062 return (False, True) # only outgoing check is needed
1062 return (False, True) # only outgoing check is needed
1063 else:
1063 else:
1064 return (False, False)
1064 return (False, False)
1065 elif largeopt:
1065 elif largeopt:
1066 url, branch, peer, outgoing = changes[1]
1066 url, branch, peer, outgoing = changes[1]
1067 if peer is None:
1067 if peer is None:
1068 # i18n: column positioning for "hg summary"
1068 # i18n: column positioning for "hg summary"
1069 ui.status(_('largefiles: (no remote repo)\n'))
1069 ui.status(_('largefiles: (no remote repo)\n'))
1070 return
1070 return
1071
1071
1072 toupload = set()
1072 toupload = set()
1073 lfhashes = set()
1073 lfhashes = set()
1074 def addfunc(fn, lfhash):
1074 def addfunc(fn, lfhash):
1075 toupload.add(fn)
1075 toupload.add(fn)
1076 lfhashes.add(lfhash)
1076 lfhashes.add(lfhash)
1077 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1077 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1078
1078
1079 if not toupload:
1079 if not toupload:
1080 # i18n: column positioning for "hg summary"
1080 # i18n: column positioning for "hg summary"
1081 ui.status(_('largefiles: (no files to upload)\n'))
1081 ui.status(_('largefiles: (no files to upload)\n'))
1082 else:
1082 else:
1083 # i18n: column positioning for "hg summary"
1083 # i18n: column positioning for "hg summary"
1084 ui.status(_('largefiles: %d entities for %d files to upload\n')
1084 ui.status(_('largefiles: %d entities for %d files to upload\n')
1085 % (len(lfhashes), len(toupload)))
1085 % (len(lfhashes), len(toupload)))
1086
1086
1087 def overridesummary(orig, ui, repo, *pats, **opts):
1087 def overridesummary(orig, ui, repo, *pats, **opts):
1088 try:
1088 try:
1089 repo.lfstatus = True
1089 repo.lfstatus = True
1090 orig(ui, repo, *pats, **opts)
1090 orig(ui, repo, *pats, **opts)
1091 finally:
1091 finally:
1092 repo.lfstatus = False
1092 repo.lfstatus = False
1093
1093
1094 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1094 def scmutiladdremove(orig, repo, matcher, prefix, opts={}, dry_run=None,
1095 similarity=None):
1095 similarity=None):
1096 if not lfutil.islfilesrepo(repo):
1096 if not lfutil.islfilesrepo(repo):
1097 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1097 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1098 # Get the list of missing largefiles so we can remove them
1098 # Get the list of missing largefiles so we can remove them
1099 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1099 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1100 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1100 unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
1101 False, False, False)
1101 False, False, False)
1102
1102
1103 # Call into the normal remove code, but leave removal of the standin to the
1103 # Call into the normal remove code, but leave removal of the standin to the
1104 # original addremove. Monkey patching here makes sure
1104 # original addremove. Monkey patching here makes sure
1105 # we don't remove the standin in the largefiles code, preventing a very
1105 # we don't remove the standin in the largefiles code, preventing a very
1106 # confused state later.
1106 # confused state later.
1107 if s.deleted:
1107 if s.deleted:
1108 m = [repo.wjoin(f) for f in s.deleted]
1108 m = [repo.wjoin(f) for f in s.deleted]
1109 removelargefiles(repo.ui, repo, True, *m, **opts)
1109 removelargefiles(repo.ui, repo, True, *m, **opts)
1110 # Call into the normal add code, and any files that *should* be added as
1110 # Call into the normal add code, and any files that *should* be added as
1111 # largefiles will be
1111 # largefiles will be
1112 addlargefiles(repo.ui, repo, matcher, **opts)
1112 addlargefiles(repo.ui, repo, matcher, **opts)
1113 # Now that we've handled largefiles, hand off to the original addremove
1113 # Now that we've handled largefiles, hand off to the original addremove
1114 # function to take care of the rest. Make sure it doesn't do anything with
1114 # function to take care of the rest. Make sure it doesn't do anything with
1115 # largefiles by passing a matcher that will ignore them.
1115 # largefiles by passing a matcher that will ignore them.
1116 matcher = composenormalfilematcher(matcher, repo[None].manifest())
1116 matcher = composenormalfilematcher(matcher, repo[None].manifest())
1117 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1117 return orig(repo, matcher, prefix, opts, dry_run, similarity)
1118
1118
1119 # Calling purge with --all will cause the largefiles to be deleted.
1119 # Calling purge with --all will cause the largefiles to be deleted.
1120 # Override repo.status to prevent this from happening.
1120 # Override repo.status to prevent this from happening.
1121 def overridepurge(orig, ui, repo, *dirs, **opts):
1121 def overridepurge(orig, ui, repo, *dirs, **opts):
1122 # XXX Monkey patching a repoview will not work. The assigned attribute will
1122 # XXX Monkey patching a repoview will not work. The assigned attribute will
1123 # be set on the unfiltered repo, but we will only lookup attributes in the
1123 # be set on the unfiltered repo, but we will only lookup attributes in the
1124 # unfiltered repo if the lookup in the repoview object itself fails. As the
1124 # unfiltered repo if the lookup in the repoview object itself fails. As the
1125 # monkey patched method exists on the repoview class the lookup will not
1125 # monkey patched method exists on the repoview class the lookup will not
1126 # fail. As a result, the original version will shadow the monkey patched
1126 # fail. As a result, the original version will shadow the monkey patched
1127 # one, defeating the monkey patch.
1127 # one, defeating the monkey patch.
1128 #
1128 #
1129 # As a workaround we use an unfiltered repo here. We should do something
1129 # As a workaround we use an unfiltered repo here. We should do something
1130 # cleaner instead.
1130 # cleaner instead.
1131 repo = repo.unfiltered()
1131 repo = repo.unfiltered()
1132 oldstatus = repo.status
1132 oldstatus = repo.status
1133 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1133 def overridestatus(node1='.', node2=None, match=None, ignored=False,
1134 clean=False, unknown=False, listsubrepos=False):
1134 clean=False, unknown=False, listsubrepos=False):
1135 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1135 r = oldstatus(node1, node2, match, ignored, clean, unknown,
1136 listsubrepos)
1136 listsubrepos)
1137 lfdirstate = lfutil.openlfdirstate(ui, repo)
1137 lfdirstate = lfutil.openlfdirstate(ui, repo)
1138 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1138 unknown = [f for f in r.unknown if lfdirstate[f] == '?']
1139 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1139 ignored = [f for f in r.ignored if lfdirstate[f] == '?']
1140 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1140 return scmutil.status(r.modified, r.added, r.removed, r.deleted,
1141 unknown, ignored, r.clean)
1141 unknown, ignored, r.clean)
1142 repo.status = overridestatus
1142 repo.status = overridestatus
1143 orig(ui, repo, *dirs, **opts)
1143 orig(ui, repo, *dirs, **opts)
1144 repo.status = oldstatus
1144 repo.status = oldstatus
1145 def overriderollback(orig, ui, repo, **opts):
1145 def overriderollback(orig, ui, repo, **opts):
1146 wlock = repo.wlock()
1146 wlock = repo.wlock()
1147 try:
1147 try:
1148 before = repo.dirstate.parents()
1148 before = repo.dirstate.parents()
1149 orphans = set(f for f in repo.dirstate
1149 orphans = set(f for f in repo.dirstate
1150 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1150 if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
1151 result = orig(ui, repo, **opts)
1151 result = orig(ui, repo, **opts)
1152 after = repo.dirstate.parents()
1152 after = repo.dirstate.parents()
1153 if before == after:
1153 if before == after:
1154 return result # no need to restore standins
1154 return result # no need to restore standins
1155
1155
1156 pctx = repo['.']
1156 pctx = repo['.']
1157 for f in repo.dirstate:
1157 for f in repo.dirstate:
1158 if lfutil.isstandin(f):
1158 if lfutil.isstandin(f):
1159 orphans.discard(f)
1159 orphans.discard(f)
1160 if repo.dirstate[f] == 'r':
1160 if repo.dirstate[f] == 'r':
1161 repo.wvfs.unlinkpath(f, ignoremissing=True)
1161 repo.wvfs.unlinkpath(f, ignoremissing=True)
1162 elif f in pctx:
1162 elif f in pctx:
1163 fctx = pctx[f]
1163 fctx = pctx[f]
1164 repo.wwrite(f, fctx.data(), fctx.flags())
1164 repo.wwrite(f, fctx.data(), fctx.flags())
1165 else:
1165 else:
1166 # content of standin is not so important in 'a',
1166 # content of standin is not so important in 'a',
1167 # 'm' or 'n' (coming from the 2nd parent) cases
1167 # 'm' or 'n' (coming from the 2nd parent) cases
1168 lfutil.writestandin(repo, f, '', False)
1168 lfutil.writestandin(repo, f, '', False)
1169 for standin in orphans:
1169 for standin in orphans:
1170 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1170 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1171
1171
1172 lfdirstate = lfutil.openlfdirstate(ui, repo)
1172 lfdirstate = lfutil.openlfdirstate(ui, repo)
1173 orphans = set(lfdirstate)
1173 orphans = set(lfdirstate)
1174 lfiles = lfutil.listlfiles(repo)
1174 lfiles = lfutil.listlfiles(repo)
1175 for file in lfiles:
1175 for file in lfiles:
1176 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1176 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1177 orphans.discard(file)
1177 orphans.discard(file)
1178 for lfile in orphans:
1178 for lfile in orphans:
1179 lfdirstate.drop(lfile)
1179 lfdirstate.drop(lfile)
1180 lfdirstate.write()
1180 lfdirstate.write()
1181 finally:
1181 finally:
1182 wlock.release()
1182 wlock.release()
1183 return result
1183 return result
1184
1184
1185 def overridetransplant(orig, ui, repo, *revs, **opts):
1185 def overridetransplant(orig, ui, repo, *revs, **opts):
1186 resuming = opts.get('continue')
1186 resuming = opts.get('continue')
1187 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1187 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1188 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1188 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1189 try:
1189 try:
1190 result = orig(ui, repo, *revs, **opts)
1190 result = orig(ui, repo, *revs, **opts)
1191 finally:
1191 finally:
1192 repo._lfstatuswriters.pop()
1192 repo._lfstatuswriters.pop()
1193 repo._lfcommithooks.pop()
1193 repo._lfcommithooks.pop()
1194 return result
1194 return result
1195
1195
1196 def overridecat(orig, ui, repo, file1, *pats, **opts):
1196 def overridecat(orig, ui, repo, file1, *pats, **opts):
1197 ctx = scmutil.revsingle(repo, opts.get('rev'))
1197 ctx = scmutil.revsingle(repo, opts.get('rev'))
1198 err = 1
1198 err = 1
1199 notbad = set()
1199 notbad = set()
1200 m = scmutil.match(ctx, (file1,) + pats, opts)
1200 m = scmutil.match(ctx, (file1,) + pats, opts)
1201 origmatchfn = m.matchfn
1201 origmatchfn = m.matchfn
1202 def lfmatchfn(f):
1202 def lfmatchfn(f):
1203 if origmatchfn(f):
1203 if origmatchfn(f):
1204 return True
1204 return True
1205 lf = lfutil.splitstandin(f)
1205 lf = lfutil.splitstandin(f)
1206 if lf is None:
1206 if lf is None:
1207 return False
1207 return False
1208 notbad.add(lf)
1208 notbad.add(lf)
1209 return origmatchfn(lf)
1209 return origmatchfn(lf)
1210 m.matchfn = lfmatchfn
1210 m.matchfn = lfmatchfn
1211 origbadfn = m.bad
1211 origbadfn = m.bad
1212 def lfbadfn(f, msg):
1212 def lfbadfn(f, msg):
1213 if not f in notbad:
1213 if not f in notbad:
1214 origbadfn(f, msg)
1214 origbadfn(f, msg)
1215 m.bad = lfbadfn
1215 m.bad = lfbadfn
1216 for f in ctx.walk(m):
1216 for f in ctx.walk(m):
1217 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1217 fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
1218 pathname=f)
1218 pathname=f)
1219 lf = lfutil.splitstandin(f)
1219 lf = lfutil.splitstandin(f)
1220 if lf is None or origmatchfn(f):
1220 if lf is None or origmatchfn(f):
1221 # duplicating unreachable code from commands.cat
1221 # duplicating unreachable code from commands.cat
1222 data = ctx[f].data()
1222 data = ctx[f].data()
1223 if opts.get('decode'):
1223 if opts.get('decode'):
1224 data = repo.wwritedata(f, data)
1224 data = repo.wwritedata(f, data)
1225 fp.write(data)
1225 fp.write(data)
1226 else:
1226 else:
1227 hash = lfutil.readstandin(repo, lf, ctx.rev())
1227 hash = lfutil.readstandin(repo, lf, ctx.rev())
1228 if not lfutil.inusercache(repo.ui, hash):
1228 if not lfutil.inusercache(repo.ui, hash):
1229 store = basestore._openstore(repo)
1229 store = basestore._openstore(repo)
1230 success, missing = store.get([(lf, hash)])
1230 success, missing = store.get([(lf, hash)])
1231 if len(success) != 1:
1231 if len(success) != 1:
1232 raise util.Abort(
1232 raise util.Abort(
1233 _('largefile %s is not in cache and could not be '
1233 _('largefile %s is not in cache and could not be '
1234 'downloaded') % lf)
1234 'downloaded') % lf)
1235 path = lfutil.usercachepath(repo.ui, hash)
1235 path = lfutil.usercachepath(repo.ui, hash)
1236 fpin = open(path, "rb")
1236 fpin = open(path, "rb")
1237 for chunk in util.filechunkiter(fpin, 128 * 1024):
1237 for chunk in util.filechunkiter(fpin, 128 * 1024):
1238 fp.write(chunk)
1238 fp.write(chunk)
1239 fpin.close()
1239 fpin.close()
1240 fp.close()
1240 fp.close()
1241 err = 0
1241 err = 0
1242 return err
1242 return err
1243
1243
1244 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1244 def mergeupdate(orig, repo, node, branchmerge, force, partial,
1245 *args, **kwargs):
1245 *args, **kwargs):
1246 wlock = repo.wlock()
1246 wlock = repo.wlock()
1247 try:
1247 try:
1248 # branch | | |
1248 # branch | | |
1249 # merge | force | partial | action
1249 # merge | force | partial | action
1250 # -------+-------+---------+--------------
1250 # -------+-------+---------+--------------
1251 # x | x | x | linear-merge
1251 # x | x | x | linear-merge
1252 # o | x | x | branch-merge
1252 # o | x | x | branch-merge
1253 # x | o | x | overwrite (as clean update)
1253 # x | o | x | overwrite (as clean update)
1254 # o | o | x | force-branch-merge (*1)
1254 # o | o | x | force-branch-merge (*1)
1255 # x | x | o | (*)
1255 # x | x | o | (*)
1256 # o | x | o | (*)
1256 # o | x | o | (*)
1257 # x | o | o | overwrite (as revert)
1257 # x | o | o | overwrite (as revert)
1258 # o | o | o | (*)
1258 # o | o | o | (*)
1259 #
1259 #
1260 # (*) don't care
1260 # (*) don't care
1261 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1261 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1262
1262
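        # Illustrative mapping of common commands onto the table above (an
        # assumption about how the callers pass their flags):
        #   hg update REV         -> branchmerge=False, force=False, no partial: linear-merge
        #   hg merge REV          -> branchmerge=True,  force=False, no partial: branch-merge
        #   hg update --clean REV -> branchmerge=False, force=True,  no partial: overwrite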
1263 linearmerge = not branchmerge and not force and not partial
1263 linearmerge = not branchmerge and not force and not partial
1264
1264
1265 if linearmerge or (branchmerge and force and not partial):
1265 if linearmerge or (branchmerge and force and not partial):
1266 # update standins for linear-merge or force-branch-merge,
1266 # update standins for linear-merge or force-branch-merge,
1267 # because largefiles in the working directory may be modified
1267 # because largefiles in the working directory may be modified
1268 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1268 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1269 unsure, s = lfdirstate.status(match_.always(repo.root,
1269 unsure, s = lfdirstate.status(match_.always(repo.root,
1270 repo.getcwd()),
1270 repo.getcwd()),
1271 [], False, False, False)
1271 [], False, False, False)
1272 for lfile in unsure + s.modified + s.added:
1272 for lfile in unsure + s.modified + s.added:
1273 lfutil.updatestandin(repo, lfutil.standin(lfile))
1273 lfutil.updatestandin(repo, lfutil.standin(lfile))
1274
1274
1275 if linearmerge:
1275 if linearmerge:
1276 # Only call updatelfiles on the standins that have changed
1276 # Only call updatelfiles on the standins that have changed
1277 # to save time
1277 # to save time
1278 oldstandins = lfutil.getstandinsstate(repo)
1278 oldstandins = lfutil.getstandinsstate(repo)
1279
1279
1280 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1280 result = orig(repo, node, branchmerge, force, partial, *args, **kwargs)
1281
1281
1282 filelist = None
1282 filelist = None
1283 if linearmerge:
1283 if linearmerge:
1284 newstandins = lfutil.getstandinsstate(repo)
1284 newstandins = lfutil.getstandinsstate(repo)
1285 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1285 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1286
1286
1287 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1287 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1288 normallookup=partial)
1288 normallookup=partial)
1289
1289
1290 return result
1290 return result
1291 finally:
1291 finally:
1292 wlock.release()
1292 wlock.release()
1293
1293
1294 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1294 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1295 result = orig(repo, files, *args, **kwargs)
1295 result = orig(repo, files, *args, **kwargs)
1296
1296
1297 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1297 filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
1298 if filelist:
1298 if filelist:
1299 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1299 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
1300 printmessage=False, normallookup=True)
1300 printmessage=False, normallookup=True)
1301
1301
1302 return result
1302 return result
@@ -1,2973 +1,2973 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile
10 import os, sys, errno, re, tempfile
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import encoding
16 import encoding
17 import lock as lockmod
17 import lock as lockmod
18
18
19 def parsealiases(cmd):
19 def parsealiases(cmd):
20 return cmd.lstrip("^").split("|")
20 return cmd.lstrip("^").split("|")
21
21
22 def findpossible(cmd, table, strict=False):
22 def findpossible(cmd, table, strict=False):
23 """
23 """
24 Return cmd -> (aliases, command table entry)
24 Return cmd -> (aliases, command table entry)
25 for each matching command.
25 for each matching command.
26 Return debug commands (or their aliases) only if no normal command matches.
26 Return debug commands (or their aliases) only if no normal command matches.
27 """
27 """
28 choice = {}
28 choice = {}
29 debugchoice = {}
29 debugchoice = {}
30
30
31 if cmd in table:
31 if cmd in table:
32 # short-circuit exact matches, "log" alias beats "^log|history"
32 # short-circuit exact matches, "log" alias beats "^log|history"
33 keys = [cmd]
33 keys = [cmd]
34 else:
34 else:
35 keys = table.keys()
35 keys = table.keys()
36
36
37 for e in keys:
37 for e in keys:
38 aliases = parsealiases(e)
38 aliases = parsealiases(e)
39 found = None
39 found = None
40 if cmd in aliases:
40 if cmd in aliases:
41 found = cmd
41 found = cmd
42 elif not strict:
42 elif not strict:
43 for a in aliases:
43 for a in aliases:
44 if a.startswith(cmd):
44 if a.startswith(cmd):
45 found = a
45 found = a
46 break
46 break
47 if found is not None:
47 if found is not None:
48 if aliases[0].startswith("debug") or found.startswith("debug"):
48 if aliases[0].startswith("debug") or found.startswith("debug"):
49 debugchoice[found] = (aliases, table[e])
49 debugchoice[found] = (aliases, table[e])
50 else:
50 else:
51 choice[found] = (aliases, table[e])
51 choice[found] = (aliases, table[e])
52
52
53 if not choice and debugchoice:
53 if not choice and debugchoice:
54 choice = debugchoice
54 choice = debugchoice
55
55
56 return choice
56 return choice
57
57
58 def findcmd(cmd, table, strict=True):
58 def findcmd(cmd, table, strict=True):
59 """Return (aliases, command table entry) for command string."""
59 """Return (aliases, command table entry) for command string."""
60 choice = findpossible(cmd, table, strict)
60 choice = findpossible(cmd, table, strict)
61
61
62 if cmd in choice:
62 if cmd in choice:
63 return choice[cmd]
63 return choice[cmd]
64
64
65 if len(choice) > 1:
65 if len(choice) > 1:
66 clist = choice.keys()
66 clist = choice.keys()
67 clist.sort()
67 clist.sort()
68 raise error.AmbiguousCommand(cmd, clist)
68 raise error.AmbiguousCommand(cmd, clist)
69
69
70 if choice:
70 if choice:
71 return choice.values()[0]
71 return choice.values()[0]
72
72
73 raise error.UnknownCommand(cmd)
73 raise error.UnknownCommand(cmd)
74
74
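# Illustrative example (assuming the standard command table): findcmd('st',
# table) matches the 'st' alias of status and returns its (aliases, entry)
# pair. With strict=False an abbreviation is resolved by prefix via
# findpossible(); a prefix matching several commands raises
# error.AmbiguousCommand, and an unknown name raises error.UnknownCommand.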
75 def findrepo(p):
75 def findrepo(p):
76 while not os.path.isdir(os.path.join(p, ".hg")):
76 while not os.path.isdir(os.path.join(p, ".hg")):
77 oldp, p = p, os.path.dirname(p)
77 oldp, p = p, os.path.dirname(p)
78 if p == oldp:
78 if p == oldp:
79 return None
79 return None
80
80
81 return p
81 return p
82
82
83 def bailifchanged(repo):
83 def bailifchanged(repo):
84 if repo.dirstate.p2() != nullid:
84 if repo.dirstate.p2() != nullid:
85 raise util.Abort(_('outstanding uncommitted merge'))
85 raise util.Abort(_('outstanding uncommitted merge'))
86 modified, added, removed, deleted = repo.status()[:4]
86 modified, added, removed, deleted = repo.status()[:4]
87 if modified or added or removed or deleted:
87 if modified or added or removed or deleted:
88 raise util.Abort(_('uncommitted changes'))
88 raise util.Abort(_('uncommitted changes'))
89 ctx = repo[None]
89 ctx = repo[None]
90 for s in sorted(ctx.substate):
90 for s in sorted(ctx.substate):
91 if ctx.sub(s).dirty():
91 if ctx.sub(s).dirty():
92 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
92 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
93
93
94 def logmessage(ui, opts):
94 def logmessage(ui, opts):
95 """ get the log message according to -m and -l option """
95 """ get the log message according to -m and -l option """
96 message = opts.get('message')
96 message = opts.get('message')
97 logfile = opts.get('logfile')
97 logfile = opts.get('logfile')
98
98
99 if message and logfile:
99 if message and logfile:
100 raise util.Abort(_('options --message and --logfile are mutually '
100 raise util.Abort(_('options --message and --logfile are mutually '
101 'exclusive'))
101 'exclusive'))
102 if not message and logfile:
102 if not message and logfile:
103 try:
103 try:
104 if logfile == '-':
104 if logfile == '-':
105 message = ui.fin.read()
105 message = ui.fin.read()
106 else:
106 else:
107 message = '\n'.join(util.readfile(logfile).splitlines())
107 message = '\n'.join(util.readfile(logfile).splitlines())
108 except IOError, inst:
108 except IOError, inst:
109 raise util.Abort(_("can't read commit message '%s': %s") %
109 raise util.Abort(_("can't read commit message '%s': %s") %
110 (logfile, inst.strerror))
110 (logfile, inst.strerror))
111 return message
111 return message
112
112
113 def mergeeditform(ctxorbool, baseform):
113 def mergeeditform(ctxorbool, baseform):
114 """build appropriate editform from ctxorbool and baseform
114 """build appropriate editform from ctxorbool and baseform
115
115
116 'ctxorbool' is either a ctx to be committed or a bool indicating whether
116 'ctxorbool' is either a ctx to be committed or a bool indicating whether
117 a merge is being committed.
117 a merge is being committed.
118
118
119 This returns editform 'baseform' with a '.merge' suffix if a merge is
119 This returns editform 'baseform' with a '.merge' suffix if a merge is
120 being committed, or with a '.normal' suffix otherwise.
120 being committed, or with a '.normal' suffix otherwise.
121 """
121 """
122 if isinstance(ctxorbool, bool):
122 if isinstance(ctxorbool, bool):
123 if ctxorbool:
123 if ctxorbool:
124 return baseform + ".merge"
124 return baseform + ".merge"
125 elif 1 < len(ctxorbool.parents()):
125 elif 1 < len(ctxorbool.parents()):
126 return baseform + ".merge"
126 return baseform + ".merge"
127
127
128 return baseform + ".normal"
128 return baseform + ".normal"
129
129
130 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
130 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
131 editform='', **opts):
131 editform='', **opts):
132 """get appropriate commit message editor according to '--edit' option
132 """get appropriate commit message editor according to '--edit' option
133
133
134 'finishdesc' is a function to be called with edited commit message
134 'finishdesc' is a function to be called with edited commit message
135 (= 'description' of the new changeset) just after editing, but
135 (= 'description' of the new changeset) just after editing, but
136 before checking emptiness. It should return the actual text to be
136 before checking emptiness. It should return the actual text to be
137 stored into history. This allows changing the description before
137 stored into history. This allows changing the description before
138 storing.
138 storing.
139
139
140 'extramsg' is an extra message to be shown in the editor instead of
140 'extramsg' is an extra message to be shown in the editor instead of
141 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
141 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
142 is automatically added.
142 is automatically added.
143
143
144 'editform' is a dot-separated list of names, to distinguish
144 'editform' is a dot-separated list of names, to distinguish
145 the purpose of commit text editing.
145 the purpose of commit text editing.
146
146
147 'getcommiteditor' returns 'commitforceeditor' regardless of
147 'getcommiteditor' returns 'commitforceeditor' regardless of
148 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
148 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
149 they are specific for usage in MQ.
149 they are specific for usage in MQ.
150 """
150 """
151 if edit or finishdesc or extramsg:
151 if edit or finishdesc or extramsg:
152 return lambda r, c, s: commitforceeditor(r, c, s,
152 return lambda r, c, s: commitforceeditor(r, c, s,
153 finishdesc=finishdesc,
153 finishdesc=finishdesc,
154 extramsg=extramsg,
154 extramsg=extramsg,
155 editform=editform)
155 editform=editform)
156 elif editform:
156 elif editform:
157 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
157 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
158 else:
158 else:
159 return commiteditor
159 return commiteditor
160
160
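# Illustrative usage sketch (a hypothetical call site, not taken from this
# file):
#
#     editor = getcommiteditor(edit=True, editform='commit.normal')
#     repo.commit(text, user, date, match, editor=editor)
#
# With edit=True the returned callable runs commitforceeditor(), so the user
# is dropped into their editor before the commit text is finalized.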
161 def loglimit(opts):
161 def loglimit(opts):
162 """get the log limit according to option -l/--limit"""
162 """get the log limit according to option -l/--limit"""
163 limit = opts.get('limit')
163 limit = opts.get('limit')
164 if limit:
164 if limit:
165 try:
165 try:
166 limit = int(limit)
166 limit = int(limit)
167 except ValueError:
167 except ValueError:
168 raise util.Abort(_('limit must be a positive integer'))
168 raise util.Abort(_('limit must be a positive integer'))
169 if limit <= 0:
169 if limit <= 0:
170 raise util.Abort(_('limit must be positive'))
170 raise util.Abort(_('limit must be positive'))
171 else:
171 else:
172 limit = None
172 limit = None
173 return limit
173 return limit
174
174
175 def makefilename(repo, pat, node, desc=None,
175 def makefilename(repo, pat, node, desc=None,
176 total=None, seqno=None, revwidth=None, pathname=None):
176 total=None, seqno=None, revwidth=None, pathname=None):
177 node_expander = {
177 node_expander = {
178 'H': lambda: hex(node),
178 'H': lambda: hex(node),
179 'R': lambda: str(repo.changelog.rev(node)),
179 'R': lambda: str(repo.changelog.rev(node)),
180 'h': lambda: short(node),
180 'h': lambda: short(node),
181 'm': lambda: re.sub('[^\w]', '_', str(desc))
181 'm': lambda: re.sub('[^\w]', '_', str(desc))
182 }
182 }
183 expander = {
183 expander = {
184 '%': lambda: '%',
184 '%': lambda: '%',
185 'b': lambda: os.path.basename(repo.root),
185 'b': lambda: os.path.basename(repo.root),
186 }
186 }
187
187
188 try:
188 try:
189 if node:
189 if node:
190 expander.update(node_expander)
190 expander.update(node_expander)
191 if node:
191 if node:
192 expander['r'] = (lambda:
192 expander['r'] = (lambda:
193 str(repo.changelog.rev(node)).zfill(revwidth or 0))
193 str(repo.changelog.rev(node)).zfill(revwidth or 0))
194 if total is not None:
194 if total is not None:
195 expander['N'] = lambda: str(total)
195 expander['N'] = lambda: str(total)
196 if seqno is not None:
196 if seqno is not None:
197 expander['n'] = lambda: str(seqno)
197 expander['n'] = lambda: str(seqno)
198 if total is not None and seqno is not None:
198 if total is not None and seqno is not None:
199 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
199 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
200 if pathname is not None:
200 if pathname is not None:
201 expander['s'] = lambda: os.path.basename(pathname)
201 expander['s'] = lambda: os.path.basename(pathname)
202 expander['d'] = lambda: os.path.dirname(pathname) or '.'
202 expander['d'] = lambda: os.path.dirname(pathname) or '.'
203 expander['p'] = lambda: pathname
203 expander['p'] = lambda: pathname
204
204
205 newname = []
205 newname = []
206 patlen = len(pat)
206 patlen = len(pat)
207 i = 0
207 i = 0
208 while i < patlen:
208 while i < patlen:
209 c = pat[i]
209 c = pat[i]
210 if c == '%':
210 if c == '%':
211 i += 1
211 i += 1
212 c = pat[i]
212 c = pat[i]
213 c = expander[c]()
213 c = expander[c]()
214 newname.append(c)
214 newname.append(c)
215 i += 1
215 i += 1
216 return ''.join(newname)
216 return ''.join(newname)
217 except KeyError, inst:
217 except KeyError, inst:
218 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
218 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
219 inst.args[0])
219 inst.args[0])
220
220
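# Illustrative expansion (the concrete values are made up): with 'node' set, a
# pattern such as '%b-r%R.patch' expands to
# '<basename of repo.root>-r<revision number>.patch', and '%s' / '%d' expand to
# the basename and directory of 'pathname' when it is supplied.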
221 def makefileobj(repo, pat, node=None, desc=None, total=None,
221 def makefileobj(repo, pat, node=None, desc=None, total=None,
222 seqno=None, revwidth=None, mode='wb', modemap=None,
222 seqno=None, revwidth=None, mode='wb', modemap=None,
223 pathname=None):
223 pathname=None):
224
224
225 writable = mode not in ('r', 'rb')
225 writable = mode not in ('r', 'rb')
226
226
227 if not pat or pat == '-':
227 if not pat or pat == '-':
228 fp = writable and repo.ui.fout or repo.ui.fin
228 fp = writable and repo.ui.fout or repo.ui.fin
229 if util.safehasattr(fp, 'fileno'):
229 if util.safehasattr(fp, 'fileno'):
230 return os.fdopen(os.dup(fp.fileno()), mode)
230 return os.fdopen(os.dup(fp.fileno()), mode)
231 else:
231 else:
232 # if this fp can't be duped properly, return
232 # if this fp can't be duped properly, return
233 # a dummy object that can be closed
233 # a dummy object that can be closed
234 class wrappedfileobj(object):
234 class wrappedfileobj(object):
235 noop = lambda x: None
235 noop = lambda x: None
236 def __init__(self, f):
236 def __init__(self, f):
237 self.f = f
237 self.f = f
238 def __getattr__(self, attr):
238 def __getattr__(self, attr):
239 if attr == 'close':
239 if attr == 'close':
240 return self.noop
240 return self.noop
241 else:
241 else:
242 return getattr(self.f, attr)
242 return getattr(self.f, attr)
243
243
244 return wrappedfileobj(fp)
244 return wrappedfileobj(fp)
245 if util.safehasattr(pat, 'write') and writable:
245 if util.safehasattr(pat, 'write') and writable:
246 return pat
246 return pat
247 if util.safehasattr(pat, 'read') and 'r' in mode:
247 if util.safehasattr(pat, 'read') and 'r' in mode:
248 return pat
248 return pat
249 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
249 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
250 if modemap is not None:
250 if modemap is not None:
251 mode = modemap.get(fn, mode)
251 mode = modemap.get(fn, mode)
252 if mode == 'wb':
252 if mode == 'wb':
253 modemap[fn] = 'ab'
253 modemap[fn] = 'ab'
254 return open(fn, mode)
254 return open(fn, mode)
255
255
256 def openrevlog(repo, cmd, file_, opts):
256 def openrevlog(repo, cmd, file_, opts):
257 """opens the changelog, manifest, a filelog or a given revlog"""
257 """opens the changelog, manifest, a filelog or a given revlog"""
258 cl = opts['changelog']
258 cl = opts['changelog']
259 mf = opts['manifest']
259 mf = opts['manifest']
260 msg = None
260 msg = None
261 if cl and mf:
261 if cl and mf:
262 msg = _('cannot specify --changelog and --manifest at the same time')
262 msg = _('cannot specify --changelog and --manifest at the same time')
263 elif cl or mf:
263 elif cl or mf:
264 if file_:
264 if file_:
265 msg = _('cannot specify filename with --changelog or --manifest')
265 msg = _('cannot specify filename with --changelog or --manifest')
266 elif not repo:
266 elif not repo:
267 msg = _('cannot specify --changelog or --manifest '
267 msg = _('cannot specify --changelog or --manifest '
268 'without a repository')
268 'without a repository')
269 if msg:
269 if msg:
270 raise util.Abort(msg)
270 raise util.Abort(msg)
271
271
272 r = None
272 r = None
273 if repo:
273 if repo:
274 if cl:
274 if cl:
275 r = repo.unfiltered().changelog
275 r = repo.unfiltered().changelog
276 elif mf:
276 elif mf:
277 r = repo.manifest
277 r = repo.manifest
278 elif file_:
278 elif file_:
279 filelog = repo.file(file_)
279 filelog = repo.file(file_)
280 if len(filelog):
280 if len(filelog):
281 r = filelog
281 r = filelog
282 if not r:
282 if not r:
283 if not file_:
283 if not file_:
284 raise error.CommandError(cmd, _('invalid arguments'))
284 raise error.CommandError(cmd, _('invalid arguments'))
285 if not os.path.isfile(file_):
285 if not os.path.isfile(file_):
286 raise util.Abort(_("revlog '%s' not found") % file_)
286 raise util.Abort(_("revlog '%s' not found") % file_)
287 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
287 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
288 file_[:-2] + ".i")
288 file_[:-2] + ".i")
289 return r
289 return r
290
290
291 def copy(ui, repo, pats, opts, rename=False):
291 def copy(ui, repo, pats, opts, rename=False):
292 # called with the repo lock held
292 # called with the repo lock held
293 #
293 #
294 # hgsep => pathname that uses "/" to separate directories
294 # hgsep => pathname that uses "/" to separate directories
295 # ossep => pathname that uses os.sep to separate directories
295 # ossep => pathname that uses os.sep to separate directories
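    # (illustrative: on Windows the same file is 'foo/bar.txt' as hgsep and
    # 'foo\\bar.txt' as ossep; on POSIX the two forms coincide)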
296 cwd = repo.getcwd()
296 cwd = repo.getcwd()
297 targets = {}
297 targets = {}
298 after = opts.get("after")
298 after = opts.get("after")
299 dryrun = opts.get("dry_run")
299 dryrun = opts.get("dry_run")
300 wctx = repo[None]
300 wctx = repo[None]
301
301
302 def walkpat(pat):
302 def walkpat(pat):
303 srcs = []
303 srcs = []
304 badstates = after and '?' or '?r'
304 badstates = after and '?' or '?r'
305 m = scmutil.match(repo[None], [pat], opts, globbed=True)
305 m = scmutil.match(repo[None], [pat], opts, globbed=True)
306 for abs in repo.walk(m):
306 for abs in repo.walk(m):
307 state = repo.dirstate[abs]
307 state = repo.dirstate[abs]
308 rel = m.rel(abs)
308 rel = m.rel(abs)
309 exact = m.exact(abs)
309 exact = m.exact(abs)
310 if state in badstates:
310 if state in badstates:
311 if exact and state == '?':
311 if exact and state == '?':
312 ui.warn(_('%s: not copying - file is not managed\n') % rel)
312 ui.warn(_('%s: not copying - file is not managed\n') % rel)
313 if exact and state == 'r':
313 if exact and state == 'r':
314 ui.warn(_('%s: not copying - file has been marked for'
314 ui.warn(_('%s: not copying - file has been marked for'
315 ' remove\n') % rel)
315 ' remove\n') % rel)
316 continue
316 continue
317 # abs: hgsep
317 # abs: hgsep
318 # rel: ossep
318 # rel: ossep
319 srcs.append((abs, rel, exact))
319 srcs.append((abs, rel, exact))
320 return srcs
320 return srcs
321
321
322 # abssrc: hgsep
322 # abssrc: hgsep
323 # relsrc: ossep
323 # relsrc: ossep
324 # otarget: ossep
324 # otarget: ossep
325 def copyfile(abssrc, relsrc, otarget, exact):
325 def copyfile(abssrc, relsrc, otarget, exact):
326 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
326 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
327 if '/' in abstarget:
327 if '/' in abstarget:
328 # We cannot normalize abstarget itself; that would prevent
328 # We cannot normalize abstarget itself; that would prevent
329 # case-only renames, like a => A.
329 # case-only renames, like a => A.
330 abspath, absname = abstarget.rsplit('/', 1)
330 abspath, absname = abstarget.rsplit('/', 1)
331 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
331 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
332 reltarget = repo.pathto(abstarget, cwd)
332 reltarget = repo.pathto(abstarget, cwd)
333 target = repo.wjoin(abstarget)
333 target = repo.wjoin(abstarget)
334 src = repo.wjoin(abssrc)
334 src = repo.wjoin(abssrc)
335 state = repo.dirstate[abstarget]
335 state = repo.dirstate[abstarget]
336
336
337 scmutil.checkportable(ui, abstarget)
337 scmutil.checkportable(ui, abstarget)
338
338
339 # check for collisions
339 # check for collisions
340 prevsrc = targets.get(abstarget)
340 prevsrc = targets.get(abstarget)
341 if prevsrc is not None:
341 if prevsrc is not None:
342 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
342 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
343 (reltarget, repo.pathto(abssrc, cwd),
343 (reltarget, repo.pathto(abssrc, cwd),
344 repo.pathto(prevsrc, cwd)))
344 repo.pathto(prevsrc, cwd)))
345 return
345 return
346
346
347 # check for overwrites
347 # check for overwrites
348 exists = os.path.lexists(target)
348 exists = os.path.lexists(target)
349 samefile = False
349 samefile = False
350 if exists and abssrc != abstarget:
350 if exists and abssrc != abstarget:
351 if (repo.dirstate.normalize(abssrc) ==
351 if (repo.dirstate.normalize(abssrc) ==
352 repo.dirstate.normalize(abstarget)):
352 repo.dirstate.normalize(abstarget)):
353 if not rename:
353 if not rename:
354 ui.warn(_("%s: can't copy - same file\n") % reltarget)
354 ui.warn(_("%s: can't copy - same file\n") % reltarget)
355 return
355 return
356 exists = False
356 exists = False
357 samefile = True
357 samefile = True
358
358
359 if not after and exists or after and state in 'mn':
359 if not after and exists or after and state in 'mn':
360 if not opts['force']:
360 if not opts['force']:
361 ui.warn(_('%s: not overwriting - file exists\n') %
361 ui.warn(_('%s: not overwriting - file exists\n') %
362 reltarget)
362 reltarget)
363 return
363 return
364
364
365 if after:
365 if after:
366 if not exists:
366 if not exists:
367 if rename:
367 if rename:
368 ui.warn(_('%s: not recording move - %s does not exist\n') %
368 ui.warn(_('%s: not recording move - %s does not exist\n') %
369 (relsrc, reltarget))
369 (relsrc, reltarget))
370 else:
370 else:
371 ui.warn(_('%s: not recording copy - %s does not exist\n') %
371 ui.warn(_('%s: not recording copy - %s does not exist\n') %
372 (relsrc, reltarget))
372 (relsrc, reltarget))
373 return
373 return
374 elif not dryrun:
374 elif not dryrun:
375 try:
375 try:
376 if exists:
376 if exists:
377 os.unlink(target)
377 os.unlink(target)
378 targetdir = os.path.dirname(target) or '.'
378 targetdir = os.path.dirname(target) or '.'
379 if not os.path.isdir(targetdir):
379 if not os.path.isdir(targetdir):
380 os.makedirs(targetdir)
380 os.makedirs(targetdir)
381 if samefile:
381 if samefile:
382 tmp = target + "~hgrename"
382 tmp = target + "~hgrename"
383 os.rename(src, tmp)
383 os.rename(src, tmp)
384 os.rename(tmp, target)
384 os.rename(tmp, target)
385 else:
385 else:
386 util.copyfile(src, target)
386 util.copyfile(src, target)
387 srcexists = True
387 srcexists = True
388 except IOError, inst:
388 except IOError, inst:
389 if inst.errno == errno.ENOENT:
389 if inst.errno == errno.ENOENT:
390 ui.warn(_('%s: deleted in working copy\n') % relsrc)
390 ui.warn(_('%s: deleted in working copy\n') % relsrc)
391 srcexists = False
391 srcexists = False
392 else:
392 else:
393 ui.warn(_('%s: cannot copy - %s\n') %
393 ui.warn(_('%s: cannot copy - %s\n') %
394 (relsrc, inst.strerror))
394 (relsrc, inst.strerror))
395 return True # report a failure
395 return True # report a failure
396
396
397 if ui.verbose or not exact:
397 if ui.verbose or not exact:
398 if rename:
398 if rename:
399 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
399 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
400 else:
400 else:
401 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
401 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
402
402
403 targets[abstarget] = abssrc
403 targets[abstarget] = abssrc
404
404
405 # fix up dirstate
405 # fix up dirstate
406 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
406 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
407 dryrun=dryrun, cwd=cwd)
407 dryrun=dryrun, cwd=cwd)
408 if rename and not dryrun:
408 if rename and not dryrun:
409 if not after and srcexists and not samefile:
409 if not after and srcexists and not samefile:
410 util.unlinkpath(repo.wjoin(abssrc))
410 util.unlinkpath(repo.wjoin(abssrc))
411 wctx.forget([abssrc])
411 wctx.forget([abssrc])
412
412
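# Added aside (not original code): the 'samefile' branch above handles a
# rename that changes only the letter case of a path. Such a rename is not
# reliable on every case-insensitive filesystem when done directly, so the
# file is moved through a temporary name first:
#
#     os.rename('readme.txt', 'readme.txt~hgrename')
#     os.rename('readme.txt~hgrename', 'README.TXT')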
413 # pat: ossep
413 # pat: ossep
414 # dest: ossep
414 # dest: ossep
415 # srcs: list of (hgsep, hgsep, ossep, bool)
415 # srcs: list of (hgsep, hgsep, ossep, bool)
416 # return: function that takes hgsep and returns ossep
416 # return: function that takes hgsep and returns ossep
417 def targetpathfn(pat, dest, srcs):
417 def targetpathfn(pat, dest, srcs):
418 if os.path.isdir(pat):
418 if os.path.isdir(pat):
419 abspfx = pathutil.canonpath(repo.root, cwd, pat)
419 abspfx = pathutil.canonpath(repo.root, cwd, pat)
420 abspfx = util.localpath(abspfx)
420 abspfx = util.localpath(abspfx)
421 if destdirexists:
421 if destdirexists:
422 striplen = len(os.path.split(abspfx)[0])
422 striplen = len(os.path.split(abspfx)[0])
423 else:
423 else:
424 striplen = len(abspfx)
424 striplen = len(abspfx)
425 if striplen:
425 if striplen:
426 striplen += len(os.sep)
426 striplen += len(os.sep)
427 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
427 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
428 elif destdirexists:
428 elif destdirexists:
429 res = lambda p: os.path.join(dest,
429 res = lambda p: os.path.join(dest,
430 os.path.basename(util.localpath(p)))
430 os.path.basename(util.localpath(p)))
431 else:
431 else:
432 res = lambda p: dest
432 res = lambda p: dest
433 return res
433 return res
434
434
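# Worked example for the striplen computation above (hypothetical paths):
# copying directory 'a' containing 'a/b/c' into an existing directory 'dest'
# keeps the last path component, giving 'dest/a/b/c'; copying it to a 'dest'
# that does not exist yet strips the whole 'a/' prefix, giving 'dest/b/c'.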
435 # pat: ossep
435 # pat: ossep
436 # dest: ossep
436 # dest: ossep
437 # srcs: list of (hgsep, hgsep, ossep, bool)
437 # srcs: list of (hgsep, hgsep, ossep, bool)
438 # return: function that takes hgsep and returns ossep
438 # return: function that takes hgsep and returns ossep
439 def targetpathafterfn(pat, dest, srcs):
439 def targetpathafterfn(pat, dest, srcs):
440 if matchmod.patkind(pat):
440 if matchmod.patkind(pat):
441 # a mercurial pattern
441 # a mercurial pattern
442 res = lambda p: os.path.join(dest,
442 res = lambda p: os.path.join(dest,
443 os.path.basename(util.localpath(p)))
443 os.path.basename(util.localpath(p)))
444 else:
444 else:
445 abspfx = pathutil.canonpath(repo.root, cwd, pat)
445 abspfx = pathutil.canonpath(repo.root, cwd, pat)
446 if len(abspfx) < len(srcs[0][0]):
446 if len(abspfx) < len(srcs[0][0]):
447 # A directory. Either the target path contains the last
447 # A directory. Either the target path contains the last
448 # component of the source path or it does not.
448 # component of the source path or it does not.
449 def evalpath(striplen):
449 def evalpath(striplen):
450 score = 0
450 score = 0
451 for s in srcs:
451 for s in srcs:
452 t = os.path.join(dest, util.localpath(s[0])[striplen:])
452 t = os.path.join(dest, util.localpath(s[0])[striplen:])
453 if os.path.lexists(t):
453 if os.path.lexists(t):
454 score += 1
454 score += 1
455 return score
455 return score
456
456
457 abspfx = util.localpath(abspfx)
457 abspfx = util.localpath(abspfx)
458 striplen = len(abspfx)
458 striplen = len(abspfx)
459 if striplen:
459 if striplen:
460 striplen += len(os.sep)
460 striplen += len(os.sep)
461 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
461 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
462 score = evalpath(striplen)
462 score = evalpath(striplen)
463 striplen1 = len(os.path.split(abspfx)[0])
463 striplen1 = len(os.path.split(abspfx)[0])
464 if striplen1:
464 if striplen1:
465 striplen1 += len(os.sep)
465 striplen1 += len(os.sep)
466 if evalpath(striplen1) > score:
466 if evalpath(striplen1) > score:
467 striplen = striplen1
467 striplen = striplen1
468 res = lambda p: os.path.join(dest,
468 res = lambda p: os.path.join(dest,
469 util.localpath(p)[striplen:])
469 util.localpath(p)[striplen:])
470 else:
470 else:
471 # a file
471 # a file
472 if destdirexists:
472 if destdirexists:
473 res = lambda p: os.path.join(dest,
473 res = lambda p: os.path.join(dest,
474 os.path.basename(util.localpath(p)))
474 os.path.basename(util.localpath(p)))
475 else:
475 else:
476 res = lambda p: dest
476 res = lambda p: dest
477 return res
477 return res
478
478
479
479
480 pats = scmutil.expandpats(pats)
480 pats = scmutil.expandpats(pats)
481 if not pats:
481 if not pats:
482 raise util.Abort(_('no source or destination specified'))
482 raise util.Abort(_('no source or destination specified'))
483 if len(pats) == 1:
483 if len(pats) == 1:
484 raise util.Abort(_('no destination specified'))
484 raise util.Abort(_('no destination specified'))
485 dest = pats.pop()
485 dest = pats.pop()
486 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
486 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
487 if not destdirexists:
487 if not destdirexists:
488 if len(pats) > 1 or matchmod.patkind(pats[0]):
488 if len(pats) > 1 or matchmod.patkind(pats[0]):
489 raise util.Abort(_('with multiple sources, destination must be an '
489 raise util.Abort(_('with multiple sources, destination must be an '
490 'existing directory'))
490 'existing directory'))
491 if util.endswithsep(dest):
491 if util.endswithsep(dest):
492 raise util.Abort(_('destination %s is not a directory') % dest)
492 raise util.Abort(_('destination %s is not a directory') % dest)
493
493
494 tfn = targetpathfn
494 tfn = targetpathfn
495 if after:
495 if after:
496 tfn = targetpathafterfn
496 tfn = targetpathafterfn
497 copylist = []
497 copylist = []
498 for pat in pats:
498 for pat in pats:
499 srcs = walkpat(pat)
499 srcs = walkpat(pat)
500 if not srcs:
500 if not srcs:
501 continue
501 continue
502 copylist.append((tfn(pat, dest, srcs), srcs))
502 copylist.append((tfn(pat, dest, srcs), srcs))
503 if not copylist:
503 if not copylist:
504 raise util.Abort(_('no files to copy'))
504 raise util.Abort(_('no files to copy'))
505
505
506 errors = 0
506 errors = 0
507 for targetpath, srcs in copylist:
507 for targetpath, srcs in copylist:
508 for abssrc, relsrc, exact in srcs:
508 for abssrc, relsrc, exact in srcs:
509 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
509 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
510 errors += 1
510 errors += 1
511
511
512 if errors:
512 if errors:
513 ui.warn(_('(consider using --after)\n'))
513 ui.warn(_('(consider using --after)\n'))
514
514
515 return errors != 0
515 return errors != 0
516
516
517 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
517 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
518 runargs=None, appendpid=False):
518 runargs=None, appendpid=False):
519 '''Run a command as a service.'''
519 '''Run a command as a service.'''
520
520
521 def writepid(pid):
521 def writepid(pid):
522 if opts['pid_file']:
522 if opts['pid_file']:
523 mode = appendpid and 'a' or 'w'
523 mode = appendpid and 'a' or 'w'
524 fp = open(opts['pid_file'], mode)
524 fp = open(opts['pid_file'], mode)
525 fp.write(str(pid) + '\n')
525 fp.write(str(pid) + '\n')
526 fp.close()
526 fp.close()
527
527
528 if opts['daemon'] and not opts['daemon_pipefds']:
528 if opts['daemon'] and not opts['daemon_pipefds']:
529 # Signal child process startup with file removal
529 # Signal child process startup with file removal
530 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
530 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
531 os.close(lockfd)
531 os.close(lockfd)
532 try:
532 try:
533 if not runargs:
533 if not runargs:
534 runargs = util.hgcmd() + sys.argv[1:]
534 runargs = util.hgcmd() + sys.argv[1:]
535 runargs.append('--daemon-pipefds=%s' % lockpath)
535 runargs.append('--daemon-pipefds=%s' % lockpath)
536 # Don't pass --cwd to the child process, because we've already
536 # Don't pass --cwd to the child process, because we've already
537 # changed directory.
537 # changed directory.
538 for i in xrange(1, len(runargs)):
538 for i in xrange(1, len(runargs)):
539 if runargs[i].startswith('--cwd='):
539 if runargs[i].startswith('--cwd='):
540 del runargs[i]
540 del runargs[i]
541 break
541 break
542 elif runargs[i].startswith('--cwd'):
542 elif runargs[i].startswith('--cwd'):
543 del runargs[i:i + 2]
543 del runargs[i:i + 2]
544 break
544 break
545 def condfn():
545 def condfn():
546 return not os.path.exists(lockpath)
546 return not os.path.exists(lockpath)
547 pid = util.rundetached(runargs, condfn)
547 pid = util.rundetached(runargs, condfn)
548 if pid < 0:
548 if pid < 0:
549 raise util.Abort(_('child process failed to start'))
549 raise util.Abort(_('child process failed to start'))
550 writepid(pid)
550 writepid(pid)
551 finally:
551 finally:
552 try:
552 try:
553 os.unlink(lockpath)
553 os.unlink(lockpath)
554 except OSError, e:
554 except OSError, e:
555 if e.errno != errno.ENOENT:
555 if e.errno != errno.ENOENT:
556 raise
556 raise
557 if parentfn:
557 if parentfn:
558 return parentfn(pid)
558 return parentfn(pid)
559 else:
559 else:
560 return
560 return
561
561
562 if initfn:
562 if initfn:
563 initfn()
563 initfn()
564
564
565 if not opts['daemon']:
565 if not opts['daemon']:
566 writepid(os.getpid())
566 writepid(os.getpid())
567
567
568 if opts['daemon_pipefds']:
568 if opts['daemon_pipefds']:
569 lockpath = opts['daemon_pipefds']
569 lockpath = opts['daemon_pipefds']
570 try:
570 try:
571 os.setsid()
571 os.setsid()
572 except AttributeError:
572 except AttributeError:
573 pass
573 pass
574 os.unlink(lockpath)
574 os.unlink(lockpath)
575 util.hidewindow()
575 util.hidewindow()
576 sys.stdout.flush()
576 sys.stdout.flush()
577 sys.stderr.flush()
577 sys.stderr.flush()
578
578
579 nullfd = os.open(os.devnull, os.O_RDWR)
579 nullfd = os.open(os.devnull, os.O_RDWR)
580 logfilefd = nullfd
580 logfilefd = nullfd
581 if logfile:
581 if logfile:
582 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
582 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
583 os.dup2(nullfd, 0)
583 os.dup2(nullfd, 0)
584 os.dup2(logfilefd, 1)
584 os.dup2(logfilefd, 1)
585 os.dup2(logfilefd, 2)
585 os.dup2(logfilefd, 2)
586 if nullfd not in (0, 1, 2):
586 if nullfd not in (0, 1, 2):
587 os.close(nullfd)
587 os.close(nullfd)
588 if logfile and logfilefd not in (0, 1, 2):
588 if logfile and logfilefd not in (0, 1, 2):
589 os.close(logfilefd)
589 os.close(logfilefd)
590
590
591 if runfn:
591 if runfn:
592 return runfn()
592 return runfn()
593
593
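# Added sketch (not Mercurial code): the readiness handshake used by
# service() above, reduced to its essentials. The parent creates a scratch
# file, launches the child detached with the file's path, and polls until
# the child deletes the file to signal that initialization is finished. The
# '--ready-file' flag and the helper name are hypothetical placeholders for
# hg's real '--daemon-pipefds' mechanism.
def _spawnservice_sketch(cmd, timeout=10.0):
    import subprocess, tempfile, time
    fd, lockpath = tempfile.mkstemp(prefix='hg-service-')
    os.close(fd)
    try:
        child = subprocess.Popen(cmd + ['--ready-file=%s' % lockpath])
        deadline = time.time() + timeout
        while os.path.exists(lockpath):  # condfn(): file gone means ready
            if child.poll() is not None or time.time() > deadline:
                raise RuntimeError('child process failed to start')
            time.sleep(0.1)
        return child.pid
    finally:
        try:
            os.unlink(lockpath)
        except OSError:
            pass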
594 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
594 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
595 """Utility function used by commands.import to import a single patch
595 """Utility function used by commands.import to import a single patch
596
596
597 This function is explicitly defined here to help the evolve extension
597 This function is explicitly defined here to help the evolve extension
598 wrap this part of the import logic.
598 wrap this part of the import logic.
599
599
600 The API is currently a bit ugly because it is a simple code translation from
600 The API is currently a bit ugly because it is a simple code translation from
601 the import command. Feel free to make it better.
601 the import command. Feel free to make it better.
602
602
603 :hunk: a patch (as a binary string)
603 :hunk: a patch (as a binary string)
604 :parents: nodes that will be parent of the created commit
604 :parents: nodes that will be parent of the created commit
605 :opts: the full dict of options passed to the import command
605 :opts: the full dict of options passed to the import command
606 :msgs: list to save the commit message to.
606 :msgs: list to save the commit message to.
607 (used in case we need to save it when failing)
607 (used in case we need to save it when failing)
608 :updatefunc: a function that updates a repo to a given node
608 :updatefunc: a function that updates a repo to a given node
609 updatefunc(<repo>, <node>)
609 updatefunc(<repo>, <node>)
610 """
610 """
611 tmpname, message, user, date, branch, nodeid, p1, p2 = \
611 tmpname, message, user, date, branch, nodeid, p1, p2 = \
612 patch.extract(ui, hunk)
612 patch.extract(ui, hunk)
613
613
614 update = not opts.get('bypass')
614 update = not opts.get('bypass')
615 strip = opts["strip"]
615 strip = opts["strip"]
616 sim = float(opts.get('similarity') or 0)
616 sim = float(opts.get('similarity') or 0)
617 if not tmpname:
617 if not tmpname:
618 return (None, None, False)
618 return (None, None, False)
619 msg = _('applied to working directory')
619 msg = _('applied to working directory')
620
620
621 rejects = False
621 rejects = False
622
622
623 try:
623 try:
624 cmdline_message = logmessage(ui, opts)
624 cmdline_message = logmessage(ui, opts)
625 if cmdline_message:
625 if cmdline_message:
626 # pickup the cmdline msg
626 # pickup the cmdline msg
627 message = cmdline_message
627 message = cmdline_message
628 elif message:
628 elif message:
629 # pickup the patch msg
629 # pickup the patch msg
630 message = message.strip()
630 message = message.strip()
631 else:
631 else:
632 # launch the editor
632 # launch the editor
633 message = None
633 message = None
634 ui.debug('message:\n%s\n' % message)
634 ui.debug('message:\n%s\n' % message)
635
635
636 if len(parents) == 1:
636 if len(parents) == 1:
637 parents.append(repo[nullid])
637 parents.append(repo[nullid])
638 if opts.get('exact'):
638 if opts.get('exact'):
639 if not nodeid or not p1:
639 if not nodeid or not p1:
640 raise util.Abort(_('not a Mercurial patch'))
640 raise util.Abort(_('not a Mercurial patch'))
641 p1 = repo[p1]
641 p1 = repo[p1]
642 p2 = repo[p2 or nullid]
642 p2 = repo[p2 or nullid]
643 elif p2:
643 elif p2:
644 try:
644 try:
645 p1 = repo[p1]
645 p1 = repo[p1]
646 p2 = repo[p2]
646 p2 = repo[p2]
647 # Without any options, consider p2 only if the
647 # Without any options, consider p2 only if the
648 # patch is being applied on top of the recorded
648 # patch is being applied on top of the recorded
649 # first parent.
649 # first parent.
650 if p1 != parents[0]:
650 if p1 != parents[0]:
651 p1 = parents[0]
651 p1 = parents[0]
652 p2 = repo[nullid]
652 p2 = repo[nullid]
653 except error.RepoError:
653 except error.RepoError:
654 p1, p2 = parents
654 p1, p2 = parents
655 if p2.node() == nullid:
655 if p2.node() == nullid:
656 ui.warn(_("warning: import the patch as a normal revision\n"
656 ui.warn(_("warning: import the patch as a normal revision\n"
657 "(use --exact to import the patch as a merge)\n"))
657 "(use --exact to import the patch as a merge)\n"))
658 else:
658 else:
659 p1, p2 = parents
659 p1, p2 = parents
660
660
661 n = None
661 n = None
662 if update:
662 if update:
663 repo.dirstate.beginparentchange()
663 repo.dirstate.beginparentchange()
664 if p1 != parents[0]:
664 if p1 != parents[0]:
665 updatefunc(repo, p1.node())
665 updatefunc(repo, p1.node())
666 if p2 != parents[1]:
666 if p2 != parents[1]:
667 repo.setparents(p1.node(), p2.node())
667 repo.setparents(p1.node(), p2.node())
668
668
669 if opts.get('exact') or opts.get('import_branch'):
669 if opts.get('exact') or opts.get('import_branch'):
670 repo.dirstate.setbranch(branch or 'default')
670 repo.dirstate.setbranch(branch or 'default')
671
671
672 partial = opts.get('partial', False)
672 partial = opts.get('partial', False)
673 files = set()
673 files = set()
674 try:
674 try:
675 patch.patch(ui, repo, tmpname, strip=strip, files=files,
675 patch.patch(ui, repo, tmpname, strip=strip, files=files,
676 eolmode=None, similarity=sim / 100.0)
676 eolmode=None, similarity=sim / 100.0)
677 except patch.PatchError, e:
677 except patch.PatchError, e:
678 if not partial:
678 if not partial:
679 raise util.Abort(str(e))
679 raise util.Abort(str(e))
680 if partial:
680 if partial:
681 rejects = True
681 rejects = True
682
682
683 files = list(files)
683 files = list(files)
684 if opts.get('no_commit'):
684 if opts.get('no_commit'):
685 if message:
685 if message:
686 msgs.append(message)
686 msgs.append(message)
687 else:
687 else:
688 if opts.get('exact') or p2:
688 if opts.get('exact') or p2:
689 # If you got here, you either use --force and know what
689 # If you got here, you either use --force and know what
690 # you are doing or used --exact or a merge patch while
690 # you are doing or used --exact or a merge patch while
691 # being updated to its first parent.
691 # being updated to its first parent.
692 m = None
692 m = None
693 else:
693 else:
694 m = scmutil.matchfiles(repo, files or [])
694 m = scmutil.matchfiles(repo, files or [])
695 editform = mergeeditform(repo[None], 'import.normal')
695 editform = mergeeditform(repo[None], 'import.normal')
696 if opts.get('exact'):
696 if opts.get('exact'):
697 editor = None
697 editor = None
698 else:
698 else:
699 editor = getcommiteditor(editform=editform, **opts)
699 editor = getcommiteditor(editform=editform, **opts)
700 n = repo.commit(message, opts.get('user') or user,
700 n = repo.commit(message, opts.get('user') or user,
701 opts.get('date') or date, match=m,
701 opts.get('date') or date, match=m,
702 editor=editor, force=partial)
702 editor=editor, force=partial)
703 repo.dirstate.endparentchange()
703 repo.dirstate.endparentchange()
704 else:
704 else:
705 if opts.get('exact') or opts.get('import_branch'):
705 if opts.get('exact') or opts.get('import_branch'):
706 branch = branch or 'default'
706 branch = branch or 'default'
707 else:
707 else:
708 branch = p1.branch()
708 branch = p1.branch()
709 store = patch.filestore()
709 store = patch.filestore()
710 try:
710 try:
711 files = set()
711 files = set()
712 try:
712 try:
713 patch.patchrepo(ui, repo, p1, store, tmpname, strip,
713 patch.patchrepo(ui, repo, p1, store, tmpname, strip,
714 files, eolmode=None)
714 files, eolmode=None)
715 except patch.PatchError, e:
715 except patch.PatchError, e:
716 raise util.Abort(str(e))
716 raise util.Abort(str(e))
717 if opts.get('exact'):
717 if opts.get('exact'):
718 editor = None
718 editor = None
719 else:
719 else:
720 editor = getcommiteditor(editform='import.bypass')
720 editor = getcommiteditor(editform='import.bypass')
721 memctx = context.makememctx(repo, (p1.node(), p2.node()),
721 memctx = context.makememctx(repo, (p1.node(), p2.node()),
722 message,
722 message,
723 opts.get('user') or user,
723 opts.get('user') or user,
724 opts.get('date') or date,
724 opts.get('date') or date,
725 branch, files, store,
725 branch, files, store,
726 editor=editor)
726 editor=editor)
727 n = memctx.commit()
727 n = memctx.commit()
728 finally:
728 finally:
729 store.close()
729 store.close()
730 if opts.get('exact') and opts.get('no_commit'):
730 if opts.get('exact') and opts.get('no_commit'):
731 # --exact with --no-commit is still useful in that it does merge
731 # --exact with --no-commit is still useful in that it does merge
732 # and branch bits
732 # and branch bits
733 ui.warn(_("warning: can't check exact import with --no-commit\n"))
733 ui.warn(_("warning: can't check exact import with --no-commit\n"))
734 elif opts.get('exact') and hex(n) != nodeid:
734 elif opts.get('exact') and hex(n) != nodeid:
735 raise util.Abort(_('patch is damaged or loses information'))
735 raise util.Abort(_('patch is damaged or loses information'))
736 if n:
736 if n:
737 # i18n: refers to a short changeset id
737 # i18n: refers to a short changeset id
738 msg = _('created %s') % short(n)
738 msg = _('created %s') % short(n)
739 return (msg, n, rejects)
739 return (msg, n, rejects)
740 finally:
740 finally:
741 os.unlink(tmpname)
741 os.unlink(tmpname)
742
742
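# Added sketch (hypothetical caller, not commands.import): shows the shape
# of the (msg, node, rejects) value returned by tryimportone() and the
# updatefunc callback it expects. 'opts' is assumed to carry the same keys
# as the import command ('strip', 'bypass', 'partial', ...); the no-op
# updatefunc is a stand-in for a real working-copy update.
def _importone_sketch(ui, repo, hunk, opts):
    msgs = []
    parents = repo[None].parents()
    def updatefunc(repo, node):
        pass  # placeholder: update the working copy to 'node' here
    msg, node, rejects = tryimportone(ui, repo, hunk, parents, opts,
                                      msgs, updatefunc)
    if rejects:
        ui.warn('patch applied partially; inspect the .rej files\n')
    elif node:
        ui.status('%s\n' % msg)  # e.g. 'created 1234567890ab'
    return node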
743 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
743 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
744 opts=None):
744 opts=None):
745 '''export changesets as hg patches.'''
745 '''export changesets as hg patches.'''
746
746
747 total = len(revs)
747 total = len(revs)
748 revwidth = max([len(str(rev)) for rev in revs])
748 revwidth = max([len(str(rev)) for rev in revs])
749 filemode = {}
749 filemode = {}
750
750
751 def single(rev, seqno, fp):
751 def single(rev, seqno, fp):
752 ctx = repo[rev]
752 ctx = repo[rev]
753 node = ctx.node()
753 node = ctx.node()
754 parents = [p.node() for p in ctx.parents() if p]
754 parents = [p.node() for p in ctx.parents() if p]
755 branch = ctx.branch()
755 branch = ctx.branch()
756 if switch_parent:
756 if switch_parent:
757 parents.reverse()
757 parents.reverse()
758 prev = (parents and parents[0]) or nullid
758 prev = (parents and parents[0]) or nullid
759
759
760 shouldclose = False
760 shouldclose = False
761 if not fp and len(template) > 0:
761 if not fp and len(template) > 0:
762 desc_lines = ctx.description().rstrip().split('\n')
762 desc_lines = ctx.description().rstrip().split('\n')
763 desc = desc_lines[0] # Commit always has a first line.
763 desc = desc_lines[0] # Commit always has a first line.
764 fp = makefileobj(repo, template, node, desc=desc, total=total,
764 fp = makefileobj(repo, template, node, desc=desc, total=total,
765 seqno=seqno, revwidth=revwidth, mode='wb',
765 seqno=seqno, revwidth=revwidth, mode='wb',
766 modemap=filemode)
766 modemap=filemode)
767 if fp != template:
767 if fp != template:
768 shouldclose = True
768 shouldclose = True
769 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
769 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
770 repo.ui.note("%s\n" % fp.name)
770 repo.ui.note("%s\n" % fp.name)
771
771
772 if not fp:
772 if not fp:
773 write = repo.ui.write
773 write = repo.ui.write
774 else:
774 else:
775 def write(s, **kw):
775 def write(s, **kw):
776 fp.write(s)
776 fp.write(s)
777
777
778
778
779 write("# HG changeset patch\n")
779 write("# HG changeset patch\n")
780 write("# User %s\n" % ctx.user())
780 write("# User %s\n" % ctx.user())
781 write("# Date %d %d\n" % ctx.date())
781 write("# Date %d %d\n" % ctx.date())
782 write("# %s\n" % util.datestr(ctx.date()))
782 write("# %s\n" % util.datestr(ctx.date()))
783 if branch and branch != 'default':
783 if branch and branch != 'default':
784 write("# Branch %s\n" % branch)
784 write("# Branch %s\n" % branch)
785 write("# Node ID %s\n" % hex(node))
785 write("# Node ID %s\n" % hex(node))
786 write("# Parent %s\n" % hex(prev))
786 write("# Parent %s\n" % hex(prev))
787 if len(parents) > 1:
787 if len(parents) > 1:
788 write("# Parent %s\n" % hex(parents[1]))
788 write("# Parent %s\n" % hex(parents[1]))
789 write(ctx.description().rstrip())
789 write(ctx.description().rstrip())
790 write("\n\n")
790 write("\n\n")
791
791
792 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
792 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
793 write(chunk, label=label)
793 write(chunk, label=label)
794
794
795 if shouldclose:
795 if shouldclose:
796 fp.close()
796 fp.close()
797
797
798 for seqno, rev in enumerate(revs):
798 for seqno, rev in enumerate(revs):
799 single(rev, seqno + 1, fp)
799 single(rev, seqno + 1, fp)
800
800
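# Added usage sketch: export() above writes one patch file per revision,
# named from the template ('%h' expands to the short hash as in the default
# 'hg-%h.patch'), or streams everything to an explicit file object when
# 'fp' is given.
def _export_sketch(repo):
    revs = [repo['.'].rev()]
    export(repo, revs)  # creates hg-<shorthash>.patch in the cwd
    fp = open('all.patch', 'wb')
    try:
        export(repo, revs, fp=fp)  # everything into one open file
    finally:
        fp.close()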
801 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
801 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
802 changes=None, stat=False, fp=None, prefix='',
802 changes=None, stat=False, fp=None, prefix='',
803 listsubrepos=False):
803 listsubrepos=False):
804 '''show diff or diffstat.'''
804 '''show diff or diffstat.'''
805 if fp is None:
805 if fp is None:
806 write = ui.write
806 write = ui.write
807 else:
807 else:
808 def write(s, **kw):
808 def write(s, **kw):
809 fp.write(s)
809 fp.write(s)
810
810
811 if stat:
811 if stat:
812 diffopts = diffopts.copy(context=0)
812 diffopts = diffopts.copy(context=0)
813 width = 80
813 width = 80
814 if not ui.plain():
814 if not ui.plain():
815 width = ui.termwidth()
815 width = ui.termwidth()
816 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
816 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
817 prefix=prefix)
817 prefix=prefix)
818 for chunk, label in patch.diffstatui(util.iterlines(chunks),
818 for chunk, label in patch.diffstatui(util.iterlines(chunks),
819 width=width,
819 width=width,
820 git=diffopts.git):
820 git=diffopts.git):
821 write(chunk, label=label)
821 write(chunk, label=label)
822 else:
822 else:
823 for chunk, label in patch.diffui(repo, node1, node2, match,
823 for chunk, label in patch.diffui(repo, node1, node2, match,
824 changes, diffopts, prefix=prefix):
824 changes, diffopts, prefix=prefix):
825 write(chunk, label=label)
825 write(chunk, label=label)
826
826
827 if listsubrepos:
827 if listsubrepos:
828 ctx1 = repo[node1]
828 ctx1 = repo[node1]
829 ctx2 = repo[node2]
829 ctx2 = repo[node2]
830 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
830 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
831 tempnode2 = node2
831 tempnode2 = node2
832 try:
832 try:
833 if node2 is not None:
833 if node2 is not None:
834 tempnode2 = ctx2.substate[subpath][1]
834 tempnode2 = ctx2.substate[subpath][1]
835 except KeyError:
835 except KeyError:
836 # A subrepo that existed in node1 was deleted between node1 and
836 # A subrepo that existed in node1 was deleted between node1 and
837 # node2 (inclusive). Thus, ctx2's substate won't contain that
837 # node2 (inclusive). Thus, ctx2's substate won't contain that
838 # subpath. The best we can do is to ignore it.
838 # subpath. The best we can do is to ignore it.
839 tempnode2 = None
839 tempnode2 = None
840 submatch = matchmod.narrowmatcher(subpath, match)
840 submatch = matchmod.narrowmatcher(subpath, match)
841 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
841 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
842 stat=stat, fp=fp, prefix=prefix)
842 stat=stat, fp=fp, prefix=prefix)
843
843
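# Added sketch: diffordiffstat() above renders either a diffstat summary or
# a full diff for the same pair of nodes, depending on 'stat'. Building the
# diff options with patch.difffeatureopts() is an assumption of this example.
def _diffordiffstat_sketch(ui, repo):
    m = scmutil.matchall(repo)
    diffopts = patch.difffeatureopts(ui)
    node1 = repo['.'].p1().node()
    node2 = repo['.'].node()
    diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=True)   # summary
    diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=False)  # full diff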
844 class changeset_printer(object):
844 class changeset_printer(object):
845 '''show changeset information when templating is not requested.'''
845 '''show changeset information when templating is not requested.'''
846
846
847 def __init__(self, ui, repo, matchfn, diffopts, buffered):
847 def __init__(self, ui, repo, matchfn, diffopts, buffered):
848 self.ui = ui
848 self.ui = ui
849 self.repo = repo
849 self.repo = repo
850 self.buffered = buffered
850 self.buffered = buffered
851 self.matchfn = matchfn
851 self.matchfn = matchfn
852 self.diffopts = diffopts
852 self.diffopts = diffopts
853 self.header = {}
853 self.header = {}
854 self.hunk = {}
854 self.hunk = {}
855 self.lastheader = None
855 self.lastheader = None
856 self.footer = None
856 self.footer = None
857
857
858 def flush(self, rev):
858 def flush(self, rev):
859 if rev in self.header:
859 if rev in self.header:
860 h = self.header[rev]
860 h = self.header[rev]
861 if h != self.lastheader:
861 if h != self.lastheader:
862 self.lastheader = h
862 self.lastheader = h
863 self.ui.write(h)
863 self.ui.write(h)
864 del self.header[rev]
864 del self.header[rev]
865 if rev in self.hunk:
865 if rev in self.hunk:
866 self.ui.write(self.hunk[rev])
866 self.ui.write(self.hunk[rev])
867 del self.hunk[rev]
867 del self.hunk[rev]
868 return 1
868 return 1
869 return 0
869 return 0
870
870
871 def close(self):
871 def close(self):
872 if self.footer:
872 if self.footer:
873 self.ui.write(self.footer)
873 self.ui.write(self.footer)
874
874
875 def show(self, ctx, copies=None, matchfn=None, **props):
875 def show(self, ctx, copies=None, matchfn=None, **props):
876 if self.buffered:
876 if self.buffered:
877 self.ui.pushbuffer()
877 self.ui.pushbuffer()
878 self._show(ctx, copies, matchfn, props)
878 self._show(ctx, copies, matchfn, props)
879 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
879 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
880 else:
880 else:
881 self._show(ctx, copies, matchfn, props)
881 self._show(ctx, copies, matchfn, props)
882
882
883 def _show(self, ctx, copies, matchfn, props):
883 def _show(self, ctx, copies, matchfn, props):
884 '''show a single changeset or file revision'''
884 '''show a single changeset or file revision'''
885 changenode = ctx.node()
885 changenode = ctx.node()
886 rev = ctx.rev()
886 rev = ctx.rev()
887
887
888 if self.ui.quiet:
888 if self.ui.quiet:
889 self.ui.write("%d:%s\n" % (rev, short(changenode)),
889 self.ui.write("%d:%s\n" % (rev, short(changenode)),
890 label='log.node')
890 label='log.node')
891 return
891 return
892
892
893 log = self.repo.changelog
893 log = self.repo.changelog
894 date = util.datestr(ctx.date())
894 date = util.datestr(ctx.date())
895
895
896 hexfunc = self.ui.debugflag and hex or short
896 hexfunc = self.ui.debugflag and hex or short
897
897
898 parents = [(p, hexfunc(log.node(p)))
898 parents = [(p, hexfunc(log.node(p)))
899 for p in self._meaningful_parentrevs(log, rev)]
899 for p in self._meaningful_parentrevs(log, rev)]
900
900
901 # i18n: column positioning for "hg log"
901 # i18n: column positioning for "hg log"
902 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
902 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
903 label='log.changeset changeset.%s' % ctx.phasestr())
903 label='log.changeset changeset.%s' % ctx.phasestr())
904
904
905 branch = ctx.branch()
905 branch = ctx.branch()
906 # don't show the default branch name
906 # don't show the default branch name
907 if branch != 'default':
907 if branch != 'default':
908 # i18n: column positioning for "hg log"
908 # i18n: column positioning for "hg log"
909 self.ui.write(_("branch: %s\n") % branch,
909 self.ui.write(_("branch: %s\n") % branch,
910 label='log.branch')
910 label='log.branch')
911 for bookmark in self.repo.nodebookmarks(changenode):
911 for bookmark in self.repo.nodebookmarks(changenode):
912 # i18n: column positioning for "hg log"
912 # i18n: column positioning for "hg log"
913 self.ui.write(_("bookmark: %s\n") % bookmark,
913 self.ui.write(_("bookmark: %s\n") % bookmark,
914 label='log.bookmark')
914 label='log.bookmark')
915 for tag in self.repo.nodetags(changenode):
915 for tag in self.repo.nodetags(changenode):
916 # i18n: column positioning for "hg log"
916 # i18n: column positioning for "hg log"
917 self.ui.write(_("tag: %s\n") % tag,
917 self.ui.write(_("tag: %s\n") % tag,
918 label='log.tag')
918 label='log.tag')
919 if self.ui.debugflag:
919 if self.ui.debugflag:
920 # i18n: column positioning for "hg log"
920 # i18n: column positioning for "hg log"
921 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
921 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
922 label='log.phase')
922 label='log.phase')
923 for parent in parents:
923 for parent in parents:
924 label = 'log.parent changeset.%s' % self.repo[parent[0]].phasestr()
924 label = 'log.parent changeset.%s' % self.repo[parent[0]].phasestr()
925 # i18n: column positioning for "hg log"
925 # i18n: column positioning for "hg log"
926 self.ui.write(_("parent: %d:%s\n") % parent,
926 self.ui.write(_("parent: %d:%s\n") % parent,
927 label=label)
927 label=label)
928
928
929 if self.ui.debugflag:
929 if self.ui.debugflag:
930 mnode = ctx.manifestnode()
930 mnode = ctx.manifestnode()
931 # i18n: column positioning for "hg log"
931 # i18n: column positioning for "hg log"
932 self.ui.write(_("manifest: %d:%s\n") %
932 self.ui.write(_("manifest: %d:%s\n") %
933 (self.repo.manifest.rev(mnode), hex(mnode)),
933 (self.repo.manifest.rev(mnode), hex(mnode)),
934 label='ui.debug log.manifest')
934 label='ui.debug log.manifest')
935 # i18n: column positioning for "hg log"
935 # i18n: column positioning for "hg log"
936 self.ui.write(_("user: %s\n") % ctx.user(),
936 self.ui.write(_("user: %s\n") % ctx.user(),
937 label='log.user')
937 label='log.user')
938 # i18n: column positioning for "hg log"
938 # i18n: column positioning for "hg log"
939 self.ui.write(_("date: %s\n") % date,
939 self.ui.write(_("date: %s\n") % date,
940 label='log.date')
940 label='log.date')
941
941
942 if self.ui.debugflag:
942 if self.ui.debugflag:
943 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
943 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
944 for key, value in zip([# i18n: column positioning for "hg log"
944 for key, value in zip([# i18n: column positioning for "hg log"
945 _("files:"),
945 _("files:"),
946 # i18n: column positioning for "hg log"
946 # i18n: column positioning for "hg log"
947 _("files+:"),
947 _("files+:"),
948 # i18n: column positioning for "hg log"
948 # i18n: column positioning for "hg log"
949 _("files-:")], files):
949 _("files-:")], files):
950 if value:
950 if value:
951 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
951 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
952 label='ui.debug log.files')
952 label='ui.debug log.files')
953 elif ctx.files() and self.ui.verbose:
953 elif ctx.files() and self.ui.verbose:
954 # i18n: column positioning for "hg log"
954 # i18n: column positioning for "hg log"
955 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
955 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
956 label='ui.note log.files')
956 label='ui.note log.files')
957 if copies and self.ui.verbose:
957 if copies and self.ui.verbose:
958 copies = ['%s (%s)' % c for c in copies]
958 copies = ['%s (%s)' % c for c in copies]
959 # i18n: column positioning for "hg log"
959 # i18n: column positioning for "hg log"
960 self.ui.write(_("copies: %s\n") % ' '.join(copies),
960 self.ui.write(_("copies: %s\n") % ' '.join(copies),
961 label='ui.note log.copies')
961 label='ui.note log.copies')
962
962
963 extra = ctx.extra()
963 extra = ctx.extra()
964 if extra and self.ui.debugflag:
964 if extra and self.ui.debugflag:
965 for key, value in sorted(extra.items()):
965 for key, value in sorted(extra.items()):
966 # i18n: column positioning for "hg log"
966 # i18n: column positioning for "hg log"
967 self.ui.write(_("extra: %s=%s\n")
967 self.ui.write(_("extra: %s=%s\n")
968 % (key, value.encode('string_escape')),
968 % (key, value.encode('string_escape')),
969 label='ui.debug log.extra')
969 label='ui.debug log.extra')
970
970
971 description = ctx.description().strip()
971 description = ctx.description().strip()
972 if description:
972 if description:
973 if self.ui.verbose:
973 if self.ui.verbose:
974 self.ui.write(_("description:\n"),
974 self.ui.write(_("description:\n"),
975 label='ui.note log.description')
975 label='ui.note log.description')
976 self.ui.write(description,
976 self.ui.write(description,
977 label='ui.note log.description')
977 label='ui.note log.description')
978 self.ui.write("\n\n")
978 self.ui.write("\n\n")
979 else:
979 else:
980 # i18n: column positioning for "hg log"
980 # i18n: column positioning for "hg log"
981 self.ui.write(_("summary: %s\n") %
981 self.ui.write(_("summary: %s\n") %
982 description.splitlines()[0],
982 description.splitlines()[0],
983 label='log.summary')
983 label='log.summary')
984 self.ui.write("\n")
984 self.ui.write("\n")
985
985
986 self.showpatch(changenode, matchfn)
986 self.showpatch(changenode, matchfn)
987
987
988 def showpatch(self, node, matchfn):
988 def showpatch(self, node, matchfn):
989 if not matchfn:
989 if not matchfn:
990 matchfn = self.matchfn
990 matchfn = self.matchfn
991 if matchfn:
991 if matchfn:
992 stat = self.diffopts.get('stat')
992 stat = self.diffopts.get('stat')
993 diff = self.diffopts.get('patch')
993 diff = self.diffopts.get('patch')
994 diffopts = patch.diffopts(self.ui, self.diffopts)
994 diffopts = patch.diffopts(self.ui, self.diffopts)
995 prev = self.repo.changelog.parents(node)[0]
995 prev = self.repo.changelog.parents(node)[0]
996 if stat:
996 if stat:
997 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
997 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
998 match=matchfn, stat=True)
998 match=matchfn, stat=True)
999 if diff:
999 if diff:
1000 if stat:
1000 if stat:
1001 self.ui.write("\n")
1001 self.ui.write("\n")
1002 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1002 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1003 match=matchfn, stat=False)
1003 match=matchfn, stat=False)
1004 self.ui.write("\n")
1004 self.ui.write("\n")
1005
1005
1006 def _meaningful_parentrevs(self, log, rev):
1006 def _meaningful_parentrevs(self, log, rev):
1007 """Return list of meaningful (or all if debug) parentrevs for rev.
1007 """Return list of meaningful (or all if debug) parentrevs for rev.
1008
1008
1009 For merges (two non-nullrev revisions) both parents are meaningful.
1009 For merges (two non-nullrev revisions) both parents are meaningful.
1010 Otherwise the first parent revision is considered meaningful if it
1010 Otherwise the first parent revision is considered meaningful if it
1011 is not the preceding revision.
1011 is not the preceding revision.
1012 """
1012 """
1013 parents = log.parentrevs(rev)
1013 parents = log.parentrevs(rev)
1014 if not self.ui.debugflag and parents[1] == nullrev:
1014 if not self.ui.debugflag and parents[1] == nullrev:
1015 if parents[0] >= rev - 1:
1015 if parents[0] >= rev - 1:
1016 parents = []
1016 parents = []
1017 else:
1017 else:
1018 parents = [parents[0]]
1018 parents = [parents[0]]
1019 return parents
1019 return parents
1020
1020
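# Added illustration of the rule above (hypothetical revision numbers):
#   rev 5 with parents (4, -1) -> []       (parent is the preceding rev)
#   rev 5 with parents (2, -1) -> [2]      (non-obvious first parent)
#   rev 5 with parents (3, 4)  -> [3, 4]   (merges show both parents)
# With --debug, both parents are always reported.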
1021 class jsonchangeset(changeset_printer):
1021 class jsonchangeset(changeset_printer):
1022 '''format changeset information.'''
1022 '''format changeset information.'''
1023
1023
1024 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1024 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1025 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1025 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1026 self.cache = {}
1026 self.cache = {}
1027 self._first = True
1027 self._first = True
1028
1028
1029 def close(self):
1029 def close(self):
1030 if not self._first:
1030 if not self._first:
1031 self.ui.write("\n]\n")
1031 self.ui.write("\n]\n")
1032 else:
1032 else:
1033 self.ui.write("[]\n")
1033 self.ui.write("[]\n")
1034
1034
1035 def _show(self, ctx, copies, matchfn, props):
1035 def _show(self, ctx, copies, matchfn, props):
1036 '''show a single changeset or file revision'''
1036 '''show a single changeset or file revision'''
1037 hexnode = hex(ctx.node())
1037 hexnode = hex(ctx.node())
1038 rev = ctx.rev()
1038 rev = ctx.rev()
1039 j = encoding.jsonescape
1039 j = encoding.jsonescape
1040
1040
1041 if self._first:
1041 if self._first:
1042 self.ui.write("[\n {")
1042 self.ui.write("[\n {")
1043 self._first = False
1043 self._first = False
1044 else:
1044 else:
1045 self.ui.write(",\n {")
1045 self.ui.write(",\n {")
1046
1046
1047 if self.ui.quiet:
1047 if self.ui.quiet:
1048 self.ui.write('\n "rev": %d' % rev)
1048 self.ui.write('\n "rev": %d' % rev)
1049 self.ui.write(',\n "node": "%s"' % hexnode)
1049 self.ui.write(',\n "node": "%s"' % hexnode)
1050 self.ui.write('\n }')
1050 self.ui.write('\n }')
1051 return
1051 return
1052
1052
1053 self.ui.write('\n "rev": %d' % rev)
1053 self.ui.write('\n "rev": %d' % rev)
1054 self.ui.write(',\n "node": "%s"' % hexnode)
1054 self.ui.write(',\n "node": "%s"' % hexnode)
1055 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1055 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1056 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1056 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1057 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1057 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1058 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1058 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1059 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1059 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1060
1060
1061 self.ui.write(',\n "bookmarks": [%s]' %
1061 self.ui.write(',\n "bookmarks": [%s]' %
1062 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1062 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1063 self.ui.write(',\n "tags": [%s]' %
1063 self.ui.write(',\n "tags": [%s]' %
1064 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1064 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1065 self.ui.write(',\n "parents": [%s]' %
1065 self.ui.write(',\n "parents": [%s]' %
1066 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1066 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1067
1067
1068 if self.ui.debugflag:
1068 if self.ui.debugflag:
1069 self.ui.write(',\n "manifest": "%s"' % hex(ctx.manifestnode()))
1069 self.ui.write(',\n "manifest": "%s"' % hex(ctx.manifestnode()))
1070
1070
1071 self.ui.write(',\n "extra": {%s}' %
1071 self.ui.write(',\n "extra": {%s}' %
1072 ", ".join('"%s": "%s"' % (j(k), j(v))
1072 ", ".join('"%s": "%s"' % (j(k), j(v))
1073 for k, v in ctx.extra().items()))
1073 for k, v in ctx.extra().items()))
1074
1074
1075 files = ctx.status(ctx.p1())
1075 files = ctx.status(ctx.p1())
1076 self.ui.write(',\n "modified": [%s]' %
1076 self.ui.write(',\n "modified": [%s]' %
1077 ", ".join('"%s"' % j(f) for f in files[0]))
1077 ", ".join('"%s"' % j(f) for f in files[0]))
1078 self.ui.write(',\n "added": [%s]' %
1078 self.ui.write(',\n "added": [%s]' %
1079 ", ".join('"%s"' % j(f) for f in files[1]))
1079 ", ".join('"%s"' % j(f) for f in files[1]))
1080 self.ui.write(',\n "removed": [%s]' %
1080 self.ui.write(',\n "removed": [%s]' %
1081 ", ".join('"%s"' % j(f) for f in files[2]))
1081 ", ".join('"%s"' % j(f) for f in files[2]))
1082
1082
1083 elif self.ui.verbose:
1083 elif self.ui.verbose:
1084 self.ui.write(',\n "files": [%s]' %
1084 self.ui.write(',\n "files": [%s]' %
1085 ", ".join('"%s"' % j(f) for f in ctx.files()))
1085 ", ".join('"%s"' % j(f) for f in ctx.files()))
1086
1086
1087 if copies:
1087 if copies:
1088 self.ui.write(',\n "copies": {%s}' %
1088 self.ui.write(',\n "copies": {%s}' %
1089 ", ".join('"%s": %s' % (j(k), j(copies[k]))
1089 ", ".join('"%s": %s' % (j(k), j(copies[k]))
1090 for k in copies))
1090 for k in copies))
1091
1091
1092 matchfn = self.matchfn
1092 matchfn = self.matchfn
1093 if matchfn:
1093 if matchfn:
1094 stat = self.diffopts.get('stat')
1094 stat = self.diffopts.get('stat')
1095 diff = self.diffopts.get('patch')
1095 diff = self.diffopts.get('patch')
1096 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1096 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1097 node, prev = ctx.node(), ctx.p1().node()
1097 node, prev = ctx.node(), ctx.p1().node()
1098 if stat:
1098 if stat:
1099 self.ui.pushbuffer()
1099 self.ui.pushbuffer()
1100 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1100 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1101 match=matchfn, stat=True)
1101 match=matchfn, stat=True)
1102 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1102 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1103 if diff:
1103 if diff:
1104 self.ui.pushbuffer()
1104 self.ui.pushbuffer()
1105 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1105 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1106 match=matchfn, stat=False)
1106 match=matchfn, stat=False)
1107 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1107 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1108
1108
1109 self.ui.write("\n }")
1109 self.ui.write("\n }")
1110
1110
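# Added illustration: the JSON emitted by this class for one changeset via
# 'hg log -Tjson' (values are hypothetical; field order follows _show above):
#
# [
#  {
#   "rev": 42,
#   "node": "9f8e7d6c...",
#   "branch": "default",
#   "phase": "draft",
#   "user": "Alice <alice@example.com>",
#   "date": [1418400000, 0],
#   "desc": "fix frobnicator edge case",
#   "bookmarks": [],
#   "tags": ["tip"],
#   "parents": ["3c1a0b..."]
#  }
# ]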
1111 class changeset_templater(changeset_printer):
1111 class changeset_templater(changeset_printer):
1112 '''format changeset information.'''
1112 '''format changeset information.'''
1113
1113
1114 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1114 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1115 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1115 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1116 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1116 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1117 defaulttempl = {
1117 defaulttempl = {
1118 'parent': '{rev}:{node|formatnode} ',
1118 'parent': '{rev}:{node|formatnode} ',
1119 'manifest': '{rev}:{node|formatnode}',
1119 'manifest': '{rev}:{node|formatnode}',
1120 'file_copy': '{name} ({source})',
1120 'file_copy': '{name} ({source})',
1121 'extra': '{key}={value|stringescape}'
1121 'extra': '{key}={value|stringescape}'
1122 }
1122 }
1123 # filecopy is preserved for compatibility reasons
1123 # filecopy is preserved for compatibility reasons
1124 defaulttempl['filecopy'] = defaulttempl['file_copy']
1124 defaulttempl['filecopy'] = defaulttempl['file_copy']
1125 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1125 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1126 cache=defaulttempl)
1126 cache=defaulttempl)
1127 if tmpl:
1127 if tmpl:
1128 self.t.cache['changeset'] = tmpl
1128 self.t.cache['changeset'] = tmpl
1129
1129
1130 self.cache = {}
1130 self.cache = {}
1131
1131
1132 def _meaningful_parentrevs(self, ctx):
1132 def _meaningful_parentrevs(self, ctx):
1133 """Return list of meaningful (or all if debug) parentrevs for rev.
1133 """Return list of meaningful (or all if debug) parentrevs for rev.
1134 """
1134 """
1135 parents = ctx.parents()
1135 parents = ctx.parents()
1136 if len(parents) > 1:
1136 if len(parents) > 1:
1137 return parents
1137 return parents
1138 if self.ui.debugflag:
1138 if self.ui.debugflag:
1139 return [parents[0], self.repo['null']]
1139 return [parents[0], self.repo['null']]
1140 if parents[0].rev() >= ctx.rev() - 1:
1140 if parents[0].rev() >= ctx.rev() - 1:
1141 return []
1141 return []
1142 return parents
1142 return parents
1143
1143
1144 def _show(self, ctx, copies, matchfn, props):
1144 def _show(self, ctx, copies, matchfn, props):
1145 '''show a single changeset or file revision'''
1145 '''show a single changeset or file revision'''
1146
1146
1147 showlist = templatekw.showlist
1147 showlist = templatekw.showlist
1148
1148
1149 # showparents() behaviour depends on ui trace level which
1149 # showparents() behaviour depends on ui trace level which
1150 # causes unexpected behaviours at templating level and makes
1150 # causes unexpected behaviours at templating level and makes
1151 # it harder to extract into a standalone function. Its
1151 # it harder to extract into a standalone function. Its
1152 # behaviour cannot be changed so leave it here for now.
1152 # behaviour cannot be changed so leave it here for now.
1153 def showparents(**args):
1153 def showparents(**args):
1154 ctx = args['ctx']
1154 ctx = args['ctx']
1155 parents = [[('rev', p.rev()),
1155 parents = [[('rev', p.rev()),
1156 ('node', p.hex()),
1156 ('node', p.hex()),
1157 ('phase', p.phasestr())]
1157 ('phase', p.phasestr())]
1158 for p in self._meaningful_parentrevs(ctx)]
1158 for p in self._meaningful_parentrevs(ctx)]
1159 return showlist('parent', parents, **args)
1159 return showlist('parent', parents, **args)
1160
1160
1161 props = props.copy()
1161 props = props.copy()
1162 props.update(templatekw.keywords)
1162 props.update(templatekw.keywords)
1163 props['parents'] = showparents
1163 props['parents'] = showparents
1164 props['templ'] = self.t
1164 props['templ'] = self.t
1165 props['ctx'] = ctx
1165 props['ctx'] = ctx
1166 props['repo'] = self.repo
1166 props['repo'] = self.repo
1167 props['revcache'] = {'copies': copies}
1167 props['revcache'] = {'copies': copies}
1168 props['cache'] = self.cache
1168 props['cache'] = self.cache
1169
1169
1170 # find correct templates for current mode
1170 # find correct templates for current mode
1171
1171
1172 tmplmodes = [
1172 tmplmodes = [
1173 (True, None),
1173 (True, None),
1174 (self.ui.verbose, 'verbose'),
1174 (self.ui.verbose, 'verbose'),
1175 (self.ui.quiet, 'quiet'),
1175 (self.ui.quiet, 'quiet'),
1176 (self.ui.debugflag, 'debug'),
1176 (self.ui.debugflag, 'debug'),
1177 ]
1177 ]
1178
1178
1179 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1179 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1180 for mode, postfix in tmplmodes:
1180 for mode, postfix in tmplmodes:
1181 for type in types:
1181 for type in types:
1182 cur = postfix and ('%s_%s' % (type, postfix)) or type
1182 cur = postfix and ('%s_%s' % (type, postfix)) or type
1183 if mode and cur in self.t:
1183 if mode and cur in self.t:
1184 types[type] = cur
1184 types[type] = cur
1185
1185
1186 try:
1186 try:
1187
1187
1188 # write header
1188 # write header
1189 if types['header']:
1189 if types['header']:
1190 h = templater.stringify(self.t(types['header'], **props))
1190 h = templater.stringify(self.t(types['header'], **props))
1191 if self.buffered:
1191 if self.buffered:
1192 self.header[ctx.rev()] = h
1192 self.header[ctx.rev()] = h
1193 else:
1193 else:
1194 if self.lastheader != h:
1194 if self.lastheader != h:
1195 self.lastheader = h
1195 self.lastheader = h
1196 self.ui.write(h)
1196 self.ui.write(h)
1197
1197
1198 # write changeset metadata, then patch if requested
1198 # write changeset metadata, then patch if requested
1199 key = types['changeset']
1199 key = types['changeset']
1200 self.ui.write(templater.stringify(self.t(key, **props)))
1200 self.ui.write(templater.stringify(self.t(key, **props)))
1201 self.showpatch(ctx.node(), matchfn)
1201 self.showpatch(ctx.node(), matchfn)
1202
1202
1203 if types['footer']:
1203 if types['footer']:
1204 if not self.footer:
1204 if not self.footer:
1205 self.footer = templater.stringify(self.t(types['footer'],
1205 self.footer = templater.stringify(self.t(types['footer'],
1206 **props))
1206 **props))
1207
1207
1208 except KeyError, inst:
1208 except KeyError, inst:
1209 msg = _("%s: no key named '%s'")
1209 msg = _("%s: no key named '%s'")
1210 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1210 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1211 except SyntaxError, inst:
1211 except SyntaxError, inst:
1212 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1212 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1213
1213
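# Added illustration: the mode lookup above selects per-mode template keys
# from the map file, e.g. (hypothetical style fragment):
#
#   changeset         = '{rev}:{node|short} {desc|firstline}\n'
#   changeset_verbose = '{rev}:{node|short} {desc}\n'
#   changeset_quiet   = '{node|short}\n'
#   header            = 'revisions:\n'
#
# --verbose, --quiet and --debug pick the matching *_verbose, *_quiet or
# *_debug variant when it exists, falling back to the base key otherwise.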
1214 def gettemplate(ui, tmpl, style):
1214 def gettemplate(ui, tmpl, style):
1215 """
1215 """
1216 Find the template matching the given template spec or style.
1216 Find the template matching the given template spec or style.
1217 """
1217 """
1218
1218
1219 # ui settings
1219 # ui settings
1220 if not tmpl and not style: # a template is stronger than a style
1220 if not tmpl and not style: # a template is stronger than a style
1221 tmpl = ui.config('ui', 'logtemplate')
1221 tmpl = ui.config('ui', 'logtemplate')
1222 if tmpl:
1222 if tmpl:
1223 try:
1223 try:
1224 tmpl = templater.parsestring(tmpl)
1224 tmpl = templater.parsestring(tmpl)
1225 except SyntaxError:
1225 except SyntaxError:
1226 tmpl = templater.parsestring(tmpl, quoted=False)
1226 tmpl = templater.parsestring(tmpl, quoted=False)
1227 return tmpl, None
1227 return tmpl, None
1228 else:
1228 else:
1229 style = util.expandpath(ui.config('ui', 'style', ''))
1229 style = util.expandpath(ui.config('ui', 'style', ''))
1230
1230
1231 if not tmpl and style:
1231 if not tmpl and style:
1232 mapfile = style
1232 mapfile = style
1233 if not os.path.split(mapfile)[0]:
1233 if not os.path.split(mapfile)[0]:
1234 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1234 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1235 or templater.templatepath(mapfile))
1235 or templater.templatepath(mapfile))
1236 if mapname:
1236 if mapname:
1237 mapfile = mapname
1237 mapfile = mapname
1238 return None, mapfile
1238 return None, mapfile
1239
1239
1240 if not tmpl:
1240 if not tmpl:
1241 return None, None
1241 return None, None
1242
1242
1243 # looks like a literal template?
1243 # looks like a literal template?
1244 if '{' in tmpl:
1244 if '{' in tmpl:
1245 return tmpl, None
1245 return tmpl, None
1246
1246
1247 # perhaps a stock style?
1247 # perhaps a stock style?
1248 if not os.path.split(tmpl)[0]:
1248 if not os.path.split(tmpl)[0]:
1249 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1249 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1250 or templater.templatepath(tmpl))
1250 or templater.templatepath(tmpl))
1251 if mapname and os.path.isfile(mapname):
1251 if mapname and os.path.isfile(mapname):
1252 return None, mapname
1252 return None, mapname
1253
1253
1254 # perhaps it's a reference to [templates]
1254 # perhaps it's a reference to [templates]
1255 t = ui.config('templates', tmpl)
1255 t = ui.config('templates', tmpl)
1256 if t:
1256 if t:
1257 try:
1257 try:
1258 tmpl = templater.parsestring(t)
1258 tmpl = templater.parsestring(t)
1259 except SyntaxError:
1259 except SyntaxError:
1260 tmpl = templater.parsestring(t, quoted=False)
1260 tmpl = templater.parsestring(t, quoted=False)
1261 return tmpl, None
1261 return tmpl, None
1262
1262
1263 if tmpl == 'list':
1263 if tmpl == 'list':
1264 ui.write(_("available styles: %s\n") % templater.stylelist())
1264 ui.write(_("available styles: %s\n") % templater.stylelist())
1265 raise util.Abort(_("specify a template"))
1265 raise util.Abort(_("specify a template"))
1266
1266
1267 # perhaps it's a path to a map or a template
1267 # perhaps it's a path to a map or a template
1268 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1268 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1269 # is it a mapfile for a style?
1269 # is it a mapfile for a style?
1270 if os.path.basename(tmpl).startswith("map-"):
1270 if os.path.basename(tmpl).startswith("map-"):
1271 return None, os.path.realpath(tmpl)
1271 return None, os.path.realpath(tmpl)
1272 tmpl = open(tmpl).read()
1272 tmpl = open(tmpl).read()
1273 return tmpl, None
1273 return tmpl, None
1274
1274
1275 # constant string?
1275 # constant string?
1276 return tmpl, None
1276 return tmpl, None
1277
1277
1278 def show_changeset(ui, repo, opts, buffered=False):
1278 def show_changeset(ui, repo, opts, buffered=False):
1279 """show one changeset using template or regular display.
1279 """show one changeset using template or regular display.
1280
1280
1281 Display format will be the first non-empty hit of:
1281 Display format will be the first non-empty hit of:
1282 1. option 'template'
1282 1. option 'template'
1283 2. option 'style'
1283 2. option 'style'
1284 3. [ui] setting 'logtemplate'
1284 3. [ui] setting 'logtemplate'
1285 4. [ui] setting 'style'
1285 4. [ui] setting 'style'
1286 If all of these values are either unset or the empty string,
1286 If all of these values are either unset or the empty string,
1287 regular display via changeset_printer() is done.
1287 regular display via changeset_printer() is done.
1288 """
1288 """
1289 # options
1289 # options
1290 matchfn = None
1290 matchfn = None
1291 if opts.get('patch') or opts.get('stat'):
1291 if opts.get('patch') or opts.get('stat'):
1292 matchfn = scmutil.matchall(repo)
1292 matchfn = scmutil.matchall(repo)
1293
1293
1294 if opts.get('template') == 'json':
1294 if opts.get('template') == 'json':
1295 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1295 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1296
1296
1297 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1297 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1298
1298
1299 if not tmpl and not mapfile:
1299 if not tmpl and not mapfile:
1300 return changeset_printer(ui, repo, matchfn, opts, buffered)
1300 return changeset_printer(ui, repo, matchfn, opts, buffered)
1301
1301
1302 try:
1302 try:
1303 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1303 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1304 buffered)
1304 buffered)
1305 except SyntaxError, inst:
1305 except SyntaxError, inst:
1306 raise util.Abort(inst.args[0])
1306 raise util.Abort(inst.args[0])
1307 return t
1307 return t
1308
1308
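# Added illustration of the precedence listed in show_changeset() above
# (hypothetical configuration):
#
#   [ui]
#   logtemplate = {rev}:{node|short} {desc|firstline}\n
#   style = compact
#
# A -T/--template argument wins over --style, which wins over the
# [ui] logtemplate setting, which in turn wins over [ui] style.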
1309 def showmarker(ui, marker):
1309 def showmarker(ui, marker):
1310 """utility function to display obsolescence marker in a readable way
1310 """utility function to display obsolescence marker in a readable way
1311
1311
1312 To be used by debug function."""
1312 To be used by debug function."""
1313 ui.write(hex(marker.precnode()))
1313 ui.write(hex(marker.precnode()))
1314 for repl in marker.succnodes():
1314 for repl in marker.succnodes():
1315 ui.write(' ')
1315 ui.write(' ')
1316 ui.write(hex(repl))
1316 ui.write(hex(repl))
1317 ui.write(' %X ' % marker.flags())
1317 ui.write(' %X ' % marker.flags())
1318 parents = marker.parentnodes()
1318 parents = marker.parentnodes()
1319 if parents is not None:
1319 if parents is not None:
1320 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1320 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1321 ui.write('(%s) ' % util.datestr(marker.date()))
1321 ui.write('(%s) ' % util.datestr(marker.date()))
1322 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1322 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1323 sorted(marker.metadata().items())
1323 sorted(marker.metadata().items())
1324 if t[0] != 'date')))
1324 if t[0] != 'date')))
1325 ui.write('\n')
1325 ui.write('\n')
1326
1326
1327 def finddate(ui, repo, date):
1327 def finddate(ui, repo, date):
1328 """Find the tipmost changeset that matches the given date spec"""
1328 """Find the tipmost changeset that matches the given date spec"""
1329
1329
1330 df = util.matchdate(date)
1330 df = util.matchdate(date)
1331 m = scmutil.matchall(repo)
1331 m = scmutil.matchall(repo)
1332 results = {}
1332 results = {}
1333
1333
1334 def prep(ctx, fns):
1334 def prep(ctx, fns):
1335 d = ctx.date()
1335 d = ctx.date()
1336 if df(d[0]):
1336 if df(d[0]):
1337 results[ctx.rev()] = d
1337 results[ctx.rev()] = d
1338
1338
1339 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1339 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1340 rev = ctx.rev()
1340 rev = ctx.rev()
1341 if rev in results:
1341 if rev in results:
1342 ui.status(_("found revision %s from %s\n") %
1342 ui.status(_("found revision %s from %s\n") %
1343 (rev, util.datestr(results[rev])))
1343 (rev, util.datestr(results[rev])))
1344 return str(rev)
1344 return str(rev)
1345
1345
1346 raise util.Abort(_("revision matching date not found"))
1346 raise util.Abort(_("revision matching date not found"))
1347
1347
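# Sketch of a call into finddate() from within this module, assuming a
# configured ui and loaded repo are already in hand (obtaining them is out
# of scope here).  The date spec is whatever util.matchdate() accepts,
# e.g. '>2014-01-01' for "on or after that date".
def _tipmost_rev_since(ui, repo, datespec='>2014-01-01'):
    # returns the matching revision number as a string, or raises
    # util.Abort when nothing matches
    return finddate(ui, repo, datespec)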
1348 def increasingwindows(windowsize=8, sizelimit=512):
1348 def increasingwindows(windowsize=8, sizelimit=512):
1349 while True:
1349 while True:
1350 yield windowsize
1350 yield windowsize
1351 if windowsize < sizelimit:
1351 if windowsize < sizelimit:
1352 windowsize *= 2
1352 windowsize *= 2
1353
1353
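# Sketch: increasingwindows() is an infinite generator; the window doubles
# from 8 until it reaches the size limit and then stays there.  Taking the
# first few values (in the same module) makes the shape obvious:
def _first_windows(n=9):
    gen = increasingwindows()
    return [next(gen) for _ in range(n)]

# _first_windows() -> [8, 16, 32, 64, 128, 256, 512, 512, 512]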
1354 class FileWalkError(Exception):
1354 class FileWalkError(Exception):
1355 pass
1355 pass
1356
1356
1357 def walkfilerevs(repo, match, follow, revs, fncache):
1357 def walkfilerevs(repo, match, follow, revs, fncache):
1358 '''Walks the file history for the matched files.
1358 '''Walks the file history for the matched files.
1359
1359
1360 Returns the changeset revs that are involved in the file history.
1360 Returns the changeset revs that are involved in the file history.
1361
1361
1362 Throws FileWalkError if the file history can't be walked using
1362 Throws FileWalkError if the file history can't be walked using
1363 filelogs alone.
1363 filelogs alone.
1364 '''
1364 '''
1365 wanted = set()
1365 wanted = set()
1366 copies = []
1366 copies = []
1367 minrev, maxrev = min(revs), max(revs)
1367 minrev, maxrev = min(revs), max(revs)
1368 def filerevgen(filelog, last):
1368 def filerevgen(filelog, last):
1369 """
1369 """
1370 Only files, no patterns. Check the history of each file.
1370 Only files, no patterns. Check the history of each file.
1371
1371
1372 Examines filelog entries within minrev, maxrev linkrev range
1372 Examines filelog entries within minrev, maxrev linkrev range
1373 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1373 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1374 tuples in backwards order
1374 tuples in backwards order
1375 """
1375 """
1376 cl_count = len(repo)
1376 cl_count = len(repo)
1377 revs = []
1377 revs = []
1378 for j in xrange(0, last + 1):
1378 for j in xrange(0, last + 1):
1379 linkrev = filelog.linkrev(j)
1379 linkrev = filelog.linkrev(j)
1380 if linkrev < minrev:
1380 if linkrev < minrev:
1381 continue
1381 continue
1382 # only yield revs for which we have the changelog; this can
1382 # only yield revs for which we have the changelog; this can
1383 # happen while doing "hg log" during a pull or commit
1383 # happen while doing "hg log" during a pull or commit
1384 if linkrev >= cl_count:
1384 if linkrev >= cl_count:
1385 break
1385 break
1386
1386
1387 parentlinkrevs = []
1387 parentlinkrevs = []
1388 for p in filelog.parentrevs(j):
1388 for p in filelog.parentrevs(j):
1389 if p != nullrev:
1389 if p != nullrev:
1390 parentlinkrevs.append(filelog.linkrev(p))
1390 parentlinkrevs.append(filelog.linkrev(p))
1391 n = filelog.node(j)
1391 n = filelog.node(j)
1392 revs.append((linkrev, parentlinkrevs,
1392 revs.append((linkrev, parentlinkrevs,
1393 follow and filelog.renamed(n)))
1393 follow and filelog.renamed(n)))
1394
1394
1395 return reversed(revs)
1395 return reversed(revs)
1396 def iterfiles():
1396 def iterfiles():
1397 pctx = repo['.']
1397 pctx = repo['.']
1398 for filename in match.files():
1398 for filename in match.files():
1399 if follow:
1399 if follow:
1400 if filename not in pctx:
1400 if filename not in pctx:
1401 raise util.Abort(_('cannot follow file not in parent '
1401 raise util.Abort(_('cannot follow file not in parent '
1402 'revision: "%s"') % filename)
1402 'revision: "%s"') % filename)
1403 yield filename, pctx[filename].filenode()
1403 yield filename, pctx[filename].filenode()
1404 else:
1404 else:
1405 yield filename, None
1405 yield filename, None
1406 for filename_node in copies:
1406 for filename_node in copies:
1407 yield filename_node
1407 yield filename_node
1408
1408
1409 for file_, node in iterfiles():
1409 for file_, node in iterfiles():
1410 filelog = repo.file(file_)
1410 filelog = repo.file(file_)
1411 if not len(filelog):
1411 if not len(filelog):
1412 if node is None:
1412 if node is None:
1413 # A zero count may be a directory or deleted file, so
1413 # A zero count may be a directory or deleted file, so
1414 # try to find matching entries on the slow path.
1414 # try to find matching entries on the slow path.
1415 if follow:
1415 if follow:
1416 raise util.Abort(
1416 raise util.Abort(
1417 _('cannot follow nonexistent file: "%s"') % file_)
1417 _('cannot follow nonexistent file: "%s"') % file_)
1418 raise FileWalkError("Cannot walk via filelog")
1418 raise FileWalkError("Cannot walk via filelog")
1419 else:
1419 else:
1420 continue
1420 continue
1421
1421
1422 if node is None:
1422 if node is None:
1423 last = len(filelog) - 1
1423 last = len(filelog) - 1
1424 else:
1424 else:
1425 last = filelog.rev(node)
1425 last = filelog.rev(node)
1426
1426
1427
1427
1428 # keep track of all ancestors of the file
1428 # keep track of all ancestors of the file
1429 ancestors = set([filelog.linkrev(last)])
1429 ancestors = set([filelog.linkrev(last)])
1430
1430
1431 # iterate from latest to oldest revision
1431 # iterate from latest to oldest revision
1432 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1432 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1433 if not follow:
1433 if not follow:
1434 if rev > maxrev:
1434 if rev > maxrev:
1435 continue
1435 continue
1436 else:
1436 else:
1437 # Note that last might not be the first interesting
1437 # Note that last might not be the first interesting
1438 # rev to us:
1438 # rev to us:
1439 # if the file has been changed after maxrev, we'll
1439 # if the file has been changed after maxrev, we'll
1440 # have linkrev(last) > maxrev, and we still need
1440 # have linkrev(last) > maxrev, and we still need
1441 # to explore the file graph
1441 # to explore the file graph
1442 if rev not in ancestors:
1442 if rev not in ancestors:
1443 continue
1443 continue
1444 # XXX insert 1327 fix here
1444 # XXX insert 1327 fix here
1445 if flparentlinkrevs:
1445 if flparentlinkrevs:
1446 ancestors.update(flparentlinkrevs)
1446 ancestors.update(flparentlinkrevs)
1447
1447
1448 fncache.setdefault(rev, []).append(file_)
1448 fncache.setdefault(rev, []).append(file_)
1449 wanted.add(rev)
1449 wanted.add(rev)
1450 if copied:
1450 if copied:
1451 copies.append(copied)
1451 copies.append(copied)
1452
1452
1453 return wanted
1453 return wanted
1454
1454
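# Sketch of how walkchangerevs() (below) drives walkfilerevs(): the caller
# passes the candidate changelog revs plus an empty fncache dict and gets
# back the subset of revs touching the matched files, with fncache filled
# as a side effect (rev -> matched filenames).  Assumes repo, match and
# revs are already built; FileWalkError signals that filelogs alone are
# not enough and the slow path must be taken.
def _filelog_wanted(repo, match, revs, follow=False):
    fncache = {}
    try:
        wanted = walkfilerevs(repo, match, follow, revs, fncache)
    except FileWalkError:
        return None, fncache      # caller falls back to the slow path
    return wanted, fncache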
1455 def walkchangerevs(repo, match, opts, prepare):
1455 def walkchangerevs(repo, match, opts, prepare):
1456 '''Iterate over files and the revs in which they changed.
1456 '''Iterate over files and the revs in which they changed.
1457
1457
1458 Callers most commonly need to iterate backwards over the history
1458 Callers most commonly need to iterate backwards over the history
1459 in which they are interested. Doing so has awful (quadratic-looking)
1459 in which they are interested. Doing so has awful (quadratic-looking)
1460 performance, so we use iterators in a "windowed" way.
1460 performance, so we use iterators in a "windowed" way.
1461
1461
1462 We walk a window of revisions in the desired order. Within the
1462 We walk a window of revisions in the desired order. Within the
1463 window, we first walk forwards to gather data, then in the desired
1463 window, we first walk forwards to gather data, then in the desired
1464 order (usually backwards) to display it.
1464 order (usually backwards) to display it.
1465
1465
1466 This function returns an iterator yielding contexts. Before
1466 This function returns an iterator yielding contexts. Before
1467 yielding each context, the iterator will first call the prepare
1467 yielding each context, the iterator will first call the prepare
1468 function on each context in the window in forward order.'''
1468 function on each context in the window in forward order.'''
1469
1469
1470 follow = opts.get('follow') or opts.get('follow_first')
1470 follow = opts.get('follow') or opts.get('follow_first')
1471
1471
1472 if opts.get('rev'):
1472 if opts.get('rev'):
1473 revs = scmutil.revrange(repo, opts.get('rev'))
1473 revs = scmutil.revrange(repo, opts.get('rev'))
1474 elif follow:
1474 elif follow:
1475 revs = repo.revs('reverse(:.)')
1475 revs = repo.revs('reverse(:.)')
1476 else:
1476 else:
1477 revs = revset.spanset(repo)
1477 revs = revset.spanset(repo)
1478 revs.reverse()
1478 revs.reverse()
1479 if not revs:
1479 if not revs:
1480 return []
1480 return []
1481 wanted = set()
1481 wanted = set()
1482 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1482 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1483 fncache = {}
1483 fncache = {}
1484 change = repo.changectx
1484 change = repo.changectx
1485
1485
1486 # First step is to fill wanted, the set of revisions that we want to yield.
1486 # First step is to fill wanted, the set of revisions that we want to yield.
1487 # When it does not induce extra cost, we also fill fncache for revisions in
1487 # When it does not induce extra cost, we also fill fncache for revisions in
1488 # wanted: a cache of filenames that were changed (ctx.files()) and that
1488 # wanted: a cache of filenames that were changed (ctx.files()) and that
1489 # match the file filtering conditions.
1489 # match the file filtering conditions.
1490
1490
1491 if not slowpath and not match.files():
1491 if not slowpath and not match.files():
1492 # No files, no patterns. Display all revs.
1492 # No files, no patterns. Display all revs.
1493 wanted = revs
1493 wanted = revs
1494
1494
1495 if not slowpath and match.files():
1495 if not slowpath and match.files():
1496 # We only have to read through the filelog to find wanted revisions
1496 # We only have to read through the filelog to find wanted revisions
1497
1497
1498 try:
1498 try:
1499 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1499 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1500 except FileWalkError:
1500 except FileWalkError:
1501 slowpath = True
1501 slowpath = True
1502
1502
1503 # We decided to fall back to the slowpath because at least one
1503 # We decided to fall back to the slowpath because at least one
1504 # of the paths was not a file. Check to see if at least one of them
1504 # of the paths was not a file. Check to see if at least one of them
1505 # existed in history; otherwise, simply return
1505 # existed in history; otherwise, simply return
1506 for path in match.files():
1506 for path in match.files():
1507 if path == '.' or path in repo.store:
1507 if path == '.' or path in repo.store:
1508 break
1508 break
1509 else:
1509 else:
1510 return []
1510 return []
1511
1511
1512 if slowpath:
1512 if slowpath:
1513 # We have to read the changelog to match filenames against
1513 # We have to read the changelog to match filenames against
1514 # changed files
1514 # changed files
1515
1515
1516 if follow:
1516 if follow:
1517 raise util.Abort(_('can only follow copies/renames for explicit '
1517 raise util.Abort(_('can only follow copies/renames for explicit '
1518 'filenames'))
1518 'filenames'))
1519
1519
1520 # The slow path checks files modified in every changeset.
1520 # The slow path checks files modified in every changeset.
1521 # This is really slow on large repos, so compute the set lazily.
1521 # This is really slow on large repos, so compute the set lazily.
1522 class lazywantedset(object):
1522 class lazywantedset(object):
1523 def __init__(self):
1523 def __init__(self):
1524 self.set = set()
1524 self.set = set()
1525 self.revs = set(revs)
1525 self.revs = set(revs)
1526
1526
1527 # No need to worry about locality here because it will be accessed
1527 # No need to worry about locality here because it will be accessed
1528 # in the same order as the increasing window below.
1528 # in the same order as the increasing window below.
1529 def __contains__(self, value):
1529 def __contains__(self, value):
1530 if value in self.set:
1530 if value in self.set:
1531 return True
1531 return True
1532 elif not value in self.revs:
1532 elif not value in self.revs:
1533 return False
1533 return False
1534 else:
1534 else:
1535 self.revs.discard(value)
1535 self.revs.discard(value)
1536 ctx = change(value)
1536 ctx = change(value)
1537 matches = filter(match, ctx.files())
1537 matches = filter(match, ctx.files())
1538 if matches:
1538 if matches:
1539 fncache[value] = matches
1539 fncache[value] = matches
1540 self.set.add(value)
1540 self.set.add(value)
1541 return True
1541 return True
1542 return False
1542 return False
1543
1543
1544 def discard(self, value):
1544 def discard(self, value):
1545 self.revs.discard(value)
1545 self.revs.discard(value)
1546 self.set.discard(value)
1546 self.set.discard(value)
1547
1547
1548 wanted = lazywantedset()
1548 wanted = lazywantedset()
1549
1549
1550 class followfilter(object):
1550 class followfilter(object):
1551 def __init__(self, onlyfirst=False):
1551 def __init__(self, onlyfirst=False):
1552 self.startrev = nullrev
1552 self.startrev = nullrev
1553 self.roots = set()
1553 self.roots = set()
1554 self.onlyfirst = onlyfirst
1554 self.onlyfirst = onlyfirst
1555
1555
1556 def match(self, rev):
1556 def match(self, rev):
1557 def realparents(rev):
1557 def realparents(rev):
1558 if self.onlyfirst:
1558 if self.onlyfirst:
1559 return repo.changelog.parentrevs(rev)[0:1]
1559 return repo.changelog.parentrevs(rev)[0:1]
1560 else:
1560 else:
1561 return filter(lambda x: x != nullrev,
1561 return filter(lambda x: x != nullrev,
1562 repo.changelog.parentrevs(rev))
1562 repo.changelog.parentrevs(rev))
1563
1563
1564 if self.startrev == nullrev:
1564 if self.startrev == nullrev:
1565 self.startrev = rev
1565 self.startrev = rev
1566 return True
1566 return True
1567
1567
1568 if rev > self.startrev:
1568 if rev > self.startrev:
1569 # forward: all descendants
1569 # forward: all descendants
1570 if not self.roots:
1570 if not self.roots:
1571 self.roots.add(self.startrev)
1571 self.roots.add(self.startrev)
1572 for parent in realparents(rev):
1572 for parent in realparents(rev):
1573 if parent in self.roots:
1573 if parent in self.roots:
1574 self.roots.add(rev)
1574 self.roots.add(rev)
1575 return True
1575 return True
1576 else:
1576 else:
1577 # backwards: all parents
1577 # backwards: all parents
1578 if not self.roots:
1578 if not self.roots:
1579 self.roots.update(realparents(self.startrev))
1579 self.roots.update(realparents(self.startrev))
1580 if rev in self.roots:
1580 if rev in self.roots:
1581 self.roots.remove(rev)
1581 self.roots.remove(rev)
1582 self.roots.update(realparents(rev))
1582 self.roots.update(realparents(rev))
1583 return True
1583 return True
1584
1584
1585 return False
1585 return False
1586
1586
1587 # it might be worthwhile to do this in the iterator if the rev range
1587 # it might be worthwhile to do this in the iterator if the rev range
1588 # is descending and the prune args are all within that range
1588 # is descending and the prune args are all within that range
1589 for rev in opts.get('prune', ()):
1589 for rev in opts.get('prune', ()):
1590 rev = repo[rev].rev()
1590 rev = repo[rev].rev()
1591 ff = followfilter()
1591 ff = followfilter()
1592 stop = min(revs[0], revs[-1])
1592 stop = min(revs[0], revs[-1])
1593 for x in xrange(rev, stop - 1, -1):
1593 for x in xrange(rev, stop - 1, -1):
1594 if ff.match(x):
1594 if ff.match(x):
1595 wanted = wanted - [x]
1595 wanted = wanted - [x]
1596
1596
1597 # Now that wanted is correctly initialized, we can iterate over the
1597 # Now that wanted is correctly initialized, we can iterate over the
1598 # revision range, yielding only revisions in wanted.
1598 # revision range, yielding only revisions in wanted.
1599 def iterate():
1599 def iterate():
1600 if follow and not match.files():
1600 if follow and not match.files():
1601 ff = followfilter(onlyfirst=opts.get('follow_first'))
1601 ff = followfilter(onlyfirst=opts.get('follow_first'))
1602 def want(rev):
1602 def want(rev):
1603 return ff.match(rev) and rev in wanted
1603 return ff.match(rev) and rev in wanted
1604 else:
1604 else:
1605 def want(rev):
1605 def want(rev):
1606 return rev in wanted
1606 return rev in wanted
1607
1607
1608 it = iter(revs)
1608 it = iter(revs)
1609 stopiteration = False
1609 stopiteration = False
1610 for windowsize in increasingwindows():
1610 for windowsize in increasingwindows():
1611 nrevs = []
1611 nrevs = []
1612 for i in xrange(windowsize):
1612 for i in xrange(windowsize):
1613 try:
1613 try:
1614 rev = it.next()
1614 rev = it.next()
1615 if want(rev):
1615 if want(rev):
1616 nrevs.append(rev)
1616 nrevs.append(rev)
1617 except (StopIteration):
1617 except (StopIteration):
1618 stopiteration = True
1618 stopiteration = True
1619 break
1619 break
1620 for rev in sorted(nrevs):
1620 for rev in sorted(nrevs):
1621 fns = fncache.get(rev)
1621 fns = fncache.get(rev)
1622 ctx = change(rev)
1622 ctx = change(rev)
1623 if not fns:
1623 if not fns:
1624 def fns_generator():
1624 def fns_generator():
1625 for f in ctx.files():
1625 for f in ctx.files():
1626 if match(f):
1626 if match(f):
1627 yield f
1627 yield f
1628 fns = fns_generator()
1628 fns = fns_generator()
1629 prepare(ctx, fns)
1629 prepare(ctx, fns)
1630 for rev in nrevs:
1630 for rev in nrevs:
1631 yield change(rev)
1631 yield change(rev)
1632
1632
1633 if stopiteration:
1633 if stopiteration:
1634 break
1634 break
1635
1635
1636 return iterate()
1636 return iterate()
1637
1637
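# Sketch of the caller pattern walkchangerevs() expects (roughly what
# finddate() above and the log command do): collect per-revision data in
# prepare(), which runs in forward order within each window, then consume
# the yielded contexts in display order.  Assumes a repo and matcher are
# already in hand; opts can be as small as {'rev': None}.
def _collect_touched_files(repo, match, opts):
    touched = {}                      # rev -> filenames that matched
    def prepare(ctx, fns):
        touched[ctx.rev()] = list(fns)
    for ctx in walkchangerevs(repo, match, opts, prepare):
        # ctx arrives in the requested (usually reverse) order, after its
        # window has been prepared
        pass
    return touched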
1638 def _makefollowlogfilematcher(repo, files, followfirst):
1638 def _makefollowlogfilematcher(repo, files, followfirst):
1639 # When displaying a revision with --patch --follow FILE, we have
1639 # When displaying a revision with --patch --follow FILE, we have
1640 # to know which file of the revision must be diffed. With
1640 # to know which file of the revision must be diffed. With
1641 # --follow, we want the names of the ancestors of FILE in the
1641 # --follow, we want the names of the ancestors of FILE in the
1642 # revision, stored in "fcache". "fcache" is populated by
1642 # revision, stored in "fcache". "fcache" is populated by
1643 # reproducing the graph traversal already done by --follow revset
1643 # reproducing the graph traversal already done by --follow revset
1644 # and relating linkrevs to file names (which is not "correct" but
1644 # and relating linkrevs to file names (which is not "correct" but
1645 # good enough).
1645 # good enough).
1646 fcache = {}
1646 fcache = {}
1647 fcacheready = [False]
1647 fcacheready = [False]
1648 pctx = repo['.']
1648 pctx = repo['.']
1649
1649
1650 def populate():
1650 def populate():
1651 for fn in files:
1651 for fn in files:
1652 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1652 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1653 for c in i:
1653 for c in i:
1654 fcache.setdefault(c.linkrev(), set()).add(c.path())
1654 fcache.setdefault(c.linkrev(), set()).add(c.path())
1655
1655
1656 def filematcher(rev):
1656 def filematcher(rev):
1657 if not fcacheready[0]:
1657 if not fcacheready[0]:
1658 # Lazy initialization
1658 # Lazy initialization
1659 fcacheready[0] = True
1659 fcacheready[0] = True
1660 populate()
1660 populate()
1661 return scmutil.matchfiles(repo, fcache.get(rev, []))
1661 return scmutil.matchfiles(repo, fcache.get(rev, []))
1662
1662
1663 return filematcher
1663 return filematcher
1664
1664
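# Sketch: the factory above is only consulted when --patch/--stat need to
# know which files to diff for a given revision; fcache is filled lazily on
# the first call.  The call site in _makelogrevset() below looks like:
#
#   filematcher = _makefollowlogfilematcher(repo, match.files(), followfirst)
#   m = filematcher(rev)   # matcher over the ancestor names recorded for
#                          # that rev (an empty selection if none were)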
1665 def _makenofollowlogfilematcher(repo, pats, opts):
1665 def _makenofollowlogfilematcher(repo, pats, opts):
1666 '''hook for extensions to override the filematcher for non-follow cases'''
1666 '''hook for extensions to override the filematcher for non-follow cases'''
1667 return None
1667 return None
1668
1668
1669 def _makelogrevset(repo, pats, opts, revs):
1669 def _makelogrevset(repo, pats, opts, revs):
1670 """Return (expr, filematcher) where expr is a revset string built
1670 """Return (expr, filematcher) where expr is a revset string built
1671 from log options and file patterns or None. If --stat or --patch
1671 from log options and file patterns or None. If --stat or --patch
1672 are not passed, filematcher is None. Otherwise it is a callable
1672 are not passed, filematcher is None. Otherwise it is a callable
1673 taking a revision number and returning a match object filtering
1673 taking a revision number and returning a match object filtering
1674 the files to be detailed when displaying the revision.
1674 the files to be detailed when displaying the revision.
1675 """
1675 """
1676 opt2revset = {
1676 opt2revset = {
1677 'no_merges': ('not merge()', None),
1677 'no_merges': ('not merge()', None),
1678 'only_merges': ('merge()', None),
1678 'only_merges': ('merge()', None),
1679 '_ancestors': ('ancestors(%(val)s)', None),
1679 '_ancestors': ('ancestors(%(val)s)', None),
1680 '_fancestors': ('_firstancestors(%(val)s)', None),
1680 '_fancestors': ('_firstancestors(%(val)s)', None),
1681 '_descendants': ('descendants(%(val)s)', None),
1681 '_descendants': ('descendants(%(val)s)', None),
1682 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1682 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1683 '_matchfiles': ('_matchfiles(%(val)s)', None),
1683 '_matchfiles': ('_matchfiles(%(val)s)', None),
1684 'date': ('date(%(val)r)', None),
1684 'date': ('date(%(val)r)', None),
1685 'branch': ('branch(%(val)r)', ' or '),
1685 'branch': ('branch(%(val)r)', ' or '),
1686 '_patslog': ('filelog(%(val)r)', ' or '),
1686 '_patslog': ('filelog(%(val)r)', ' or '),
1687 '_patsfollow': ('follow(%(val)r)', ' or '),
1687 '_patsfollow': ('follow(%(val)r)', ' or '),
1688 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1688 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1689 'keyword': ('keyword(%(val)r)', ' or '),
1689 'keyword': ('keyword(%(val)r)', ' or '),
1690 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1690 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1691 'user': ('user(%(val)r)', ' or '),
1691 'user': ('user(%(val)r)', ' or '),
1692 }
1692 }
1693
1693
1694 opts = dict(opts)
1694 opts = dict(opts)
1695 # follow or not follow?
1695 # follow or not follow?
1696 follow = opts.get('follow') or opts.get('follow_first')
1696 follow = opts.get('follow') or opts.get('follow_first')
1697 followfirst = opts.get('follow_first') and 1 or 0
1697 followfirst = opts.get('follow_first') and 1 or 0
1698 # --follow with FILE behaviour depends on revs...
1698 # --follow with FILE behaviour depends on revs...
1699 it = iter(revs)
1699 it = iter(revs)
1700 startrev = it.next()
1700 startrev = it.next()
1701 try:
1701 try:
1702 followdescendants = startrev < it.next()
1702 followdescendants = startrev < it.next()
1703 except (StopIteration):
1703 except (StopIteration):
1704 followdescendants = False
1704 followdescendants = False
1705
1705
1706 # branch and only_branch are really aliases and must be handled at
1706 # branch and only_branch are really aliases and must be handled at
1707 # the same time
1707 # the same time
1708 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1708 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1709 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1709 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1710 # pats/include/exclude are passed to match.match() directly in
1710 # pats/include/exclude are passed to match.match() directly in
1711 # _matchfiles() revset but walkchangerevs() builds its matcher with
1711 # _matchfiles() revset but walkchangerevs() builds its matcher with
1712 # scmutil.match(). The difference is input pats are globbed on
1712 # scmutil.match(). The difference is input pats are globbed on
1713 # platforms without shell expansion (windows).
1713 # platforms without shell expansion (windows).
1714 pctx = repo[None]
1714 pctx = repo[None]
1715 match, pats = scmutil.matchandpats(pctx, pats, opts)
1715 match, pats = scmutil.matchandpats(pctx, pats, opts)
1716 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1716 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1717 if not slowpath:
1717 if not slowpath:
1718 for f in match.files():
1718 for f in match.files():
1719 if follow and f not in pctx:
1719 if follow and f not in pctx:
1720 # If the file exists, it may be a directory, so let it
1720 # If the file exists, it may be a directory, so let it
1721 # take the slow path.
1721 # take the slow path.
1722 if os.path.exists(repo.wjoin(f)):
1722 if os.path.exists(repo.wjoin(f)):
1723 slowpath = True
1723 slowpath = True
1724 continue
1724 continue
1725 else:
1725 else:
1726 raise util.Abort(_('cannot follow file not in parent '
1726 raise util.Abort(_('cannot follow file not in parent '
1727 'revision: "%s"') % f)
1727 'revision: "%s"') % f)
1728 filelog = repo.file(f)
1728 filelog = repo.file(f)
1729 if not filelog:
1729 if not filelog:
1730 # A zero count may be a directory or deleted file, so
1730 # A zero count may be a directory or deleted file, so
1731 # try to find matching entries on the slow path.
1731 # try to find matching entries on the slow path.
1732 if follow:
1732 if follow:
1733 raise util.Abort(
1733 raise util.Abort(
1734 _('cannot follow nonexistent file: "%s"') % f)
1734 _('cannot follow nonexistent file: "%s"') % f)
1735 slowpath = True
1735 slowpath = True
1736
1736
1737 # We decided to fall back to the slowpath because at least one
1737 # We decided to fall back to the slowpath because at least one
1738 # of the paths was not a file. Check to see if at least one of them
1738 # of the paths was not a file. Check to see if at least one of them
1739 # existed in history - in that case, we'll continue down the
1739 # existed in history - in that case, we'll continue down the
1740 # slowpath; otherwise, we can turn off the slowpath
1740 # slowpath; otherwise, we can turn off the slowpath
1741 if slowpath:
1741 if slowpath:
1742 for path in match.files():
1742 for path in match.files():
1743 if path == '.' or path in repo.store:
1743 if path == '.' or path in repo.store:
1744 break
1744 break
1745 else:
1745 else:
1746 slowpath = False
1746 slowpath = False
1747
1747
1748 fpats = ('_patsfollow', '_patsfollowfirst')
1748 fpats = ('_patsfollow', '_patsfollowfirst')
1749 fnopats = (('_ancestors', '_fancestors'),
1749 fnopats = (('_ancestors', '_fancestors'),
1750 ('_descendants', '_fdescendants'))
1750 ('_descendants', '_fdescendants'))
1751 if slowpath:
1751 if slowpath:
1752 # See walkchangerevs() slow path.
1752 # See walkchangerevs() slow path.
1753 #
1753 #
1754 # pats/include/exclude cannot be represented as separate
1754 # pats/include/exclude cannot be represented as separate
1755 # revset expressions as their filtering logic applies at file
1755 # revset expressions as their filtering logic applies at file
1756 # level. For instance "-I a -X b" matches a revision touching
1756 # level. For instance "-I a -X b" matches a revision touching
1757 # "a" and "b" while "file(a) and not file(b)" does
1757 # "a" and "b" while "file(a) and not file(b)" does
1758 # not. Besides, filesets are evaluated against the working
1758 # not. Besides, filesets are evaluated against the working
1759 # directory.
1759 # directory.
1760 matchargs = ['r:', 'd:relpath']
1760 matchargs = ['r:', 'd:relpath']
1761 for p in pats:
1761 for p in pats:
1762 matchargs.append('p:' + p)
1762 matchargs.append('p:' + p)
1763 for p in opts.get('include', []):
1763 for p in opts.get('include', []):
1764 matchargs.append('i:' + p)
1764 matchargs.append('i:' + p)
1765 for p in opts.get('exclude', []):
1765 for p in opts.get('exclude', []):
1766 matchargs.append('x:' + p)
1766 matchargs.append('x:' + p)
1767 matchargs = ','.join(('%r' % p) for p in matchargs)
1767 matchargs = ','.join(('%r' % p) for p in matchargs)
1768 opts['_matchfiles'] = matchargs
1768 opts['_matchfiles'] = matchargs
1769 if follow:
1769 if follow:
1770 opts[fnopats[0][followfirst]] = '.'
1770 opts[fnopats[0][followfirst]] = '.'
1771 else:
1771 else:
1772 if follow:
1772 if follow:
1773 if pats:
1773 if pats:
1774 # follow() revset interprets its file argument as a
1774 # follow() revset interprets its file argument as a
1775 # manifest entry, so use match.files(), not pats.
1775 # manifest entry, so use match.files(), not pats.
1776 opts[fpats[followfirst]] = list(match.files())
1776 opts[fpats[followfirst]] = list(match.files())
1777 else:
1777 else:
1778 opts[fnopats[followdescendants][followfirst]] = str(startrev)
1778 opts[fnopats[followdescendants][followfirst]] = str(startrev)
1779 else:
1779 else:
1780 opts['_patslog'] = list(pats)
1780 opts['_patslog'] = list(pats)
1781
1781
1782 filematcher = None
1782 filematcher = None
1783 if opts.get('patch') or opts.get('stat'):
1783 if opts.get('patch') or opts.get('stat'):
1784 # When following files, track renames via a special matcher.
1784 # When following files, track renames via a special matcher.
1785 # If we're forced to take the slowpath it means we're following
1785 # If we're forced to take the slowpath it means we're following
1786 # at least one pattern/directory, so don't bother with rename tracking.
1786 # at least one pattern/directory, so don't bother with rename tracking.
1787 if follow and not match.always() and not slowpath:
1787 if follow and not match.always() and not slowpath:
1788 # _makefollowlogfilematcher expects its files argument to be
1788 # _makefollowlogfilematcher expects its files argument to be
1789 # relative to the repo root, so use match.files(), not pats.
1789 # relative to the repo root, so use match.files(), not pats.
1790 filematcher = _makefollowlogfilematcher(repo, match.files(),
1790 filematcher = _makefollowlogfilematcher(repo, match.files(),
1791 followfirst)
1791 followfirst)
1792 else:
1792 else:
1793 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
1793 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
1794 if filematcher is None:
1794 if filematcher is None:
1795 filematcher = lambda rev: match
1795 filematcher = lambda rev: match
1796
1796
1797 expr = []
1797 expr = []
1798 for op, val in sorted(opts.iteritems()):
1798 for op, val in sorted(opts.iteritems()):
1799 if not val:
1799 if not val:
1800 continue
1800 continue
1801 if op not in opt2revset:
1801 if op not in opt2revset:
1802 continue
1802 continue
1803 revop, andor = opt2revset[op]
1803 revop, andor = opt2revset[op]
1804 if '%(val)' not in revop:
1804 if '%(val)' not in revop:
1805 expr.append(revop)
1805 expr.append(revop)
1806 else:
1806 else:
1807 if not isinstance(val, list):
1807 if not isinstance(val, list):
1808 e = revop % {'val': val}
1808 e = revop % {'val': val}
1809 else:
1809 else:
1810 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
1810 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
1811 expr.append(e)
1811 expr.append(e)
1812
1812
1813 if expr:
1813 if expr:
1814 expr = '(' + ' and '.join(expr) + ')'
1814 expr = '(' + ' and '.join(expr) + ')'
1815 else:
1815 else:
1816 expr = None
1816 expr = None
1817 return expr, filematcher
1817 return expr, filematcher
1818
1818
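# Worked example (a sketch, following the loop above): with no file
# patterns and
#   opts = {'no_merges': True, 'user': ['alice', 'bob'], 'keyword': ['bug']}
# the sorted option walk produces, via opt2revset,
#   keyword   -> "(keyword('bug'))"
#   no_merges -> "not merge()"
#   user      -> "(user('alice') or user('bob'))"
# which are joined into the returned revset string
#   "((keyword('bug')) and not merge() and (user('alice') or user('bob')))"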
1819 def getgraphlogrevs(repo, pats, opts):
1819 def getgraphlogrevs(repo, pats, opts):
1820 """Return (revs, expr, filematcher) where revs is an iterable of
1820 """Return (revs, expr, filematcher) where revs is an iterable of
1821 revision numbers, expr is a revset string built from log options
1821 revision numbers, expr is a revset string built from log options
1822 and file patterns or None, and used to filter 'revs'. If --stat or
1822 and file patterns or None, and used to filter 'revs'. If --stat or
1823 --patch are not passed, filematcher is None. Otherwise it is a
1823 --patch are not passed, filematcher is None. Otherwise it is a
1824 callable taking a revision number and returning a match object
1824 callable taking a revision number and returning a match object
1825 filtering the files to be detailed when displaying the revision.
1825 filtering the files to be detailed when displaying the revision.
1826 """
1826 """
1827 if not len(repo):
1827 if not len(repo):
1828 return [], None, None
1828 return [], None, None
1829 limit = loglimit(opts)
1829 limit = loglimit(opts)
1830 # Default --rev value depends on --follow but --follow behaviour
1830 # Default --rev value depends on --follow but --follow behaviour
1831 # depends on revisions resolved from --rev...
1831 # depends on revisions resolved from --rev...
1832 follow = opts.get('follow') or opts.get('follow_first')
1832 follow = opts.get('follow') or opts.get('follow_first')
1833 possiblyunsorted = False # whether revs might need sorting
1833 possiblyunsorted = False # whether revs might need sorting
1834 if opts.get('rev'):
1834 if opts.get('rev'):
1835 revs = scmutil.revrange(repo, opts['rev'])
1835 revs = scmutil.revrange(repo, opts['rev'])
1836 # Don't sort here because _makelogrevset might depend on the
1836 # Don't sort here because _makelogrevset might depend on the
1837 # order of revs
1837 # order of revs
1838 possiblyunsorted = True
1838 possiblyunsorted = True
1839 else:
1839 else:
1840 if follow and len(repo) > 0:
1840 if follow and len(repo) > 0:
1841 revs = repo.revs('reverse(:.)')
1841 revs = repo.revs('reverse(:.)')
1842 else:
1842 else:
1843 revs = revset.spanset(repo)
1843 revs = revset.spanset(repo)
1844 revs.reverse()
1844 revs.reverse()
1845 if not revs:
1845 if not revs:
1846 return revset.baseset(), None, None
1846 return revset.baseset(), None, None
1847 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
1847 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
1848 if possiblyunsorted:
1848 if possiblyunsorted:
1849 revs.sort(reverse=True)
1849 revs.sort(reverse=True)
1850 if expr:
1850 if expr:
1851 # Revset matchers often operate faster on revisions in changelog
1851 # Revset matchers often operate faster on revisions in changelog
1852 # order, because most filters deal with the changelog.
1852 # order, because most filters deal with the changelog.
1853 revs.reverse()
1853 revs.reverse()
1854 matcher = revset.match(repo.ui, expr)
1854 matcher = revset.match(repo.ui, expr)
1855 # Revset matches can reorder revisions. "A or B" typically returns
1855 # Revset matches can reorder revisions. "A or B" typically returns
1856 # the revision matching A and then the revision matching B. Sort
1856 # the revision matching A and then the revision matching B. Sort
1857 # again to fix that.
1857 # again to fix that.
1858 revs = matcher(repo, revs)
1858 revs = matcher(repo, revs)
1859 revs.sort(reverse=True)
1859 revs.sort(reverse=True)
1860 if limit is not None:
1860 if limit is not None:
1861 limitedrevs = []
1861 limitedrevs = []
1862 for idx, rev in enumerate(revs):
1862 for idx, rev in enumerate(revs):
1863 if idx >= limit:
1863 if idx >= limit:
1864 break
1864 break
1865 limitedrevs.append(rev)
1865 limitedrevs.append(rev)
1866 revs = revset.baseset(limitedrevs)
1866 revs = revset.baseset(limitedrevs)
1867
1867
1868 return revs, expr, filematcher
1868 return revs, expr, filematcher
1869
1869
1870 def getlogrevs(repo, pats, opts):
1870 def getlogrevs(repo, pats, opts):
1871 """Return (revs, expr, filematcher) where revs is an iterable of
1871 """Return (revs, expr, filematcher) where revs is an iterable of
1872 revision numbers, expr is a revset string built from log options
1872 revision numbers, expr is a revset string built from log options
1873 and file patterns or None, and used to filter 'revs'. If --stat or
1873 and file patterns or None, and used to filter 'revs'. If --stat or
1874 --patch are not passed, filematcher is None. Otherwise it is a
1874 --patch are not passed, filematcher is None. Otherwise it is a
1875 callable taking a revision number and returning a match object
1875 callable taking a revision number and returning a match object
1876 filtering the files to be detailed when displaying the revision.
1876 filtering the files to be detailed when displaying the revision.
1877 """
1877 """
1878 limit = loglimit(opts)
1878 limit = loglimit(opts)
1879 # Default --rev value depends on --follow but --follow behaviour
1879 # Default --rev value depends on --follow but --follow behaviour
1880 # depends on revisions resolved from --rev...
1880 # depends on revisions resolved from --rev...
1881 follow = opts.get('follow') or opts.get('follow_first')
1881 follow = opts.get('follow') or opts.get('follow_first')
1882 if opts.get('rev'):
1882 if opts.get('rev'):
1883 revs = scmutil.revrange(repo, opts['rev'])
1883 revs = scmutil.revrange(repo, opts['rev'])
1884 elif follow:
1884 elif follow:
1885 revs = repo.revs('reverse(:.)')
1885 revs = repo.revs('reverse(:.)')
1886 else:
1886 else:
1887 revs = revset.spanset(repo)
1887 revs = revset.spanset(repo)
1888 revs.reverse()
1888 revs.reverse()
1889 if not revs:
1889 if not revs:
1890 return revset.baseset([]), None, None
1890 return revset.baseset([]), None, None
1891 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
1891 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
1892 if expr:
1892 if expr:
1893 # Revset matchers often operate faster on revisions in changelog
1893 # Revset matchers often operate faster on revisions in changelog
1894 # order, because most filters deal with the changelog.
1894 # order, because most filters deal with the changelog.
1895 if not opts.get('rev'):
1895 if not opts.get('rev'):
1896 revs.reverse()
1896 revs.reverse()
1897 matcher = revset.match(repo.ui, expr)
1897 matcher = revset.match(repo.ui, expr)
1898 # Revset matches can reorder revisions. "A or B" typically returns
1898 # Revset matches can reorder revisions. "A or B" typically returns
1899 # the revision matching A and then the revision matching B. Sort
1899 # the revision matching A and then the revision matching B. Sort
1900 # again to fix that.
1900 # again to fix that.
1901 revs = matcher(repo, revs)
1901 revs = matcher(repo, revs)
1902 if not opts.get('rev'):
1902 if not opts.get('rev'):
1903 revs.sort(reverse=True)
1903 revs.sort(reverse=True)
1904 if limit is not None:
1904 if limit is not None:
1905 count = 0
1905 count = 0
1906 limitedrevs = []
1906 limitedrevs = []
1907 it = iter(revs)
1907 it = iter(revs)
1908 while count < limit:
1908 while count < limit:
1909 try:
1909 try:
1910 limitedrevs.append(it.next())
1910 limitedrevs.append(it.next())
1911 except (StopIteration):
1911 except (StopIteration):
1912 break
1912 break
1913 count += 1
1913 count += 1
1914 revs = revset.baseset(limitedrevs)
1914 revs = revset.baseset(limitedrevs)
1915
1915
1916 return revs, expr, filematcher
1916 return revs, expr, filematcher
1917
1917
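# Sketch of the consumer side of getlogrevs()/getgraphlogrevs(): both hand
# back an iterable of revs (already filtered and limited), the revset
# string that was used (or None), and a per-rev filematcher for
# --patch/--stat.  Assumes repo, pats and opts as the log command would
# supply them.
def _iterlogrevs(repo, pats, opts):
    revs, expr, filematcher = getlogrevs(repo, pats, opts)
    for rev in revs:
        matchfn = filematcher(rev) if filematcher else None
        yield repo[rev], matchfn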
1918 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
1918 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
1919 filematcher=None):
1919 filematcher=None):
1920 seen, state = [], graphmod.asciistate()
1920 seen, state = [], graphmod.asciistate()
1921 for rev, type, ctx, parents in dag:
1921 for rev, type, ctx, parents in dag:
1922 char = 'o'
1922 char = 'o'
1923 if ctx.node() in showparents:
1923 if ctx.node() in showparents:
1924 char = '@'
1924 char = '@'
1925 elif ctx.obsolete():
1925 elif ctx.obsolete():
1926 char = 'x'
1926 char = 'x'
1927 copies = None
1927 copies = None
1928 if getrenamed and ctx.rev():
1928 if getrenamed and ctx.rev():
1929 copies = []
1929 copies = []
1930 for fn in ctx.files():
1930 for fn in ctx.files():
1931 rename = getrenamed(fn, ctx.rev())
1931 rename = getrenamed(fn, ctx.rev())
1932 if rename:
1932 if rename:
1933 copies.append((fn, rename[0]))
1933 copies.append((fn, rename[0]))
1934 revmatchfn = None
1934 revmatchfn = None
1935 if filematcher is not None:
1935 if filematcher is not None:
1936 revmatchfn = filematcher(ctx.rev())
1936 revmatchfn = filematcher(ctx.rev())
1937 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
1937 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
1938 lines = displayer.hunk.pop(rev).split('\n')
1938 lines = displayer.hunk.pop(rev).split('\n')
1939 if not lines[-1]:
1939 if not lines[-1]:
1940 del lines[-1]
1940 del lines[-1]
1941 displayer.flush(rev)
1941 displayer.flush(rev)
1942 edges = edgefn(type, char, lines, seen, rev, parents)
1942 edges = edgefn(type, char, lines, seen, rev, parents)
1943 for type, char, lines, coldata in edges:
1943 for type, char, lines, coldata in edges:
1944 graphmod.ascii(ui, state, type, char, lines, coldata)
1944 graphmod.ascii(ui, state, type, char, lines, coldata)
1945 displayer.close()
1945 displayer.close()
1946
1946
1947 def graphlog(ui, repo, *pats, **opts):
1947 def graphlog(ui, repo, *pats, **opts):
1948 # Parameters are identical to log command ones
1948 # Parameters are identical to log command ones
1949 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
1949 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
1950 revdag = graphmod.dagwalker(repo, revs)
1950 revdag = graphmod.dagwalker(repo, revs)
1951
1951
1952 getrenamed = None
1952 getrenamed = None
1953 if opts.get('copies'):
1953 if opts.get('copies'):
1954 endrev = None
1954 endrev = None
1955 if opts.get('rev'):
1955 if opts.get('rev'):
1956 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
1956 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
1957 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
1957 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
1958 displayer = show_changeset(ui, repo, opts, buffered=True)
1958 displayer = show_changeset(ui, repo, opts, buffered=True)
1959 showparents = [ctx.node() for ctx in repo[None].parents()]
1959 showparents = [ctx.node() for ctx in repo[None].parents()]
1960 displaygraph(ui, revdag, displayer, showparents,
1960 displaygraph(ui, revdag, displayer, showparents,
1961 graphmod.asciiedges, getrenamed, filematcher)
1961 graphmod.asciiedges, getrenamed, filematcher)
1962
1962
1963 def checkunsupportedgraphflags(pats, opts):
1963 def checkunsupportedgraphflags(pats, opts):
1964 for op in ["newest_first"]:
1964 for op in ["newest_first"]:
1965 if op in opts and opts[op]:
1965 if op in opts and opts[op]:
1966 raise util.Abort(_("-G/--graph option is incompatible with --%s")
1966 raise util.Abort(_("-G/--graph option is incompatible with --%s")
1967 % op.replace("_", "-"))
1967 % op.replace("_", "-"))
1968
1968
1969 def graphrevs(repo, nodes, opts):
1969 def graphrevs(repo, nodes, opts):
1970 limit = loglimit(opts)
1970 limit = loglimit(opts)
1971 nodes.reverse()
1971 nodes.reverse()
1972 if limit is not None:
1972 if limit is not None:
1973 nodes = nodes[:limit]
1973 nodes = nodes[:limit]
1974 return graphmod.nodes(repo, nodes)
1974 return graphmod.nodes(repo, nodes)
1975
1975
1976 def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
1976 def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
1977 join = lambda f: os.path.join(prefix, f)
1977 join = lambda f: os.path.join(prefix, f)
1978 bad = []
1978 bad = []
1979 oldbad = match.bad
1979 oldbad = match.bad
1980 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1980 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1981 names = []
1981 names = []
1982 wctx = repo[None]
1982 wctx = repo[None]
1983 cca = None
1983 cca = None
1984 abort, warn = scmutil.checkportabilityalert(ui)
1984 abort, warn = scmutil.checkportabilityalert(ui)
1985 if abort or warn:
1985 if abort or warn:
1986 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
1986 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
1987 for f in wctx.walk(match):
1987 for f in wctx.walk(match):
1988 exact = match.exact(f)
1988 exact = match.exact(f)
1989 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
1989 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
1990 if cca:
1990 if cca:
1991 cca(f)
1991 cca(f)
1992 names.append(f)
1992 names.append(f)
1993 if ui.verbose or not exact:
1993 if ui.verbose or not exact:
1994 ui.status(_('adding %s\n') % match.rel(join(f)))
1994 ui.status(_('adding %s\n') % match.rel(join(f)))
1995
1995
1996 for subpath in sorted(wctx.substate):
1996 for subpath in sorted(wctx.substate):
1997 sub = wctx.sub(subpath)
1997 sub = wctx.sub(subpath)
1998 try:
1998 try:
1999 submatch = matchmod.narrowmatcher(subpath, match)
1999 submatch = matchmod.narrowmatcher(subpath, match)
2000 if listsubrepos:
2000 if listsubrepos:
2001 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
2001 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
2002 False))
2002 False))
2003 else:
2003 else:
2004 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
2004 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
2005 True))
2005 True))
2006 except error.LookupError:
2006 except error.LookupError:
2007 ui.status(_("skipping missing subrepository: %s\n")
2007 ui.status(_("skipping missing subrepository: %s\n")
2008 % join(subpath))
2008 % join(subpath))
2009
2009
2010 if not dryrun:
2010 if not dryrun:
2011 rejected = wctx.add(names, prefix)
2011 rejected = wctx.add(names, prefix)
2012 bad.extend(f for f in rejected if f in match.files())
2012 bad.extend(f for f in rejected if f in match.files())
2013 return bad
2013 return bad
2014
2014
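# Sketch of the pattern used at the top of add() above (and of forget()
# and cat() below): the matcher's bad-file callback is wrapped so that bad
# paths are recorded locally while still being reported through the
# original callback.
def _collectbad(match):
    bad = []
    oldbad = match.bad
    # list.append() returns None, so the original callback always runs too
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    return bad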
2015 def forget(ui, repo, match, prefix, explicitonly):
2015 def forget(ui, repo, match, prefix, explicitonly):
2016 join = lambda f: os.path.join(prefix, f)
2016 join = lambda f: os.path.join(prefix, f)
2017 bad = []
2017 bad = []
2018 oldbad = match.bad
2018 oldbad = match.bad
2019 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
2019 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
2020 wctx = repo[None]
2020 wctx = repo[None]
2021 forgot = []
2021 forgot = []
2022 s = repo.status(match=match, clean=True)
2022 s = repo.status(match=match, clean=True)
2023 forget = sorted(s[0] + s[1] + s[3] + s[6])
2023 forget = sorted(s[0] + s[1] + s[3] + s[6])
2024 if explicitonly:
2024 if explicitonly:
2025 forget = [f for f in forget if match.exact(f)]
2025 forget = [f for f in forget if match.exact(f)]
2026
2026
2027 for subpath in sorted(wctx.substate):
2027 for subpath in sorted(wctx.substate):
2028 sub = wctx.sub(subpath)
2028 sub = wctx.sub(subpath)
2029 try:
2029 try:
2030 submatch = matchmod.narrowmatcher(subpath, match)
2030 submatch = matchmod.narrowmatcher(subpath, match)
2031 subbad, subforgot = sub.forget(submatch, prefix)
2031 subbad, subforgot = sub.forget(submatch, prefix)
2032 bad.extend([subpath + '/' + f for f in subbad])
2032 bad.extend([subpath + '/' + f for f in subbad])
2033 forgot.extend([subpath + '/' + f for f in subforgot])
2033 forgot.extend([subpath + '/' + f for f in subforgot])
2034 except error.LookupError:
2034 except error.LookupError:
2035 ui.status(_("skipping missing subrepository: %s\n")
2035 ui.status(_("skipping missing subrepository: %s\n")
2036 % join(subpath))
2036 % join(subpath))
2037
2037
2038 if not explicitonly:
2038 if not explicitonly:
2039 for f in match.files():
2039 for f in match.files():
2040 if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
2040 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2041 if f not in forgot:
2041 if f not in forgot:
2042 if os.path.exists(match.rel(join(f))):
2042 if repo.wvfs.exists(f):
2043 ui.warn(_('not removing %s: '
2043 ui.warn(_('not removing %s: '
2044 'file is already untracked\n')
2044 'file is already untracked\n')
2045 % match.rel(join(f)))
2045 % match.rel(join(f)))
2046 bad.append(f)
2046 bad.append(f)
2047
2047
2048 for f in forget:
2048 for f in forget:
2049 if ui.verbose or not match.exact(f):
2049 if ui.verbose or not match.exact(f):
2050 ui.status(_('removing %s\n') % match.rel(join(f)))
2050 ui.status(_('removing %s\n') % match.rel(join(f)))
2051
2051
2052 rejected = wctx.forget(forget, prefix)
2052 rejected = wctx.forget(forget, prefix)
2053 bad.extend(f for f in rejected if f in match.files())
2053 bad.extend(f for f in rejected if f in match.files())
2054 forgot.extend(forget)
2054 forgot.extend(forget)
2055 return bad, forgot
2055 return bad, forgot
2056
2056
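# Sketch of the change visible in forget() above: the filesystem checks now
# go through the working-directory vfs with repo-relative names instead of
# os.path on cwd-relative names.  Both spellings answer the same question;
# the vfs form drops the join()/match.rel() round trip:
#
#   old: os.path.isdir(match.rel(os.path.join(prefix, f)))
#        os.path.exists(match.rel(os.path.join(prefix, f)))
#   new: repo.wvfs.isdir(f)
#        repo.wvfs.exists(f)
#
# where f is a path relative to the repository root.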
2057 def remove(ui, repo, m, prefix, after, force, subrepos):
2057 def remove(ui, repo, m, prefix, after, force, subrepos):
2058 join = lambda f: os.path.join(prefix, f)
2058 join = lambda f: os.path.join(prefix, f)
2059 ret = 0
2059 ret = 0
2060 s = repo.status(match=m, clean=True)
2060 s = repo.status(match=m, clean=True)
2061 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2061 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2062
2062
2063 wctx = repo[None]
2063 wctx = repo[None]
2064
2064
2065 for subpath in sorted(wctx.substate):
2065 for subpath in sorted(wctx.substate):
2066 def matchessubrepo(matcher, subpath):
2066 def matchessubrepo(matcher, subpath):
2067 if matcher.exact(subpath):
2067 if matcher.exact(subpath):
2068 return True
2068 return True
2069 for f in matcher.files():
2069 for f in matcher.files():
2070 if f.startswith(subpath):
2070 if f.startswith(subpath):
2071 return True
2071 return True
2072 return False
2072 return False
2073
2073
2074 if subrepos or matchessubrepo(m, subpath):
2074 if subrepos or matchessubrepo(m, subpath):
2075 sub = wctx.sub(subpath)
2075 sub = wctx.sub(subpath)
2076 try:
2076 try:
2077 submatch = matchmod.narrowmatcher(subpath, m)
2077 submatch = matchmod.narrowmatcher(subpath, m)
2078 if sub.removefiles(submatch, prefix, after, force, subrepos):
2078 if sub.removefiles(submatch, prefix, after, force, subrepos):
2079 ret = 1
2079 ret = 1
2080 except error.LookupError:
2080 except error.LookupError:
2081 ui.status(_("skipping missing subrepository: %s\n")
2081 ui.status(_("skipping missing subrepository: %s\n")
2082 % join(subpath))
2082 % join(subpath))
2083
2083
2084 # warn about failure to delete explicit files/dirs
2084 # warn about failure to delete explicit files/dirs
2085 for f in m.files():
2085 for f in m.files():
2086 def insubrepo():
2086 def insubrepo():
2087 for subpath in wctx.substate:
2087 for subpath in wctx.substate:
2088 if f.startswith(subpath):
2088 if f.startswith(subpath):
2089 return True
2089 return True
2090 return False
2090 return False
2091
2091
2092 if f in repo.dirstate or f in wctx.dirs() or f == '.' or insubrepo():
2092 if f in repo.dirstate or f in wctx.dirs() or f == '.' or insubrepo():
2093 continue
2093 continue
2094
2094
2095 if os.path.exists(m.rel(join(f))):
2095 if os.path.exists(m.rel(join(f))):
2096 if os.path.isdir(m.rel(join(f))):
2096 if os.path.isdir(m.rel(join(f))):
2097 ui.warn(_('not removing %s: no tracked files\n')
2097 ui.warn(_('not removing %s: no tracked files\n')
2098 % m.rel(join(f)))
2098 % m.rel(join(f)))
2099 else:
2099 else:
2100 ui.warn(_('not removing %s: file is untracked\n')
2100 ui.warn(_('not removing %s: file is untracked\n')
2101 % m.rel(join(f)))
2101 % m.rel(join(f)))
2102 # missing files will generate a warning elsewhere
2102 # missing files will generate a warning elsewhere
2103 ret = 1
2103 ret = 1
2104
2104
2105 if force:
2105 if force:
2106 list = modified + deleted + clean + added
2106 list = modified + deleted + clean + added
2107 elif after:
2107 elif after:
2108 list = deleted
2108 list = deleted
2109 for f in modified + added + clean:
2109 for f in modified + added + clean:
2110 ui.warn(_('not removing %s: file still exists\n') % m.rel(join(f)))
2110 ui.warn(_('not removing %s: file still exists\n') % m.rel(join(f)))
2111 ret = 1
2111 ret = 1
2112 else:
2112 else:
2113 list = deleted + clean
2113 list = deleted + clean
2114 for f in modified:
2114 for f in modified:
2115 ui.warn(_('not removing %s: file is modified (use -f'
2115 ui.warn(_('not removing %s: file is modified (use -f'
2116 ' to force removal)\n') % m.rel(join(f)))
2116 ' to force removal)\n') % m.rel(join(f)))
2117 ret = 1
2117 ret = 1
2118 for f in added:
2118 for f in added:
2119 ui.warn(_('not removing %s: file has been marked for add'
2119 ui.warn(_('not removing %s: file has been marked for add'
2120 ' (use forget to undo)\n') % m.rel(join(f)))
2120 ' (use forget to undo)\n') % m.rel(join(f)))
2121 ret = 1
2121 ret = 1
2122
2122
2123 for f in sorted(list):
2123 for f in sorted(list):
2124 if ui.verbose or not m.exact(f):
2124 if ui.verbose or not m.exact(f):
2125 ui.status(_('removing %s\n') % m.rel(join(f)))
2125 ui.status(_('removing %s\n') % m.rel(join(f)))
2126
2126
2127 wlock = repo.wlock()
2127 wlock = repo.wlock()
2128 try:
2128 try:
2129 if not after:
2129 if not after:
2130 for f in list:
2130 for f in list:
2131 if f in added:
2131 if f in added:
2132 continue # we never unlink added files on remove
2132 continue # we never unlink added files on remove
2133 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
2133 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
2134 repo[None].forget(list)
2134 repo[None].forget(list)
2135 finally:
2135 finally:
2136 wlock.release()
2136 wlock.release()
2137
2137
2138 return ret
2138 return ret
2139
2139
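# Sketch: the file selection above reduced to a pure function.  Given the
# status buckets, these are the files remove() will actually forget (and
# unlink unless --after); files it refuses are warned about in the
# branches above.
def _removalcandidates(modified, added, deleted, clean, force, after):
    if force:
        return modified + deleted + clean + added
    if after:
        return list(deleted)      # only files already missing from disk
    return deleted + clean        # default: refuse modified/added files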
2140 def cat(ui, repo, ctx, matcher, prefix, **opts):
2140 def cat(ui, repo, ctx, matcher, prefix, **opts):
2141 err = 1
2141 err = 1
2142
2142
2143 def write(path):
2143 def write(path):
2144 fp = makefileobj(repo, opts.get('output'), ctx.node(),
2144 fp = makefileobj(repo, opts.get('output'), ctx.node(),
2145 pathname=os.path.join(prefix, path))
2145 pathname=os.path.join(prefix, path))
2146 data = ctx[path].data()
2146 data = ctx[path].data()
2147 if opts.get('decode'):
2147 if opts.get('decode'):
2148 data = repo.wwritedata(path, data)
2148 data = repo.wwritedata(path, data)
2149 fp.write(data)
2149 fp.write(data)
2150 fp.close()
2150 fp.close()
2151
2151
2152 # Automation often uses hg cat on single files, so special case it
2152 # Automation often uses hg cat on single files, so special case it
2153 # for performance to avoid the cost of parsing the manifest.
2153 # for performance to avoid the cost of parsing the manifest.
2154 if len(matcher.files()) == 1 and not matcher.anypats():
2154 if len(matcher.files()) == 1 and not matcher.anypats():
2155 file = matcher.files()[0]
2155 file = matcher.files()[0]
2156 mf = repo.manifest
2156 mf = repo.manifest
2157 mfnode = ctx._changeset[0]
2157 mfnode = ctx._changeset[0]
2158 if mf.find(mfnode, file)[0]:
2158 if mf.find(mfnode, file)[0]:
2159 write(file)
2159 write(file)
2160 return 0
2160 return 0
2161
2161
2162 # Don't warn about "missing" files that are really in subrepos
2162 # Don't warn about "missing" files that are really in subrepos
2163 bad = matcher.bad
2163 bad = matcher.bad
2164
2164
2165 def badfn(path, msg):
2165 def badfn(path, msg):
2166 for subpath in ctx.substate:
2166 for subpath in ctx.substate:
2167 if path.startswith(subpath):
2167 if path.startswith(subpath):
2168 return
2168 return
2169 bad(path, msg)
2169 bad(path, msg)
2170
2170
2171 matcher.bad = badfn
2171 matcher.bad = badfn
2172
2172
2173 for abs in ctx.walk(matcher):
2173 for abs in ctx.walk(matcher):
2174 write(abs)
2174 write(abs)
2175 err = 0
2175 err = 0
2176
2176
2177 matcher.bad = bad
2177 matcher.bad = bad
2178
2178
2179 for subpath in sorted(ctx.substate):
2179 for subpath in sorted(ctx.substate):
2180 sub = ctx.sub(subpath)
2180 sub = ctx.sub(subpath)
2181 try:
2181 try:
2182 submatch = matchmod.narrowmatcher(subpath, matcher)
2182 submatch = matchmod.narrowmatcher(subpath, matcher)
2183
2183
2184 if not sub.cat(submatch, os.path.join(prefix, sub._path),
2184 if not sub.cat(submatch, os.path.join(prefix, sub._path),
2185 **opts):
2185 **opts):
2186 err = 0
2186 err = 0
2187 except error.RepoLookupError:
2187 except error.RepoLookupError:
2188 ui.status(_("skipping missing subrepository: %s\n")
2188 ui.status(_("skipping missing subrepository: %s\n")
2189 % os.path.join(prefix, subpath))
2189 % os.path.join(prefix, subpath))
2190
2190
2191 return err
2191 return err
2192
2192
2193 def commit(ui, repo, commitfunc, pats, opts):
2193 def commit(ui, repo, commitfunc, pats, opts):
2194 '''commit the specified files or all outstanding changes'''
2194 '''commit the specified files or all outstanding changes'''
2195 date = opts.get('date')
2195 date = opts.get('date')
2196 if date:
2196 if date:
2197 opts['date'] = util.parsedate(date)
2197 opts['date'] = util.parsedate(date)
2198 message = logmessage(ui, opts)
2198 message = logmessage(ui, opts)
2199 matcher = scmutil.match(repo[None], pats, opts)
2199 matcher = scmutil.match(repo[None], pats, opts)
2200
2200
2201 # extract addremove carefully -- this function can be called from a command
2201 # extract addremove carefully -- this function can be called from a command
2202 # that doesn't support addremove
2202 # that doesn't support addremove
2203 if opts.get('addremove'):
2203 if opts.get('addremove'):
2204 if scmutil.addremove(repo, matcher, "", opts) != 0:
2204 if scmutil.addremove(repo, matcher, "", opts) != 0:
2205 raise util.Abort(
2205 raise util.Abort(
2206 _("failed to mark all new/missing files as added/removed"))
2206 _("failed to mark all new/missing files as added/removed"))
2207
2207
2208 return commitfunc(ui, repo, message, matcher, opts)
2208 return commitfunc(ui, repo, message, matcher, opts)
2209
2209
2210 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2210 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2211 # amend will reuse the existing user if not specified, but the obsolete
2211 # amend will reuse the existing user if not specified, but the obsolete
2212 # marker creation requires that the current user's name is specified.
2212 # marker creation requires that the current user's name is specified.
2213 if obsolete._enabled:
2213 if obsolete._enabled:
2214 ui.username() # raise exception if username not set
2214 ui.username() # raise exception if username not set
2215
2215
2216 ui.note(_('amending changeset %s\n') % old)
2216 ui.note(_('amending changeset %s\n') % old)
2217 base = old.p1()
2217 base = old.p1()
2218
2218
2219 wlock = lock = newid = None
2219 wlock = lock = newid = None
2220 try:
2220 try:
2221 wlock = repo.wlock()
2221 wlock = repo.wlock()
2222 lock = repo.lock()
2222 lock = repo.lock()
2223 tr = repo.transaction('amend')
2223 tr = repo.transaction('amend')
2224 try:
2224 try:
2225 # See if we got a message from -m or -l, if not, open the editor
2225 # See if we got a message from -m or -l, if not, open the editor
2226 # with the message of the changeset to amend
2226 # with the message of the changeset to amend
2227 message = logmessage(ui, opts)
2227 message = logmessage(ui, opts)
2228 # ensure logfile does not conflict with later enforcement of the
2228 # ensure logfile does not conflict with later enforcement of the
2229 # message. potential logfile content has been processed by
2229 # message. potential logfile content has been processed by
2230 # `logmessage` anyway.
2230 # `logmessage` anyway.
2231 opts.pop('logfile')
2231 opts.pop('logfile')
2232 # First, do a regular commit to record all changes in the working
2232 # First, do a regular commit to record all changes in the working
2233 # directory (if there are any)
2233 # directory (if there are any)
2234 ui.callhooks = False
2234 ui.callhooks = False
2235 currentbookmark = repo._bookmarkcurrent
2235 currentbookmark = repo._bookmarkcurrent
2236 try:
2236 try:
2237 repo._bookmarkcurrent = None
2237 repo._bookmarkcurrent = None
2238 opts['message'] = 'temporary amend commit for %s' % old
2238 opts['message'] = 'temporary amend commit for %s' % old
2239 node = commit(ui, repo, commitfunc, pats, opts)
2239 node = commit(ui, repo, commitfunc, pats, opts)
2240 finally:
2240 finally:
2241 repo._bookmarkcurrent = currentbookmark
2241 repo._bookmarkcurrent = currentbookmark
2242 ui.callhooks = True
2242 ui.callhooks = True
2243 ctx = repo[node]
2243 ctx = repo[node]
2244
2244
2245 # Participating changesets:
2245 # Participating changesets:
2246 #
2246 #
2247 # node/ctx o - new (intermediate) commit that contains changes
2247 # node/ctx o - new (intermediate) commit that contains changes
2248 # | from working dir to go into amending commit
2248 # | from working dir to go into amending commit
2249 # | (or a workingctx if there were no changes)
2249 # | (or a workingctx if there were no changes)
2250 # |
2250 # |
2251 # old o - changeset to amend
2251 # old o - changeset to amend
2252 # |
2252 # |
2253 # base o - parent of amending changeset
2253 # base o - parent of amending changeset
2254
2254
2255 # Update extra dict from amended commit (e.g. to preserve graft
2255 # Update extra dict from amended commit (e.g. to preserve graft
2256 # source)
2256 # source)
2257 extra.update(old.extra())
2257 extra.update(old.extra())
2258
2258
2259 # Also update it from the intermediate commit or from the wctx
2259 # Also update it from the intermediate commit or from the wctx
2260 extra.update(ctx.extra())
2260 extra.update(ctx.extra())
2261
2261
2262 if len(old.parents()) > 1:
2262 if len(old.parents()) > 1:
2263 # ctx.files() isn't reliable for merges, so fall back to the
2263 # ctx.files() isn't reliable for merges, so fall back to the
2264 # slower repo.status() method
2264 # slower repo.status() method
2265 files = set([fn for st in repo.status(base, old)[:3]
2265 files = set([fn for st in repo.status(base, old)[:3]
2266 for fn in st])
2266 for fn in st])
2267 else:
2267 else:
2268 files = set(old.files())
2268 files = set(old.files())
2269
2269
2270 # Second, we use either the commit we just did or, if there were no
2270 # Second, we use either the commit we just did or, if there were no
2271 # changes, the parent of the working directory as the version of the
2271 # changes, the parent of the working directory as the version of the
2272 # files in the final amend commit
2272 # files in the final amend commit
2273 if node:
2273 if node:
2274 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2274 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2275
2275
2276 user = ctx.user()
2276 user = ctx.user()
2277 date = ctx.date()
2277 date = ctx.date()
2278 # Recompute copies (avoid recording a -> b -> a)
2278 # Recompute copies (avoid recording a -> b -> a)
2279 copied = copies.pathcopies(base, ctx)
2279 copied = copies.pathcopies(base, ctx)
2280
2280
2281 # Prune files which were reverted by the updates: if old
2281 # Prune files which were reverted by the updates: if old
2282 # introduced file X and our intermediate commit, node,
2282 # introduced file X and our intermediate commit, node,
2283 # renamed that file, then those two files are the same and
2283 # renamed that file, then those two files are the same and
2284 # we can discard X from our list of files. Likewise if X
2284 # we can discard X from our list of files. Likewise if X
2285 # was deleted, it's no longer relevant
2285 # was deleted, it's no longer relevant
2286 files.update(ctx.files())
2286 files.update(ctx.files())
2287
2287
2288 def samefile(f):
2288 def samefile(f):
2289 if f in ctx.manifest():
2289 if f in ctx.manifest():
2290 a = ctx.filectx(f)
2290 a = ctx.filectx(f)
2291 if f in base.manifest():
2291 if f in base.manifest():
2292 b = base.filectx(f)
2292 b = base.filectx(f)
2293 return (not a.cmp(b)
2293 return (not a.cmp(b)
2294 and a.flags() == b.flags())
2294 and a.flags() == b.flags())
2295 else:
2295 else:
2296 return False
2296 return False
2297 else:
2297 else:
2298 return f not in base.manifest()
2298 return f not in base.manifest()
2299 files = [f for f in files if not samefile(f)]
2299 files = [f for f in files if not samefile(f)]
2300
2300
2301 def filectxfn(repo, ctx_, path):
2301 def filectxfn(repo, ctx_, path):
2302 try:
2302 try:
2303 fctx = ctx[path]
2303 fctx = ctx[path]
2304 flags = fctx.flags()
2304 flags = fctx.flags()
2305 mctx = context.memfilectx(repo,
2305 mctx = context.memfilectx(repo,
2306 fctx.path(), fctx.data(),
2306 fctx.path(), fctx.data(),
2307 islink='l' in flags,
2307 islink='l' in flags,
2308 isexec='x' in flags,
2308 isexec='x' in flags,
2309 copied=copied.get(path))
2309 copied=copied.get(path))
2310 return mctx
2310 return mctx
2311 except KeyError:
2311 except KeyError:
2312 return None
2312 return None
2313 else:
2313 else:
2314 ui.note(_('copying changeset %s to %s\n') % (old, base))
2314 ui.note(_('copying changeset %s to %s\n') % (old, base))
2315
2315
2316 # Use version of files as in the old cset
2316 # Use version of files as in the old cset
2317 def filectxfn(repo, ctx_, path):
2317 def filectxfn(repo, ctx_, path):
2318 try:
2318 try:
2319 return old.filectx(path)
2319 return old.filectx(path)
2320 except KeyError:
2320 except KeyError:
2321 return None
2321 return None
2322
2322
2323 user = opts.get('user') or old.user()
2323 user = opts.get('user') or old.user()
2324 date = opts.get('date') or old.date()
2324 date = opts.get('date') or old.date()
2325 editform = mergeeditform(old, 'commit.amend')
2325 editform = mergeeditform(old, 'commit.amend')
2326 editor = getcommiteditor(editform=editform, **opts)
2326 editor = getcommiteditor(editform=editform, **opts)
2327 if not message:
2327 if not message:
2328 editor = getcommiteditor(edit=True, editform=editform)
2328 editor = getcommiteditor(edit=True, editform=editform)
2329 message = old.description()
2329 message = old.description()
2330
2330
2331 pureextra = extra.copy()
2331 pureextra = extra.copy()
2332 extra['amend_source'] = old.hex()
2332 extra['amend_source'] = old.hex()
2333
2333
2334 new = context.memctx(repo,
2334 new = context.memctx(repo,
2335 parents=[base.node(), old.p2().node()],
2335 parents=[base.node(), old.p2().node()],
2336 text=message,
2336 text=message,
2337 files=files,
2337 files=files,
2338 filectxfn=filectxfn,
2338 filectxfn=filectxfn,
2339 user=user,
2339 user=user,
2340 date=date,
2340 date=date,
2341 extra=extra,
2341 extra=extra,
2342 editor=editor)
2342 editor=editor)
2343
2343
2344 newdesc = changelog.stripdesc(new.description())
2344 newdesc = changelog.stripdesc(new.description())
2345 if ((not node)
2345 if ((not node)
2346 and newdesc == old.description()
2346 and newdesc == old.description()
2347 and user == old.user()
2347 and user == old.user()
2348 and date == old.date()
2348 and date == old.date()
2349 and pureextra == old.extra()):
2349 and pureextra == old.extra()):
2350 # nothing changed. continuing here would create a new node
2350 # nothing changed. continuing here would create a new node
2351 # anyway because of the amend_source noise.
2351 # anyway because of the amend_source noise.
2352 #
2352 #
2353 # This is not what we expect from amend.
2353 # This is not what we expect from amend.
2354 return old.node()
2354 return old.node()
2355
2355
2356 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2356 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2357 try:
2357 try:
2358 if opts.get('secret'):
2358 if opts.get('secret'):
2359 commitphase = 'secret'
2359 commitphase = 'secret'
2360 else:
2360 else:
2361 commitphase = old.phase()
2361 commitphase = old.phase()
2362 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2362 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2363 newid = repo.commitctx(new)
2363 newid = repo.commitctx(new)
2364 finally:
2364 finally:
2365 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2365 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2366 if newid != old.node():
2366 if newid != old.node():
2367 # Reroute the working copy parent to the new changeset
2367 # Reroute the working copy parent to the new changeset
2368 repo.setparents(newid, nullid)
2368 repo.setparents(newid, nullid)
2369
2369
2370 # Move bookmarks from old parent to amend commit
2370 # Move bookmarks from old parent to amend commit
2371 bms = repo.nodebookmarks(old.node())
2371 bms = repo.nodebookmarks(old.node())
2372 if bms:
2372 if bms:
2373 marks = repo._bookmarks
2373 marks = repo._bookmarks
2374 for bm in bms:
2374 for bm in bms:
2375 marks[bm] = newid
2375 marks[bm] = newid
2376 marks.write()
2376 marks.write()
2377 # commit the whole amend process
2377 # commit the whole amend process
2378 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2378 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2379 if createmarkers and newid != old.node():
2379 if createmarkers and newid != old.node():
2380 # mark the new changeset as successor of the rewritten one
2380 # mark the new changeset as successor of the rewritten one
2381 new = repo[newid]
2381 new = repo[newid]
2382 obs = [(old, (new,))]
2382 obs = [(old, (new,))]
2383 if node:
2383 if node:
2384 obs.append((ctx, ()))
2384 obs.append((ctx, ()))
2385
2385
2386 obsolete.createmarkers(repo, obs)
2386 obsolete.createmarkers(repo, obs)
2387 tr.close()
2387 tr.close()
2388 finally:
2388 finally:
2389 tr.release()
2389 tr.release()
2390 if not createmarkers and newid != old.node():
2390 if not createmarkers and newid != old.node():
2391 # Strip the intermediate commit (if there was one) and the amended
2391 # Strip the intermediate commit (if there was one) and the amended
2392 # commit
2392 # commit
2393 if node:
2393 if node:
2394 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2394 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2395 ui.note(_('stripping amended changeset %s\n') % old)
2395 ui.note(_('stripping amended changeset %s\n') % old)
2396 repair.strip(ui, repo, old.node(), topic='amend-backup')
2396 repair.strip(ui, repo, old.node(), topic='amend-backup')
2397 finally:
2397 finally:
2398 if newid is None:
2398 if newid is None:
2399 repo.dirstate.invalidate()
2399 repo.dirstate.invalidate()
2400 lockmod.release(lock, wlock)
2400 lockmod.release(lock, wlock)
2401 return newid
2401 return newid
2402
2402
2403 def commiteditor(repo, ctx, subs, editform=''):
2403 def commiteditor(repo, ctx, subs, editform=''):
2404 if ctx.description():
2404 if ctx.description():
2405 return ctx.description()
2405 return ctx.description()
2406 return commitforceeditor(repo, ctx, subs, editform=editform)
2406 return commitforceeditor(repo, ctx, subs, editform=editform)
2407
2407
2408 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2408 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2409 editform=''):
2409 editform=''):
2410 if not extramsg:
2410 if not extramsg:
2411 extramsg = _("Leave message empty to abort commit.")
2411 extramsg = _("Leave message empty to abort commit.")
2412
2412
2413 forms = [e for e in editform.split('.') if e]
2413 forms = [e for e in editform.split('.') if e]
2414 forms.insert(0, 'changeset')
2414 forms.insert(0, 'changeset')
2415 while forms:
2415 while forms:
2416 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2416 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2417 if tmpl:
2417 if tmpl:
2418 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2418 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2419 break
2419 break
2420 forms.pop()
2420 forms.pop()
2421 else:
2421 else:
2422 committext = buildcommittext(repo, ctx, subs, extramsg)
2422 committext = buildcommittext(repo, ctx, subs, extramsg)
2423
2423
2424 # run editor in the repository root
2424 # run editor in the repository root
2425 olddir = os.getcwd()
2425 olddir = os.getcwd()
2426 os.chdir(repo.root)
2426 os.chdir(repo.root)
2427 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2427 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2428 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2428 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2429 os.chdir(olddir)
2429 os.chdir(olddir)
2430
2430
2431 if finishdesc:
2431 if finishdesc:
2432 text = finishdesc(text)
2432 text = finishdesc(text)
2433 if not text.strip():
2433 if not text.strip():
2434 raise util.Abort(_("empty commit message"))
2434 raise util.Abort(_("empty commit message"))
2435
2435
2436 return text
2436 return text
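# Illustrative note (not part of this changeset): the '.'-joined editform
# lookup above means a more specific [committemplate] key wins over a more
# generic one. A user hgrc could carry hypothetical entries such as:
#
#   [committemplate]
#   changeset = {desc}\n\nHG: {extramsg}\n
#   changeset.commit.amend = {desc}\n\nHG: amending\nHG: {extramsg}\n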
2437
2437
2438 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2438 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2439 ui = repo.ui
2439 ui = repo.ui
2440 tmpl, mapfile = gettemplate(ui, tmpl, None)
2440 tmpl, mapfile = gettemplate(ui, tmpl, None)
2441
2441
2442 try:
2442 try:
2443 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2443 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2444 except SyntaxError, inst:
2444 except SyntaxError, inst:
2445 raise util.Abort(inst.args[0])
2445 raise util.Abort(inst.args[0])
2446
2446
2447 for k, v in repo.ui.configitems('committemplate'):
2447 for k, v in repo.ui.configitems('committemplate'):
2448 if k != 'changeset':
2448 if k != 'changeset':
2449 t.t.cache[k] = v
2449 t.t.cache[k] = v
2450
2450
2451 if not extramsg:
2451 if not extramsg:
2452 extramsg = '' # ensure that extramsg is string
2452 extramsg = '' # ensure that extramsg is string
2453
2453
2454 ui.pushbuffer()
2454 ui.pushbuffer()
2455 t.show(ctx, extramsg=extramsg)
2455 t.show(ctx, extramsg=extramsg)
2456 return ui.popbuffer()
2456 return ui.popbuffer()
2457
2457
2458 def buildcommittext(repo, ctx, subs, extramsg):
2458 def buildcommittext(repo, ctx, subs, extramsg):
2459 edittext = []
2459 edittext = []
2460 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2460 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2461 if ctx.description():
2461 if ctx.description():
2462 edittext.append(ctx.description())
2462 edittext.append(ctx.description())
2463 edittext.append("")
2463 edittext.append("")
2464 edittext.append("") # Empty line between message and comments.
2464 edittext.append("") # Empty line between message and comments.
2465 edittext.append(_("HG: Enter commit message."
2465 edittext.append(_("HG: Enter commit message."
2466 " Lines beginning with 'HG:' are removed."))
2466 " Lines beginning with 'HG:' are removed."))
2467 edittext.append("HG: %s" % extramsg)
2467 edittext.append("HG: %s" % extramsg)
2468 edittext.append("HG: --")
2468 edittext.append("HG: --")
2469 edittext.append(_("HG: user: %s") % ctx.user())
2469 edittext.append(_("HG: user: %s") % ctx.user())
2470 if ctx.p2():
2470 if ctx.p2():
2471 edittext.append(_("HG: branch merge"))
2471 edittext.append(_("HG: branch merge"))
2472 if ctx.branch():
2472 if ctx.branch():
2473 edittext.append(_("HG: branch '%s'") % ctx.branch())
2473 edittext.append(_("HG: branch '%s'") % ctx.branch())
2474 if bookmarks.iscurrent(repo):
2474 if bookmarks.iscurrent(repo):
2475 edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
2475 edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
2476 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2476 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2477 edittext.extend([_("HG: added %s") % f for f in added])
2477 edittext.extend([_("HG: added %s") % f for f in added])
2478 edittext.extend([_("HG: changed %s") % f for f in modified])
2478 edittext.extend([_("HG: changed %s") % f for f in modified])
2479 edittext.extend([_("HG: removed %s") % f for f in removed])
2479 edittext.extend([_("HG: removed %s") % f for f in removed])
2480 if not added and not modified and not removed:
2480 if not added and not modified and not removed:
2481 edittext.append(_("HG: no files changed"))
2481 edittext.append(_("HG: no files changed"))
2482 edittext.append("")
2482 edittext.append("")
2483
2483
2484 return "\n".join(edittext)
2484 return "\n".join(edittext)
2485
2485
2486 def commitstatus(repo, node, branch, bheads=None, opts={}):
2486 def commitstatus(repo, node, branch, bheads=None, opts={}):
2487 ctx = repo[node]
2487 ctx = repo[node]
2488 parents = ctx.parents()
2488 parents = ctx.parents()
2489
2489
2490 if (not opts.get('amend') and bheads and node not in bheads and not
2490 if (not opts.get('amend') and bheads and node not in bheads and not
2491 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2491 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2492 repo.ui.status(_('created new head\n'))
2492 repo.ui.status(_('created new head\n'))
2493 # The message is not printed for initial roots. For the other
2493 # The message is not printed for initial roots. For the other
2494 # changesets, it is printed in the following situations:
2494 # changesets, it is printed in the following situations:
2495 #
2495 #
2496 # Par column: for the 2 parents with ...
2496 # Par column: for the 2 parents with ...
2497 # N: null or no parent
2497 # N: null or no parent
2498 # B: parent is on another named branch
2498 # B: parent is on another named branch
2499 # C: parent is a regular non head changeset
2499 # C: parent is a regular non head changeset
2500 # H: parent was a branch head of the current branch
2500 # H: parent was a branch head of the current branch
2501 # Msg column: whether we print "created new head" message
2501 # Msg column: whether we print "created new head" message
2502 # In the following, it is assumed that there already exists some
2502 # In the following, it is assumed that there already exists some
2503 # initial branch heads of the current branch, otherwise nothing is
2503 # initial branch heads of the current branch, otherwise nothing is
2504 # printed anyway.
2504 # printed anyway.
2505 #
2505 #
2506 # Par Msg Comment
2506 # Par Msg Comment
2507 # N N y additional topo root
2507 # N N y additional topo root
2508 #
2508 #
2509 # B N y additional branch root
2509 # B N y additional branch root
2510 # C N y additional topo head
2510 # C N y additional topo head
2511 # H N n usual case
2511 # H N n usual case
2512 #
2512 #
2513 # B B y weird additional branch root
2513 # B B y weird additional branch root
2514 # C B y branch merge
2514 # C B y branch merge
2515 # H B n merge with named branch
2515 # H B n merge with named branch
2516 #
2516 #
2517 # C C y additional head from merge
2517 # C C y additional head from merge
2518 # C H n merge with a head
2518 # C H n merge with a head
2519 #
2519 #
2520 # H H n head merge: head count decreases
2520 # H H n head merge: head count decreases
2521
2521
2522 if not opts.get('close_branch'):
2522 if not opts.get('close_branch'):
2523 for r in parents:
2523 for r in parents:
2524 if r.closesbranch() and r.branch() == branch:
2524 if r.closesbranch() and r.branch() == branch:
2525 repo.ui.status(_('reopening closed branch head %d\n') % r)
2525 repo.ui.status(_('reopening closed branch head %d\n') % r)
2526
2526
2527 if repo.ui.debugflag:
2527 if repo.ui.debugflag:
2528 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2528 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2529 elif repo.ui.verbose:
2529 elif repo.ui.verbose:
2530 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2530 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2531
2531
2532 def revert(ui, repo, ctx, parents, *pats, **opts):
2532 def revert(ui, repo, ctx, parents, *pats, **opts):
2533 parent, p2 = parents
2533 parent, p2 = parents
2534 node = ctx.node()
2534 node = ctx.node()
2535
2535
2536 mf = ctx.manifest()
2536 mf = ctx.manifest()
2537 if node == p2:
2537 if node == p2:
2538 parent = p2
2538 parent = p2
2539 if node == parent:
2539 if node == parent:
2540 pmf = mf
2540 pmf = mf
2541 else:
2541 else:
2542 pmf = None
2542 pmf = None
2543
2543
2544 # need all matching names in dirstate and manifest of target rev,
2544 # need all matching names in dirstate and manifest of target rev,
2545 # so have to walk both. do not print errors if files exist in one
2545 # so have to walk both. do not print errors if files exist in one
2546 # but not the other.
2546 # but not the other.
2547
2547
2548 # `names` is a mapping for all elements in working copy and target revision
2548 # `names` is a mapping for all elements in working copy and target revision
2549 # The mapping is in the form:
2549 # The mapping is in the form:
2550 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2550 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2551 names = {}
2551 names = {}
2552
2552
2553 wlock = repo.wlock()
2553 wlock = repo.wlock()
2554 try:
2554 try:
2555 ## filling of the `names` mapping
2555 ## filling of the `names` mapping
2556 # walk dirstate to fill `names`
2556 # walk dirstate to fill `names`
2557
2557
2558 m = scmutil.match(repo[None], pats, opts)
2558 m = scmutil.match(repo[None], pats, opts)
2559 if not m.always() or node != parent:
2559 if not m.always() or node != parent:
2560 m.bad = lambda x, y: False
2560 m.bad = lambda x, y: False
2561 for abs in repo.walk(m):
2561 for abs in repo.walk(m):
2562 names[abs] = m.rel(abs), m.exact(abs)
2562 names[abs] = m.rel(abs), m.exact(abs)
2563
2563
2564 # walk target manifest to fill `names`
2564 # walk target manifest to fill `names`
2565
2565
2566 def badfn(path, msg):
2566 def badfn(path, msg):
2567 if path in names:
2567 if path in names:
2568 return
2568 return
2569 if path in ctx.substate:
2569 if path in ctx.substate:
2570 return
2570 return
2571 path_ = path + '/'
2571 path_ = path + '/'
2572 for f in names:
2572 for f in names:
2573 if f.startswith(path_):
2573 if f.startswith(path_):
2574 return
2574 return
2575 ui.warn("%s: %s\n" % (m.rel(path), msg))
2575 ui.warn("%s: %s\n" % (m.rel(path), msg))
2576
2576
2577 m = scmutil.match(ctx, pats, opts)
2577 m = scmutil.match(ctx, pats, opts)
2578 m.bad = badfn
2578 m.bad = badfn
2579 for abs in ctx.walk(m):
2579 for abs in ctx.walk(m):
2580 if abs not in names:
2580 if abs not in names:
2581 names[abs] = m.rel(abs), m.exact(abs)
2581 names[abs] = m.rel(abs), m.exact(abs)
2582
2582
2583 # Find the status of all files in `names`.
2583 # Find the status of all files in `names`.
2584 m = scmutil.matchfiles(repo, names)
2584 m = scmutil.matchfiles(repo, names)
2585
2585
2586 changes = repo.status(node1=node, match=m,
2586 changes = repo.status(node1=node, match=m,
2587 unknown=True, ignored=True, clean=True)
2587 unknown=True, ignored=True, clean=True)
2588 else:
2588 else:
2589 changes = repo.status(match=m)
2589 changes = repo.status(match=m)
2590 for kind in changes:
2590 for kind in changes:
2591 for abs in kind:
2591 for abs in kind:
2592 names[abs] = m.rel(abs), m.exact(abs)
2592 names[abs] = m.rel(abs), m.exact(abs)
2593
2593
2594 m = scmutil.matchfiles(repo, names)
2594 m = scmutil.matchfiles(repo, names)
2595
2595
2596 modified = set(changes.modified)
2596 modified = set(changes.modified)
2597 added = set(changes.added)
2597 added = set(changes.added)
2598 removed = set(changes.removed)
2598 removed = set(changes.removed)
2599 _deleted = set(changes.deleted)
2599 _deleted = set(changes.deleted)
2600 unknown = set(changes.unknown)
2600 unknown = set(changes.unknown)
2601 unknown.update(changes.ignored)
2601 unknown.update(changes.ignored)
2602 clean = set(changes.clean)
2602 clean = set(changes.clean)
2603 modadded = set()
2603 modadded = set()
2604
2604
2605 # split between files known in target manifest and the others
2605 # split between files known in target manifest and the others
2606 smf = set(mf)
2606 smf = set(mf)
2607
2607
2608 # determine the exact nature of the deleted files
2608 # determine the exact nature of the deleted files
2609 deladded = _deleted - smf
2609 deladded = _deleted - smf
2610 deleted = _deleted - deladded
2610 deleted = _deleted - deladded
2611
2611
2612 # We need to account for the state of the files in the dirstate,
2612 # We need to account for the state of the files in the dirstate,
2613 # even when we revert against something other than the parent.
2613 # even when we revert against something other than the parent.
2614 #
2614 #
2615 # This will slightly alter the behavior of revert (doing a backup or
2615 # This will slightly alter the behavior of revert (doing a backup or
2616 # not, deleting or just forgetting, etc).
2616 # not, deleting or just forgetting, etc).
2617 if parent == node:
2617 if parent == node:
2618 dsmodified = modified
2618 dsmodified = modified
2619 dsadded = added
2619 dsadded = added
2620 dsremoved = removed
2620 dsremoved = removed
2621 # store all local modifications, useful later for rename detection
2621 # store all local modifications, useful later for rename detection
2622 localchanges = dsmodified | dsadded
2622 localchanges = dsmodified | dsadded
2623 modified, added, removed = set(), set(), set()
2623 modified, added, removed = set(), set(), set()
2624 else:
2624 else:
2625 changes = repo.status(node1=parent, match=m)
2625 changes = repo.status(node1=parent, match=m)
2626 dsmodified = set(changes.modified)
2626 dsmodified = set(changes.modified)
2627 dsadded = set(changes.added)
2627 dsadded = set(changes.added)
2628 dsremoved = set(changes.removed)
2628 dsremoved = set(changes.removed)
2629 # store all local modifications, useful later for rename detection
2629 # store all local modifications, useful later for rename detection
2630 localchanges = dsmodified | dsadded
2630 localchanges = dsmodified | dsadded
2631
2631
2632 # only take into account removes between wc and target
2632 # only take into account removes between wc and target
2633 clean |= dsremoved - removed
2633 clean |= dsremoved - removed
2634 dsremoved &= removed
2634 dsremoved &= removed
2635 # distinguish between dirstate removal and other removals
2635 # distinguish between dirstate removal and other removals
2636 removed -= dsremoved
2636 removed -= dsremoved
2637
2637
2638 modadded = added & dsmodified
2638 modadded = added & dsmodified
2639 added -= modadded
2639 added -= modadded
2640
2640
2641 # tell newly modified files apart.
2641 # tell newly modified files apart.
2642 dsmodified &= modified
2642 dsmodified &= modified
2643 dsmodified |= modified & dsadded # dirstate added may need backup
2643 dsmodified |= modified & dsadded # dirstate added may need backup
2644 modified -= dsmodified
2644 modified -= dsmodified
2645
2645
2646 # We need to wait for some post-processing to update this set
2646 # We need to wait for some post-processing to update this set
2647 # before making the distinction. The dirstate will be used for
2647 # before making the distinction. The dirstate will be used for
2648 # that purpose.
2648 # that purpose.
2649 dsadded = added
2649 dsadded = added
2650
2650
2651 # in case of merge, files that are actually added can be reported as
2651 # in case of merge, files that are actually added can be reported as
2652 # modified; we need to post-process the result
2652 # modified; we need to post-process the result
2653 if p2 != nullid:
2653 if p2 != nullid:
2654 if pmf is None:
2654 if pmf is None:
2655 # only need parent manifest in the merge case,
2655 # only need parent manifest in the merge case,
2656 # so do not read by default
2656 # so do not read by default
2657 pmf = repo[parent].manifest()
2657 pmf = repo[parent].manifest()
2658 mergeadd = dsmodified - set(pmf)
2658 mergeadd = dsmodified - set(pmf)
2659 dsadded |= mergeadd
2659 dsadded |= mergeadd
2660 dsmodified -= mergeadd
2660 dsmodified -= mergeadd
2661
2661
2662 # if f is a rename, update `names` to also revert the source
2662 # if f is a rename, update `names` to also revert the source
2663 cwd = repo.getcwd()
2663 cwd = repo.getcwd()
2664 for f in localchanges:
2664 for f in localchanges:
2665 src = repo.dirstate.copied(f)
2665 src = repo.dirstate.copied(f)
2666 # XXX should we check for rename down to target node?
2666 # XXX should we check for rename down to target node?
2667 if src and src not in names and repo.dirstate[src] == 'r':
2667 if src and src not in names and repo.dirstate[src] == 'r':
2668 dsremoved.add(src)
2668 dsremoved.add(src)
2669 names[src] = (repo.pathto(src, cwd), True)
2669 names[src] = (repo.pathto(src, cwd), True)
2670
2670
2671 # distinguish between files to forget and the others
2671 # distinguish between files to forget and the others
2672 added = set()
2672 added = set()
2673 for abs in dsadded:
2673 for abs in dsadded:
2674 if repo.dirstate[abs] != 'a':
2674 if repo.dirstate[abs] != 'a':
2675 added.add(abs)
2675 added.add(abs)
2676 dsadded -= added
2676 dsadded -= added
2677
2677
2678 for abs in deladded:
2678 for abs in deladded:
2679 if repo.dirstate[abs] == 'a':
2679 if repo.dirstate[abs] == 'a':
2680 dsadded.add(abs)
2680 dsadded.add(abs)
2681 deladded -= dsadded
2681 deladded -= dsadded
2682
2682
2683 # For files marked as removed, we check if an unknown file is present at
2683 # For files marked as removed, we check if an unknown file is present at
2684 # the same path. If such a file exists, it may need to be backed up.
2684 # the same path. If such a file exists, it may need to be backed up.
2685 # Making the distinction at this stage helps keep the backup
2685 # Making the distinction at this stage helps keep the backup
2686 # logic simpler.
2686 # logic simpler.
2687 removunk = set()
2687 removunk = set()
2688 for abs in removed:
2688 for abs in removed:
2689 target = repo.wjoin(abs)
2689 target = repo.wjoin(abs)
2690 if os.path.lexists(target):
2690 if os.path.lexists(target):
2691 removunk.add(abs)
2691 removunk.add(abs)
2692 removed -= removunk
2692 removed -= removunk
2693
2693
2694 dsremovunk = set()
2694 dsremovunk = set()
2695 for abs in dsremoved:
2695 for abs in dsremoved:
2696 target = repo.wjoin(abs)
2696 target = repo.wjoin(abs)
2697 if os.path.lexists(target):
2697 if os.path.lexists(target):
2698 dsremovunk.add(abs)
2698 dsremovunk.add(abs)
2699 dsremoved -= dsremovunk
2699 dsremoved -= dsremovunk
2700
2700
2701 # action to be actually performed by revert
2701 # action to be actually performed by revert
2702 # (<list of files>, <message>) tuple
2702 # (<list of files>, <message>) tuple
2703 actions = {'revert': ([], _('reverting %s\n')),
2703 actions = {'revert': ([], _('reverting %s\n')),
2704 'add': ([], _('adding %s\n')),
2704 'add': ([], _('adding %s\n')),
2705 'remove': ([], _('removing %s\n')),
2705 'remove': ([], _('removing %s\n')),
2706 'drop': ([], _('removing %s\n')),
2706 'drop': ([], _('removing %s\n')),
2707 'forget': ([], _('forgetting %s\n')),
2707 'forget': ([], _('forgetting %s\n')),
2708 'undelete': ([], _('undeleting %s\n')),
2708 'undelete': ([], _('undeleting %s\n')),
2709 'noop': (None, _('no changes needed to %s\n')),
2709 'noop': (None, _('no changes needed to %s\n')),
2710 'unknown': (None, _('file not managed: %s\n')),
2710 'unknown': (None, _('file not managed: %s\n')),
2711 }
2711 }
2712
2712
2713 # "constant" that convey the backup strategy.
2713 # "constant" that convey the backup strategy.
2714 # All set to `discard` if `no-backup` is set do avoid checking
2714 # All set to `discard` if `no-backup` is set do avoid checking
2715 # no_backup lower in the code.
2715 # no_backup lower in the code.
2716 # These values are ordered for comparison purposes
2716 # These values are ordered for comparison purposes
2717 backup = 2 # unconditionally do backup
2717 backup = 2 # unconditionally do backup
2718 check = 1 # check if the existing file differs from target
2718 check = 1 # check if the existing file differs from target
2719 discard = 0 # never do backup
2719 discard = 0 # never do backup
2720 if opts.get('no_backup'):
2720 if opts.get('no_backup'):
2721 backup = check = discard
2721 backup = check = discard
2722
2722
2723 backupanddel = actions['remove']
2723 backupanddel = actions['remove']
2724 if not opts.get('no_backup'):
2724 if not opts.get('no_backup'):
2725 backupanddel = actions['drop']
2725 backupanddel = actions['drop']
2726
2726
2727 disptable = (
2727 disptable = (
2728 # dispatch table:
2728 # dispatch table:
2729 # file state
2729 # file state
2730 # action
2730 # action
2731 # make backup
2731 # make backup
2732
2732
2733 ## Sets that result in changes to files on disk
2733 ## Sets that result in changes to files on disk
2734 # Modified compared to target, no local change
2734 # Modified compared to target, no local change
2735 (modified, actions['revert'], discard),
2735 (modified, actions['revert'], discard),
2736 # Modified compared to target, but local file is deleted
2736 # Modified compared to target, but local file is deleted
2737 (deleted, actions['revert'], discard),
2737 (deleted, actions['revert'], discard),
2738 # Modified compared to target, local change
2738 # Modified compared to target, local change
2739 (dsmodified, actions['revert'], backup),
2739 (dsmodified, actions['revert'], backup),
2740 # Added since target
2740 # Added since target
2741 (added, actions['remove'], discard),
2741 (added, actions['remove'], discard),
2742 # Added in working directory
2742 # Added in working directory
2743 (dsadded, actions['forget'], discard),
2743 (dsadded, actions['forget'], discard),
2744 # Added since target, have local modification
2744 # Added since target, have local modification
2745 (modadded, backupanddel, backup),
2745 (modadded, backupanddel, backup),
2746 # Added since target but file is missing in working directory
2746 # Added since target but file is missing in working directory
2747 (deladded, actions['drop'], discard),
2747 (deladded, actions['drop'], discard),
2748 # Removed since target, before working copy parent
2748 # Removed since target, before working copy parent
2749 (removed, actions['add'], discard),
2749 (removed, actions['add'], discard),
2750 # Same as `removed` but an unknown file exists at the same path
2750 # Same as `removed` but an unknown file exists at the same path
2751 (removunk, actions['add'], check),
2751 (removunk, actions['add'], check),
2752 # Removed since target, marked as such in working copy parent
2752 # Removed since target, marked as such in working copy parent
2753 (dsremoved, actions['undelete'], discard),
2753 (dsremoved, actions['undelete'], discard),
2754 # Same as `dsremoved` but an unknown file exists at the same path
2754 # Same as `dsremoved` but an unknown file exists at the same path
2755 (dsremovunk, actions['undelete'], check),
2755 (dsremovunk, actions['undelete'], check),
2756 ## the following sets do not result in any file changes
2756 ## the following sets do not result in any file changes
2757 # File with no modification
2757 # File with no modification
2758 (clean, actions['noop'], discard),
2758 (clean, actions['noop'], discard),
2759 # Existing file, not tracked anywhere
2759 # Existing file, not tracked anywhere
2760 (unknown, actions['unknown'], discard),
2760 (unknown, actions['unknown'], discard),
2761 )
2761 )
2762
2762
2763 needdata = ('revert', 'add', 'undelete')
2763 needdata = ('revert', 'add', 'undelete')
2764 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
2764 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
2765
2765
2766 wctx = repo[None]
2766 wctx = repo[None]
2767 for abs, (rel, exact) in sorted(names.items()):
2767 for abs, (rel, exact) in sorted(names.items()):
2768 # target file to be touched on disk (relative to cwd)
2768 # target file to be touched on disk (relative to cwd)
2769 target = repo.wjoin(abs)
2769 target = repo.wjoin(abs)
2770 # search for the entry in the dispatch table.
2770 # search for the entry in the dispatch table.
2771 # if the file is in any of these sets, it was touched in the working
2771 # if the file is in any of these sets, it was touched in the working
2772 # directory parent and we are sure it needs to be reverted.
2772 # directory parent and we are sure it needs to be reverted.
2773 for table, (xlist, msg), dobackup in disptable:
2773 for table, (xlist, msg), dobackup in disptable:
2774 if abs not in table:
2774 if abs not in table:
2775 continue
2775 continue
2776 if xlist is not None:
2776 if xlist is not None:
2777 xlist.append(abs)
2777 xlist.append(abs)
2778 if dobackup and (backup <= dobackup
2778 if dobackup and (backup <= dobackup
2779 or wctx[abs].cmp(ctx[abs])):
2779 or wctx[abs].cmp(ctx[abs])):
2780 bakname = "%s.orig" % rel
2780 bakname = "%s.orig" % rel
2781 ui.note(_('saving current version of %s as %s\n') %
2781 ui.note(_('saving current version of %s as %s\n') %
2782 (rel, bakname))
2782 (rel, bakname))
2783 if not opts.get('dry_run'):
2783 if not opts.get('dry_run'):
2784 util.rename(target, bakname)
2784 util.rename(target, bakname)
2785 if ui.verbose or not exact:
2785 if ui.verbose or not exact:
2786 if not isinstance(msg, basestring):
2786 if not isinstance(msg, basestring):
2787 msg = msg(abs)
2787 msg = msg(abs)
2788 ui.status(msg % rel)
2788 ui.status(msg % rel)
2789 elif exact:
2789 elif exact:
2790 ui.warn(msg % rel)
2790 ui.warn(msg % rel)
2791 break
2791 break
2792
2792
2793
2793
2794 if not opts.get('dry_run'):
2794 if not opts.get('dry_run'):
2795 _performrevert(repo, parents, ctx, actions)
2795 _performrevert(repo, parents, ctx, actions)
2796
2796
2797 # get the list of subrepos that must be reverted
2797 # get the list of subrepos that must be reverted
2798 subrepomatch = scmutil.match(ctx, pats, opts)
2798 subrepomatch = scmutil.match(ctx, pats, opts)
2799 targetsubs = sorted(s for s in ctx.substate if subrepomatch(s))
2799 targetsubs = sorted(s for s in ctx.substate if subrepomatch(s))
2800
2800
2801 if targetsubs:
2801 if targetsubs:
2802 # Revert the subrepos on the revert list
2802 # Revert the subrepos on the revert list
2803 for sub in targetsubs:
2803 for sub in targetsubs:
2804 ctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
2804 ctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
2805 finally:
2805 finally:
2806 wlock.release()
2806 wlock.release()
2807
2807
2808 def _revertprefetch(repo, ctx, *files):
2808 def _revertprefetch(repo, ctx, *files):
2809 """Let extension changing the storage layer prefetch content"""
2809 """Let extension changing the storage layer prefetch content"""
2810 pass
2810 pass
2811
2811
2812 def _performrevert(repo, parents, ctx, actions):
2812 def _performrevert(repo, parents, ctx, actions):
2813 """function that actually perform all the actions computed for revert
2813 """function that actually perform all the actions computed for revert
2814
2814
2815 This is an independent function to let extension to plug in and react to
2815 This is an independent function to let extension to plug in and react to
2816 the imminent revert.
2816 the imminent revert.
2817
2817
2818 Make sure you have the working directory locked when calling this function.
2818 Make sure you have the working directory locked when calling this function.
2819 """
2819 """
2820 parent, p2 = parents
2820 parent, p2 = parents
2821 node = ctx.node()
2821 node = ctx.node()
2822 def checkout(f):
2822 def checkout(f):
2823 fc = ctx[f]
2823 fc = ctx[f]
2824 repo.wwrite(f, fc.data(), fc.flags())
2824 repo.wwrite(f, fc.data(), fc.flags())
2825
2825
2826 audit_path = pathutil.pathauditor(repo.root)
2826 audit_path = pathutil.pathauditor(repo.root)
2827 for f in actions['forget'][0]:
2827 for f in actions['forget'][0]:
2828 repo.dirstate.drop(f)
2828 repo.dirstate.drop(f)
2829 for f in actions['remove'][0]:
2829 for f in actions['remove'][0]:
2830 audit_path(f)
2830 audit_path(f)
2831 util.unlinkpath(repo.wjoin(f))
2831 util.unlinkpath(repo.wjoin(f))
2832 repo.dirstate.remove(f)
2832 repo.dirstate.remove(f)
2833 for f in actions['drop'][0]:
2833 for f in actions['drop'][0]:
2834 audit_path(f)
2834 audit_path(f)
2835 repo.dirstate.remove(f)
2835 repo.dirstate.remove(f)
2836
2836
2837 normal = None
2837 normal = None
2838 if node == parent:
2838 if node == parent:
2839 # We're reverting to our parent. If possible, we'd like status
2839 # We're reverting to our parent. If possible, we'd like status
2840 # to report the file as clean. We have to use normallookup for
2840 # to report the file as clean. We have to use normallookup for
2841 # merges to avoid losing information about merged/dirty files.
2841 # merges to avoid losing information about merged/dirty files.
2842 if p2 != nullid:
2842 if p2 != nullid:
2843 normal = repo.dirstate.normallookup
2843 normal = repo.dirstate.normallookup
2844 else:
2844 else:
2845 normal = repo.dirstate.normal
2845 normal = repo.dirstate.normal
2846 for f in actions['revert'][0]:
2846 for f in actions['revert'][0]:
2847 checkout(f)
2847 checkout(f)
2848 if normal:
2848 if normal:
2849 normal(f)
2849 normal(f)
2850
2850
2851 for f in actions['add'][0]:
2851 for f in actions['add'][0]:
2852 checkout(f)
2852 checkout(f)
2853 repo.dirstate.add(f)
2853 repo.dirstate.add(f)
2854
2854
2855 normal = repo.dirstate.normallookup
2855 normal = repo.dirstate.normallookup
2856 if node == parent and p2 == nullid:
2856 if node == parent and p2 == nullid:
2857 normal = repo.dirstate.normal
2857 normal = repo.dirstate.normal
2858 for f in actions['undelete'][0]:
2858 for f in actions['undelete'][0]:
2859 checkout(f)
2859 checkout(f)
2860 normal(f)
2860 normal(f)
2861
2861
2862 copied = copies.pathcopies(repo[parent], ctx)
2862 copied = copies.pathcopies(repo[parent], ctx)
2863
2863
2864 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
2864 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
2865 if f in copied:
2865 if f in copied:
2866 repo.dirstate.copy(copied[f], f)
2866 repo.dirstate.copy(copied[f], f)
2867
2867
2868 def command(table):
2868 def command(table):
2869 """Returns a function object to be used as a decorator for making commands.
2869 """Returns a function object to be used as a decorator for making commands.
2870
2870
2871 This function receives a command table as its argument. The table should
2871 This function receives a command table as its argument. The table should
2872 be a dict.
2872 be a dict.
2873
2873
2874 The returned function can be used as a decorator for adding commands
2874 The returned function can be used as a decorator for adding commands
2875 to that command table. This function accepts multiple arguments to define
2875 to that command table. This function accepts multiple arguments to define
2876 a command.
2876 a command.
2877
2877
2878 The first argument is the command name.
2878 The first argument is the command name.
2879
2879
2880 The options argument is an iterable of tuples defining command arguments.
2880 The options argument is an iterable of tuples defining command arguments.
2881 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
2881 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
2882
2882
2883 The synopsis argument defines a short, one line summary of how to use the
2883 The synopsis argument defines a short, one line summary of how to use the
2884 command. This shows up in the help output.
2884 command. This shows up in the help output.
2885
2885
2886 The norepo argument defines whether the command does not require a
2886 The norepo argument defines whether the command does not require a
2887 local repository. Most commands operate against a repository, thus the
2887 local repository. Most commands operate against a repository, thus the
2888 default is False.
2888 default is False.
2889
2889
2890 The optionalrepo argument defines whether the command optionally requires
2890 The optionalrepo argument defines whether the command optionally requires
2891 a local repository.
2891 a local repository.
2892
2892
2893 The inferrepo argument defines whether to try to find a repository from the
2893 The inferrepo argument defines whether to try to find a repository from the
2894 command line arguments. If True, arguments will be examined for potential
2894 command line arguments. If True, arguments will be examined for potential
2895 repository locations. See ``findrepo()``. If a repository is found, it
2895 repository locations. See ``findrepo()``. If a repository is found, it
2896 will be used.
2896 will be used.
2897 """
2897 """
2898 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
2898 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
2899 inferrepo=False):
2899 inferrepo=False):
2900 def decorator(func):
2900 def decorator(func):
2901 if synopsis:
2901 if synopsis:
2902 table[name] = func, list(options), synopsis
2902 table[name] = func, list(options), synopsis
2903 else:
2903 else:
2904 table[name] = func, list(options)
2904 table[name] = func, list(options)
2905
2905
2906 if norepo:
2906 if norepo:
2907 # Avoid import cycle.
2907 # Avoid import cycle.
2908 import commands
2908 import commands
2909 commands.norepo += ' %s' % ' '.join(parsealiases(name))
2909 commands.norepo += ' %s' % ' '.join(parsealiases(name))
2910
2910
2911 if optionalrepo:
2911 if optionalrepo:
2912 import commands
2912 import commands
2913 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
2913 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
2914
2914
2915 if inferrepo:
2915 if inferrepo:
2916 import commands
2916 import commands
2917 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
2917 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
2918
2918
2919 return func
2919 return func
2920 return decorator
2920 return decorator
2921
2921
2922 return cmd
2922 return cmd
2923
2923
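# Illustrative sketch (not part of this module): a third-party extension
# would typically use the decorator returned by command() as below; the
# command name, option tuple and function body are hypothetical.
#
#   from mercurial import cmdutil
#   from mercurial.i18n import _
#
#   cmdtable = {}
#   command = cmdutil.command(cmdtable)
#
#   @command('hello',
#            [('g', 'greeting', 'Hello', _('greeting to print'))],
#            _('hg hello [-g TEXT]'),
#            norepo=True)
#   def hello(ui, **opts):
#       # print the configured greeting; runs without a local repository
#       ui.write('%s, world!\n' % opts['greeting'])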
2924 # a list of (ui, repo, otherpeer, opts, missing) functions called by
2924 # a list of (ui, repo, otherpeer, opts, missing) functions called by
2925 # commands.outgoing. "missing" is the "missing" attribute of the result
2925 # commands.outgoing. "missing" is the "missing" attribute of the result
2926 # of "findcommonoutgoing()"
2926 # of "findcommonoutgoing()"
2927 outgoinghooks = util.hooks()
2927 outgoinghooks = util.hooks()
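# Illustrative sketch (not part of this module): an extension can register a
# function matching the (ui, repo, otherpeer, opts, missing) signature
# described above; 'myext' and 'reportoutgoing' are hypothetical names.
#
#   def reportoutgoing(ui, repo, otherpeer, opts, missing):
#       # called by commands.outgoing with the changesets missing remotely
#       ui.note('%d outgoing changeset(s)\n' % len(missing))
#
#   cmdutil.outgoinghooks.add('myext', reportoutgoing)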
2928
2928
2929 # a list of (ui, repo) functions called by commands.summary
2929 # a list of (ui, repo) functions called by commands.summary
2930 summaryhooks = util.hooks()
2930 summaryhooks = util.hooks()
2931
2931
2932 # a list of (ui, repo, opts, changes) functions called by commands.summary.
2932 # a list of (ui, repo, opts, changes) functions called by commands.summary.
2933 #
2933 #
2934 # functions should return tuple of booleans below, if 'changes' is None:
2934 # functions should return tuple of booleans below, if 'changes' is None:
2935 # (whether-incomings-are-needed, whether-outgoings-are-needed)
2935 # (whether-incomings-are-needed, whether-outgoings-are-needed)
2936 #
2936 #
2937 # otherwise, 'changes' is a tuple of tuples below:
2937 # otherwise, 'changes' is a tuple of tuples below:
2938 # - (sourceurl, sourcebranch, sourcepeer, incoming)
2938 # - (sourceurl, sourcebranch, sourcepeer, incoming)
2939 # - (desturl, destbranch, destpeer, outgoing)
2939 # - (desturl, destbranch, destpeer, outgoing)
2940 summaryremotehooks = util.hooks()
2940 summaryremotehooks = util.hooks()
2941
2941
2942 # A list of state files kept by multistep operations like graft.
2942 # A list of state files kept by multistep operations like graft.
2943 # Since graft cannot be aborted, it is considered 'clearable' by update.
2943 # Since graft cannot be aborted, it is considered 'clearable' by update.
2944 # note: bisect is intentionally excluded
2944 # note: bisect is intentionally excluded
2945 # (state file, clearable, allowcommit, error, hint)
2945 # (state file, clearable, allowcommit, error, hint)
2946 unfinishedstates = [
2946 unfinishedstates = [
2947 ('graftstate', True, False, _('graft in progress'),
2947 ('graftstate', True, False, _('graft in progress'),
2948 _("use 'hg graft --continue' or 'hg update' to abort")),
2948 _("use 'hg graft --continue' or 'hg update' to abort")),
2949 ('updatestate', True, False, _('last update was interrupted'),
2949 ('updatestate', True, False, _('last update was interrupted'),
2950 _("use 'hg update' to get a consistent checkout"))
2950 _("use 'hg update' to get a consistent checkout"))
2951 ]
2951 ]
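# Illustrative sketch (not part of this changeset): an extension keeping its
# own multistep state file could append an entry in the same
# (state file, clearable, allowcommit, error, hint) format so that
# checkunfinished() and clearunfinished() below know about it; the file name
# and messages are hypothetical.
#
#   cmdutil.unfinishedstates.append(
#       ('examplestate', True, False, _('example operation in progress'),
#        _("use 'hg example --continue' or 'hg example --abort'")))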
2952
2952
2953 def checkunfinished(repo, commit=False):
2953 def checkunfinished(repo, commit=False):
2954 '''Look for an unfinished multistep operation, like graft, and abort
2954 '''Look for an unfinished multistep operation, like graft, and abort
2955 if found. It's probably good to check this right before
2955 if found. It's probably good to check this right before
2956 bailifchanged().
2956 bailifchanged().
2957 '''
2957 '''
2958 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2958 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2959 if commit and allowcommit:
2959 if commit and allowcommit:
2960 continue
2960 continue
2961 if repo.vfs.exists(f):
2961 if repo.vfs.exists(f):
2962 raise util.Abort(msg, hint=hint)
2962 raise util.Abort(msg, hint=hint)
2963
2963
2964 def clearunfinished(repo):
2964 def clearunfinished(repo):
2965 '''Check for unfinished operations (as above), and clear the ones
2965 '''Check for unfinished operations (as above), and clear the ones
2966 that are clearable.
2966 that are clearable.
2967 '''
2967 '''
2968 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2968 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2969 if not clearable and repo.vfs.exists(f):
2969 if not clearable and repo.vfs.exists(f):
2970 raise util.Abort(msg, hint=hint)
2970 raise util.Abort(msg, hint=hint)
2971 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2971 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2972 if clearable and repo.vfs.exists(f):
2972 if clearable and repo.vfs.exists(f):
2973 util.unlink(repo.join(f))
2973 util.unlink(repo.join(f))