##// END OF EJS Templates
commit: use `dirstate.change_files` to scope the associated `addremove`...
marmoute -
r50924:28dfb2df default
parent child Browse files
Show More
@@ -1,1890 +1,1903 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
10
11 import contextlib
11 import contextlib
12 import copy
12 import copy
13 import os
13 import os
14
14
15 from mercurial.i18n import _
15 from mercurial.i18n import _
16
16
17 from mercurial.pycompat import open
17 from mercurial.pycompat import open
18
18
19 from mercurial.hgweb import webcommands
19 from mercurial.hgweb import webcommands
20
20
21 from mercurial import (
21 from mercurial import (
22 archival,
22 archival,
23 cmdutil,
23 cmdutil,
24 copies as copiesmod,
24 copies as copiesmod,
25 dirstate,
25 dirstate,
26 error,
26 error,
27 exchange,
27 exchange,
28 extensions,
28 extensions,
29 exthelper,
29 exthelper,
30 filemerge,
30 filemerge,
31 hg,
31 hg,
32 logcmdutil,
32 logcmdutil,
33 match as matchmod,
33 match as matchmod,
34 merge,
34 merge,
35 mergestate as mergestatemod,
35 mergestate as mergestatemod,
36 pathutil,
36 pathutil,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 smartset,
39 smartset,
40 subrepo,
40 subrepo,
41 url as urlmod,
41 url as urlmod,
42 util,
42 util,
43 )
43 )
44
44
45 from mercurial.upgrade_utils import (
45 from mercurial.upgrade_utils import (
46 actions as upgrade_actions,
46 actions as upgrade_actions,
47 )
47 )
48
48
49 from . import (
49 from . import (
50 lfcommands,
50 lfcommands,
51 lfutil,
51 lfutil,
52 storefactory,
52 storefactory,
53 )
53 )
54
54
# Short aliases for the merge actions manipulated by the wrapped
# merge/calculateupdates code further down in this module.
ACTION_ADD = mergestatemod.ACTION_ADD
ACTION_DELETED_CHANGED = mergestatemod.ACTION_DELETED_CHANGED
ACTION_GET = mergestatemod.ACTION_GET
ACTION_KEEP = mergestatemod.ACTION_KEEP
ACTION_REMOVE = mergestatemod.ACTION_REMOVE

# Extension helper used to register every command/function wrapper below.
eh = exthelper.exthelper()

# Context manager that switches a repo into largefiles-aware status mode.
lfstatus = lfutil.lfstatus

# Custom merge action: mark a largefile as removed ('lfmr').
MERGE_ACTION_LARGEFILE_MARK_REMOVED = mergestatemod.MergeAction('lfmr')
66
66
67 # -- Utility functions: commonly/repeatedly needed functionality ---------------
67 # -- Utility functions: commonly/repeatedly needed functionality ---------------
68
68
69
69
def composelargefilematcher(match, manifest):
    """Return a copy of *match* narrowed down to largefiles only.

    A file counts as a largefile when its standin is present in
    *manifest*.  The returned matcher keeps the original match function
    but additionally requires the largefile test to pass.
    """
    narrowed = copy.copy(match)

    def is_largefile(f):
        # Largefiles are identified by their tracked standin.
        return lfutil.standin(f) in manifest

    narrowed._files = [f for f in narrowed._files if is_largefile(f)]
    narrowed._fileset = set(narrowed._files)
    narrowed.always = lambda: False
    inner = narrowed.matchfn
    narrowed.matchfn = lambda f: is_largefile(f) and inner(f)
    return narrowed
81
81
82
82
def composenormalfilematcher(match, manifest, exclude=None):
    """Return a copy of *match* restricted to non-largefiles.

    Standins, files whose standin is in *manifest*, and anything listed
    in *exclude* are all filtered out.
    """
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    narrowed = copy.copy(match)

    def is_normal(f):
        # Reject standins, files backed by a standin, and excluded names.
        if lfutil.isstandin(f):
            return False
        if lfutil.standin(f) in manifest:
            return False
        return f not in excluded

    narrowed._files = [f for f in narrowed._files if is_normal(f)]
    narrowed._fileset = set(narrowed._files)
    narrowed.always = lambda: False
    inner = narrowed.matchfn
    narrowed.matchfn = lambda f: is_normal(f) and inner(f)
    return narrowed
98
98
99
99
def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
    # Decide which matched files should be added as largefiles (because of
    # --large, their size exceeding the configured minimum, or the
    # largefiles.patterns config), write standins for them, and track them
    # in the largefiles dirstate.  Returns a pair ``(added, bad)``.
    large = opts.get('large')
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
    )

    # Optional matcher built from the largefiles.patterns config items.
    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, b'patterns')
        if lfpats:
            lfmatcher = matchmod.match(repo.root, b'', list(lfpats))

    lfnames = []
    m = matcher

    wctx = repo[None]
    for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            # In case the file was removed previously, but not committed
            # (issue3507)
            if not repo.wvfs.exists(f):
                continue

            abovemin = (
                lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
            )
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))

    bad = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    with repo.wlock():
        if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(
                    repo,
                    standinname,
                    hash=b'',
                    executable=lfutil.getexecutable(repo.wjoin(f)),
                )
                standins.append(standinname)
                lfdirstate.set_tracked(f)
            lfdirstate.write(repo.currenttransaction())
            # Add the standins to the working context; any standin the add
            # rejected whose largefile name was explicitly listed by the
            # user is reported back as "bad".
            bad += [
                lfutil.splitstandin(f)
                for f in repo[None].add(standins)
                if f in m.files()
            ]

    added = [f for f in lfnames if f not in bad]
    return added, bad
170
170
171
171
def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
    # Remove the largefiles selected by ``matcher``: warn about files that
    # cannot be removed, unlink the working-copy files (unless --after or a
    # dry run), drop their standins, and update the largefiles dirstate.
    # Returns a non-zero int when any warning was issued.
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    with lfstatus(repo):
        s = repo.status(match=m, clean=not isaddremove)
    manifest = repo[None].manifest()
    # Keep only status entries that correspond to tracked largefiles.
    modified, added, deleted, clean = [
        [f for f in list if lfutil.standin(f) in manifest]
        for list in (s.modified, s.added, s.deleted, s.clean)
    ]

    def warn(files, msg):
        # Emit msg per file; return 1 if anything was warned about.
        for f in files:
            ui.warn(msg % uipathfn(f))
        return int(len(files) > 0)

    if after:
        remove = deleted
        result = warn(
            modified + added + clean, _(b'not removing %s: file still exists\n')
        )
    else:
        remove = deleted + clean
        result = warn(
            modified,
            _(
                b'not removing %s: file is modified (use -f'
                b' to force removal)\n'
            ),
        )
        result = (
            warn(
                added,
                _(
                    b'not removing %s: file has been marked for add'
                    b' (use forget to undo)\n'
                ),
            )
            or result
        )

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                ui.status(_(b'removing %s\n') % uipathfn(f))

            if not dryrun:
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if dryrun:
            return result

        # From here on ``remove`` holds standin names, not largefile names.
        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfdirstate.set_untracked(lfutil.splitstandin(f))

        lfdirstate.write(repo.currenttransaction())

    return result
242
242
243
243
244 # For overriding mercurial.hgweb.webcommands so that largefiles will
244 # For overriding mercurial.hgweb.webcommands so that largefiles will
245 # appear at their right place in the manifests.
245 # appear at their right place in the manifests.
@eh.wrapfunction(webcommands, b'decodepath')
def decodepath(orig, path):
    """Map a standin path back to its largefile name for hgweb display."""
    largefile = lfutil.splitstandin(path)
    return largefile if largefile else path
249
249
250
250
251 # -- Wrappers: modify existing commands --------------------------------
251 # -- Wrappers: modify existing commands --------------------------------
252
252
253
253
@eh.wrapcommand(
    b'add',
    opts=[
        (b'', b'large', None, _(b'add as largefile')),
        (b'', b'normal', None, _(b'add as normal file')),
        (
            b'',
            b'lfsize',
            b'',
            _(
                b'add all files above this size (in megabytes) '
                b'as largefiles (default: 10)'
            ),
        ),
    ],
)
def overrideadd(orig, ui, repo, *pats, **opts):
    """Reject the contradictory --normal/--large combination, then defer
    to the original ``hg add`` implementation."""
    wants_normal = opts.get('normal')
    wants_large = opts.get('large')
    if wants_normal and wants_large:
        raise error.Abort(_(b'--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)
274
274
275
275
@eh.wrapfunction(cmdutil, b'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
    """Add largefiles first, then let the original add handle the rest.

    Returns the combined list of files that could not be added.
    """
    # The --normal flag short circuits this override
    if opts.get('normal'):
        return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
    # Exclude the just-added largefiles from the normal add.
    normalmatcher = composenormalfilematcher(
        matcher, repo[None].manifest(), ladded
    )
    bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
    bad.extend(lbad)
    return bad
290
290
291
291
@eh.wrapfunction(cmdutil, b'remove')
def cmdutilremove(
    orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
):
    """Remove normal files via the original code, then remove largefiles.

    Returns a non-zero result if either pass reported a problem.
    """
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        after,
        force,
        subrepos,
        dryrun,
    )
    lfresult = removelargefiles(
        ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
    )
    return lfresult or result
314
314
315
315
@eh.wrapfunction(dirstate.dirstate, b'_changing')
@contextlib.contextmanager
def _changing(orig, self, repo, change_type):
    # Keep the largefiles dirstate in the same "changing" state as the main
    # dirstate: when the main dirstate enters a change context, open (if
    # needed) and enter the largefiles sub-dirstate's own change context.
    # ``_sub_dirstate`` is restored to its previous value on exit so nested
    # uses unwind correctly.
    pre = sub_dirstate = getattr(self, '_sub_dirstate', None)
    try:
        # ``_large_file_dirstate`` is truthy when ``self`` IS the
        # largefiles dirstate; in that case there is no sub-dirstate.
        lfd = getattr(self, '_large_file_dirstate', False)
        if sub_dirstate is None and not lfd:
            sub_dirstate = lfutil.openlfdirstate(repo.ui, repo)
            self._sub_dirstate = sub_dirstate
        if not lfd:
            assert self._sub_dirstate is not None
        with orig(self, repo, change_type):
            if sub_dirstate is None:
                yield
            else:
                # Mirror the change context onto the largefiles dirstate.
                with sub_dirstate._changing(repo, change_type):
                    yield
    finally:
        self._sub_dirstate = pre
335
335
336
336
@eh.wrapfunction(subrepo.hgsubrepo, b'status')
def overridestatusfn(orig, repo, rev2, **opts):
    # Run the subrepo status with largefiles-aware reporting enabled.
    with lfstatus(repo._repo):
        return orig(repo, rev2, **opts)
341
341
342
342
@eh.wrapcommand(b'status')
def overridestatus(orig, ui, repo, *pats, **opts):
    # Run ``hg status`` with largefiles-aware reporting enabled.
    with lfstatus(repo):
        return orig(ui, repo, *pats, **opts)
347
347
348
348
@eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    # Evaluate subrepo dirtiness with largefiles-aware status.
    with lfstatus(repo._repo):
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
353
353
354
354
@eh.wrapcommand(b'log')
def overridelog(orig, ui, repo, *pats, **opts):
    # Wrap ``hg log`` so that patterns naming largefiles also match their
    # standins, making log output cover largefile history transparently.
    def overridematchandpats(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            # Rewrite one pattern so it addresses the standin instead,
            # preserving any explicit 'kind:' prefix.  Filesets are left
            # untouched.
            if pat.startswith(b'set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + b':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        cwd = repo.getcwd()
        if cwd:
            hglf = lfutil.shortname
            back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(cwd):
                    f = f[len(back) :]
                else:
                    f = cwd + b'/' + f
                return back + lfutil.standin(f)

        else:

            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)

        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(0, len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == b'.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn

        def lfmatchfn(f):
            # Accept a file if either its largefile name or the path itself
            # matches the original match function.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r

        m.matchfn = lfmatchfn

        ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    oldmatchandpats = scmutil.matchandpats

    def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
        # Case (2): build the diff-file matcher with the UNwrapped
        # matchandpats so diffs are not redirected to standins.
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda ctx: match

    wrappedmatchandpats = extensions.wrappedfunction(
        scmutil, b'matchandpats', overridematchandpats
    )
    wrappedmakefilematcher = extensions.wrappedfunction(
        logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
    )
    with wrappedmatchandpats, wrappedmakefilematcher:
        return orig(ui, repo, *pats, **opts)
474
474
475
475
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Run the normal verify, then optionally verify largefiles too.

    The largefile-specific flags are popped before delegating so the
    original command never sees them.
    """
    check_large = opts.pop('large', False)
    check_all = opts.pop('lfa', False)
    check_contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if check_large or check_all or check_contents:
        result = result or lfcommands.verifylfiles(
            ui, repo, check_all, check_contents
        )
    return result
508
508
509
509
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """Display the largefiles dirstate instead when --large is given."""
    if opts.pop('large', False):
        # Wrap the largefiles dirstate in a minimal repo-like shell so the
        # original debugstate implementation can operate on it unchanged.
        class fakerepo:
            dirstate = lfutil.openlfdirstate(ui, repo)

        target = fakerepo
    else:
        target = repo
    orig(ui, target, *pats, **opts)
524
524
525
525
526 # Before starting the manifest merge, merge.updates will call
526 # Before starting the manifest merge, merge.updates will call
527 # _checkunknownfile to check if there are any files in the merged-in
527 # _checkunknownfile to check if there are any files in the merged-in
528 # changeset that collide with unknown files in the working copy.
528 # changeset that collide with unknown files in the working copy.
529 #
529 #
530 # The largefiles are seen as unknown, so this prevents us from merging
530 # The largefiles are seen as unknown, so this prevents us from merging
531 # in a file 'foo' if we already have a largefile with the same name.
531 # in a file 'foo' if we already have a largefile with the same name.
532 #
532 #
533 # The overridden function filters the unknown files by removing any
533 # The overridden function filters the unknown files by removing any
534 # largefiles. This makes the merge proceed and we can then handle this
534 # largefiles. This makes the merge proceed and we can then handle this
535 # case further in the overridden calculateupdates function below.
535 # case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(
    origfn, dirstate, wvfs, dircache, wctx, mctx, f, f2=None
):
    """Skip the unknown-file collision check for tracked largefiles."""
    normalized = dirstate.normalize(f)
    if lfutil.standin(normalized) in wctx:
        # ``f`` is a largefile known to the working context, not an
        # unknown file — report no collision.
        return False
    return origfn(dirstate, wvfs, dircache, wctx, mctx, f, f2)
543
543
544
544
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Post-process merge actions to resolve largefile/normal-file flips.

    Runs the wrapped calculateupdates, then for every file that changed
    largefile-ness between the parents, prompts the user and rewrites the
    corresponding actions on both the plain name and its standin.
    """
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    # With --clean/forced overwrite there is nothing to reconcile.
    if overwrite:
        return mresult

    # Collect the largefile names involved in this merge: either the
    # action touches a standin whose largefile exists in p1, or it
    # touches a plain name whose standin exists in p1.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))

        if sm in (ACTION_GET, ACTION_DELETED_CHANGED) and lm != ACTION_REMOVE:
            if sm == ACTION_DELETED_CHANGED:
                # Rewrite the merge-style args into get-style (flags, move).
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(
                    lfile, ACTION_REMOVE, None, b'replaced by standin'
                )
                mresult.addfile(standin, ACTION_GET, sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, ACTION_KEEP, None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        ACTION_KEEP,
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        ACTION_REMOVE,
                        None,
                        b'replaced by non-standin',
                    )
        if lm in (ACTION_GET, ACTION_DELETED_CHANGED) and sm != ACTION_REMOVE:
            if lm == ACTION_DELETED_CHANGED:
                # Rewrite the merge-style args into get-style (flags, move).
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        ACTION_KEEP,
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(
                        standin, ACTION_KEEP, None, b'replaces standin'
                    )
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, ACTION_ADD, None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, ACTION_GET, largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    ACTION_REMOVE,
                    None,
                    b'replaced by non-standin',
                )

    return mresult
680
680
681
681
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Record largefile 'mark removed' actions, then delegate to orig.

    Files carrying MERGE_ACTION_LARGEFILE_MARK_REMOVED are dropped from
    the working-copy view of the repo dirstate while their standins stay
    tracked in the largefiles dirstate.
    """
    marked = actions.get(MERGE_ACTION_LARGEFILE_MARK_REMOVED)
    if marked is not None:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        with lfdirstate.changing_parents(repo):
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            for lfile, unused_args, unused_msg in marked:
                repo.dirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=False
                )
                # make sure lfile doesn't get synclfdirstate'd as normal
                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
        lfdirstate.write(repo.currenttransaction())

    return orig(repo, actions, branchmerge, getfiledata)
700
700
701
701
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'filemerge')
def overridefilemerge(
    origfn, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Merge two standins by comparing the largefile hashes they record.

    Non-standins (and change/delete conflicts) fall through to the
    wrapped filemerge. Otherwise the other side's version is taken when
    only it changed, or when the user picks it at the prompt; in every
    standin case the merge is reported as clean (0, no deletions).
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(repo, wctx, mynode, orig, fcd, fco, fca, labels=labels)

    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()

    takeother = False
    if ohash != ahash and ohash != dhash:
        if dhash == ahash:
            # Only the other side changed the largefile: take it silently.
            takeother = True
        else:
            # Both sides diverged from the ancestor: ask the user.
            prompt = _(
                b'largefile %s has a merge conflict\nancestor was %s\n'
                b'you can keep (l)ocal %s or take (o)ther %s.\n'
                b'what do you want to do?'
                b'$$ &Local $$ &Other'
            ) % (lfutil.splitstandin(orig), ahash, dhash, ohash)
            takeother = repo.ui.promptchoice(prompt, 0) == 1
    if takeother:
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0, False
734
734
735
735
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Translate copy records so standin paths appear as largefile paths.

    Both the destination (key) and source (value) of every copy entry
    have any standin prefix stripped; non-standin paths pass through.
    """
    return {
        lfutil.splitstandin(dst) or dst: lfutil.splitstandin(src) or src
        for dst, src in orig(ctx1, ctx2, match=match).items()
    }
745
745
746
746
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename normal files via orig, then do the same for largefiles.

    Runs the wrapped copy twice: once restricted to normal files, once
    with patterns rewritten to standins; finally mirrors each copied
    standin onto its largefile and updates the largefiles dirstate.
    Raises Abort(b'no files to copy') only if both passes matched nothing.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # Wrap scmutil.match so the first pass only sees normal files.
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            if e.message != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # Absolute working-dir path of the standin for a user-relative path.
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Second-pass matcher: match only standins of tracked largefiles.
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            # Refuse to clobber an existing largefile unless --force,
            # and remember every (src, dest) pair for the mirroring below.
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.set_untracked(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                lfdirstate.set_tracked(destlfile)
        lfdirstate.write(repo.currenttransaction())
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result
913
913
914
914
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    """Revert largefiles by reverting their standins, then syncing.

    Under the wlock: refresh/remove standins to mirror the working copy,
    run the wrapped revert with a standin-aware matcher, and finally
    update the largefiles whose standins changed.
    """
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write(repo.currenttransaction())
        # Refresh standins of modified largefiles, drop standins of
        # deleted ones, so the revert below sees the true state.
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        # Snapshot standins so we can tell afterwards which changed.
        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # Matcher that redirects largefile names to their standins.
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
1000
1000
1001
1001
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Pull, then cache largefiles for --lfrev / --all-largefiles revs."""
    nrevs_before = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    nrevs_after = len(repo)

    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append(b'pulled()')

    if lfrevs and nrevs_after > nrevs_before:
        numcached = 0
        # Expose the pre-pull boundary for the pulled() revset expression.
        repo.firstpulled = nrevs_before
        try:
            for rev in logcmdutil.revrange(repo, lfrevs):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                cached, missing = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % numcached)
    return result
1044
1044
1045
1045
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        # Resolved revisions travel to the push operation via opargs.
        opargs = kwargs.setdefault('opargs', {})
        opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)
1065
1065
1066
1066
1067 @eh.wrapfunction(exchange, b'pushoperation')
1067 @eh.wrapfunction(exchange, b'pushoperation')
1068 def exchangepushoperation(orig, *args, **kwargs):
1068 def exchangepushoperation(orig, *args, **kwargs):
1069 """Override pushoperation constructor and store lfrevs parameter"""
1069 """Override pushoperation constructor and store lfrevs parameter"""
1070 lfrevs = kwargs.pop('lfrevs', None)
1070 lfrevs = kwargs.pop('lfrevs', None)
1071 pushop = orig(*args, **kwargs)
1071 pushop = orig(*args, **kwargs)
1072 pushop.lfrevs = lfrevs
1072 pushop.lfrevs = lfrevs
1073 return pushop
1073 return pushop
1074
1074
1075
1075
1076 @eh.revsetpredicate(b'pulled()')
1076 @eh.revsetpredicate(b'pulled()')
1077 def pulledrevsetsymbol(repo, subset, x):
1077 def pulledrevsetsymbol(repo, subset, x):
1078 """Changesets that just has been pulled.
1078 """Changesets that just has been pulled.
1079
1079
1080 Only available with largefiles from pull --lfrev expressions.
1080 Only available with largefiles from pull --lfrev expressions.
1081
1081
1082 .. container:: verbose
1082 .. container:: verbose
1083
1083
1084 Some examples:
1084 Some examples:
1085
1085
1086 - pull largefiles for all new changesets::
1086 - pull largefiles for all new changesets::
1087
1087
1088 hg pull -lfrev "pulled()"
1088 hg pull -lfrev "pulled()"
1089
1089
1090 - pull largefiles for all new branch heads::
1090 - pull largefiles for all new branch heads::
1091
1091
1092 hg pull -lfrev "head(pulled()) and not closed()"
1092 hg pull -lfrev "head(pulled()) and not closed()"
1093
1093
1094 """
1094 """
1095
1095
1096 try:
1096 try:
1097 firstpulled = repo.firstpulled
1097 firstpulled = repo.firstpulled
1098 except AttributeError:
1098 except AttributeError:
1099 raise error.Abort(_(b"pulled() only available in --lfrev"))
1099 raise error.Abort(_(b"pulled() only available in --lfrev"))
1100 return smartset.baseset([r for r in subset if r >= firstpulled])
1100 return smartset.baseset([r for r in subset if r >= firstpulled])
1101
1101
1102
1102
1103 @eh.wrapcommand(
1103 @eh.wrapcommand(
1104 b'clone',
1104 b'clone',
1105 opts=[
1105 opts=[
1106 (
1106 (
1107 b'',
1107 b'',
1108 b'all-largefiles',
1108 b'all-largefiles',
1109 None,
1109 None,
1110 _(b'download all versions of all largefiles'),
1110 _(b'download all versions of all largefiles'),
1111 )
1111 )
1112 ],
1112 ],
1113 )
1113 )
1114 def overrideclone(orig, ui, source, dest=None, **opts):
1114 def overrideclone(orig, ui, source, dest=None, **opts):
1115 d = dest
1115 d = dest
1116 if d is None:
1116 if d is None:
1117 d = hg.defaultdest(source)
1117 d = hg.defaultdest(source)
1118 if opts.get('all_largefiles') and not hg.islocal(d):
1118 if opts.get('all_largefiles') and not hg.islocal(d):
1119 raise error.Abort(
1119 raise error.Abort(
1120 _(b'--all-largefiles is incompatible with non-local destination %s')
1120 _(b'--all-largefiles is incompatible with non-local destination %s')
1121 % d
1121 % d
1122 )
1122 )
1123
1123
1124 return orig(ui, source, dest, **opts)
1124 return orig(ui, source, dest, **opts)
1125
1125
1126
1126
1127 @eh.wrapfunction(hg, b'clone')
1127 @eh.wrapfunction(hg, b'clone')
1128 def hgclone(orig, ui, opts, *args, **kwargs):
1128 def hgclone(orig, ui, opts, *args, **kwargs):
1129 result = orig(ui, opts, *args, **kwargs)
1129 result = orig(ui, opts, *args, **kwargs)
1130
1130
1131 if result is not None:
1131 if result is not None:
1132 sourcerepo, destrepo = result
1132 sourcerepo, destrepo = result
1133 repo = destrepo.local()
1133 repo = destrepo.local()
1134
1134
1135 # When cloning to a remote repo (like through SSH), no repo is available
1135 # When cloning to a remote repo (like through SSH), no repo is available
1136 # from the peer. Therefore the largefiles can't be downloaded and the
1136 # from the peer. Therefore the largefiles can't be downloaded and the
1137 # hgrc can't be updated.
1137 # hgrc can't be updated.
1138 if not repo:
1138 if not repo:
1139 return result
1139 return result
1140
1140
1141 # Caching is implicitly limited to 'rev' option, since the dest repo was
1141 # Caching is implicitly limited to 'rev' option, since the dest repo was
1142 # truncated at that point. The user may expect a download count with
1142 # truncated at that point. The user may expect a download count with
1143 # this option, so attempt whether or not this is a largefile repo.
1143 # this option, so attempt whether or not this is a largefile repo.
1144 if opts.get(b'all_largefiles'):
1144 if opts.get(b'all_largefiles'):
1145 success, missing = lfcommands.downloadlfiles(ui, repo)
1145 success, missing = lfcommands.downloadlfiles(ui, repo)
1146
1146
1147 if missing != 0:
1147 if missing != 0:
1148 return None
1148 return None
1149
1149
1150 return result
1150 return result
1151
1151
1152
1152
1153 @eh.wrapcommand(b'rebase', extension=b'rebase')
1153 @eh.wrapcommand(b'rebase', extension=b'rebase')
1154 def overriderebasecmd(orig, ui, repo, **opts):
1154 def overriderebasecmd(orig, ui, repo, **opts):
1155 if not util.safehasattr(repo, b'_largefilesenabled'):
1155 if not util.safehasattr(repo, b'_largefilesenabled'):
1156 return orig(ui, repo, **opts)
1156 return orig(ui, repo, **opts)
1157
1157
1158 resuming = opts.get('continue')
1158 resuming = opts.get('continue')
1159 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1159 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1160 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1160 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1161 try:
1161 try:
1162 with ui.configoverride(
1162 with ui.configoverride(
1163 {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
1163 {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
1164 ):
1164 ):
1165 return orig(ui, repo, **opts)
1165 return orig(ui, repo, **opts)
1166 finally:
1166 finally:
1167 repo._lfstatuswriters.pop()
1167 repo._lfstatuswriters.pop()
1168 repo._lfcommithooks.pop()
1168 repo._lfcommithooks.pop()
1169
1169
1170
1170
1171 @eh.extsetup
1171 @eh.extsetup
1172 def overriderebase(ui):
1172 def overriderebase(ui):
1173 try:
1173 try:
1174 rebase = extensions.find(b'rebase')
1174 rebase = extensions.find(b'rebase')
1175 except KeyError:
1175 except KeyError:
1176 pass
1176 pass
1177 else:
1177 else:
1178
1178
1179 def _dorebase(orig, *args, **kwargs):
1179 def _dorebase(orig, *args, **kwargs):
1180 kwargs['inmemory'] = False
1180 kwargs['inmemory'] = False
1181 return orig(*args, **kwargs)
1181 return orig(*args, **kwargs)
1182
1182
1183 extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1183 extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1184
1184
1185
1185
1186 @eh.wrapcommand(b'archive')
1186 @eh.wrapcommand(b'archive')
1187 def overridearchivecmd(orig, ui, repo, dest, **opts):
1187 def overridearchivecmd(orig, ui, repo, dest, **opts):
1188 with lfstatus(repo.unfiltered()):
1188 with lfstatus(repo.unfiltered()):
1189 return orig(ui, repo.unfiltered(), dest, **opts)
1189 return orig(ui, repo.unfiltered(), dest, **opts)
1190
1190
1191
1191
1192 @eh.wrapfunction(webcommands, b'archive')
1192 @eh.wrapfunction(webcommands, b'archive')
1193 def hgwebarchive(orig, web):
1193 def hgwebarchive(orig, web):
1194 with lfstatus(web.repo):
1194 with lfstatus(web.repo):
1195 return orig(web)
1195 return orig(web)
1196
1196
1197
1197
1198 @eh.wrapfunction(archival, b'archive')
1198 @eh.wrapfunction(archival, b'archive')
1199 def overridearchive(
1199 def overridearchive(
1200 orig,
1200 orig,
1201 repo,
1201 repo,
1202 dest,
1202 dest,
1203 node,
1203 node,
1204 kind,
1204 kind,
1205 decode=True,
1205 decode=True,
1206 match=None,
1206 match=None,
1207 prefix=b'',
1207 prefix=b'',
1208 mtime=None,
1208 mtime=None,
1209 subrepos=None,
1209 subrepos=None,
1210 ):
1210 ):
1211 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1211 # For some reason setting repo.lfstatus in hgwebarchive only changes the
1212 # unfiltered repo's attr, so check that as well.
1212 # unfiltered repo's attr, so check that as well.
1213 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1213 if not repo.lfstatus and not repo.unfiltered().lfstatus:
1214 return orig(
1214 return orig(
1215 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1215 repo, dest, node, kind, decode, match, prefix, mtime, subrepos
1216 )
1216 )
1217
1217
1218 # No need to lock because we are only reading history and
1218 # No need to lock because we are only reading history and
1219 # largefile caches, neither of which are modified.
1219 # largefile caches, neither of which are modified.
1220 if node is not None:
1220 if node is not None:
1221 lfcommands.cachelfiles(repo.ui, repo, node)
1221 lfcommands.cachelfiles(repo.ui, repo, node)
1222
1222
1223 if kind not in archival.archivers:
1223 if kind not in archival.archivers:
1224 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1224 raise error.Abort(_(b"unknown archive type '%s'") % kind)
1225
1225
1226 ctx = repo[node]
1226 ctx = repo[node]
1227
1227
1228 if kind == b'files':
1228 if kind == b'files':
1229 if prefix:
1229 if prefix:
1230 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1230 raise error.Abort(_(b'cannot give prefix when archiving to files'))
1231 else:
1231 else:
1232 prefix = archival.tidyprefix(dest, kind, prefix)
1232 prefix = archival.tidyprefix(dest, kind, prefix)
1233
1233
1234 def write(name, mode, islink, getdata):
1234 def write(name, mode, islink, getdata):
1235 if match and not match(name):
1235 if match and not match(name):
1236 return
1236 return
1237 data = getdata()
1237 data = getdata()
1238 if decode:
1238 if decode:
1239 data = repo.wwritedata(name, data)
1239 data = repo.wwritedata(name, data)
1240 archiver.addfile(prefix + name, mode, islink, data)
1240 archiver.addfile(prefix + name, mode, islink, data)
1241
1241
1242 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1242 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
1243
1243
1244 if repo.ui.configbool(b"ui", b"archivemeta"):
1244 if repo.ui.configbool(b"ui", b"archivemeta"):
1245 write(
1245 write(
1246 b'.hg_archival.txt',
1246 b'.hg_archival.txt',
1247 0o644,
1247 0o644,
1248 False,
1248 False,
1249 lambda: archival.buildmetadata(ctx),
1249 lambda: archival.buildmetadata(ctx),
1250 )
1250 )
1251
1251
1252 for f in ctx:
1252 for f in ctx:
1253 ff = ctx.flags(f)
1253 ff = ctx.flags(f)
1254 getdata = ctx[f].data
1254 getdata = ctx[f].data
1255 lfile = lfutil.splitstandin(f)
1255 lfile = lfutil.splitstandin(f)
1256 if lfile is not None:
1256 if lfile is not None:
1257 if node is not None:
1257 if node is not None:
1258 path = lfutil.findfile(repo, getdata().strip())
1258 path = lfutil.findfile(repo, getdata().strip())
1259
1259
1260 if path is None:
1260 if path is None:
1261 raise error.Abort(
1261 raise error.Abort(
1262 _(
1262 _(
1263 b'largefile %s not found in repo store or system cache'
1263 b'largefile %s not found in repo store or system cache'
1264 )
1264 )
1265 % lfile
1265 % lfile
1266 )
1266 )
1267 else:
1267 else:
1268 path = lfile
1268 path = lfile
1269
1269
1270 f = lfile
1270 f = lfile
1271
1271
1272 getdata = lambda: util.readfile(path)
1272 getdata = lambda: util.readfile(path)
1273 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1273 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1274
1274
1275 if subrepos:
1275 if subrepos:
1276 for subpath in sorted(ctx.substate):
1276 for subpath in sorted(ctx.substate):
1277 sub = ctx.workingsub(subpath)
1277 sub = ctx.workingsub(subpath)
1278 submatch = matchmod.subdirmatcher(subpath, match)
1278 submatch = matchmod.subdirmatcher(subpath, match)
1279 subprefix = prefix + subpath + b'/'
1279 subprefix = prefix + subpath + b'/'
1280
1280
1281 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1281 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1282 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1282 # infer and possibly set lfstatus in hgsubrepoarchive. That would
1283 # allow only hgsubrepos to set this, instead of the current scheme
1283 # allow only hgsubrepos to set this, instead of the current scheme
1284 # where the parent sets this for the child.
1284 # where the parent sets this for the child.
1285 with (
1285 with (
1286 util.safehasattr(sub, '_repo')
1286 util.safehasattr(sub, '_repo')
1287 and lfstatus(sub._repo)
1287 and lfstatus(sub._repo)
1288 or util.nullcontextmanager()
1288 or util.nullcontextmanager()
1289 ):
1289 ):
1290 sub.archive(archiver, subprefix, submatch)
1290 sub.archive(archiver, subprefix, submatch)
1291
1291
1292 archiver.done()
1292 archiver.done()
1293
1293
1294
1294
1295 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1295 @eh.wrapfunction(subrepo.hgsubrepo, b'archive')
1296 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1296 def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
1297 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1297 lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
1298 if not lfenabled or not repo._repo.lfstatus:
1298 if not lfenabled or not repo._repo.lfstatus:
1299 return orig(repo, archiver, prefix, match, decode)
1299 return orig(repo, archiver, prefix, match, decode)
1300
1300
1301 repo._get(repo._state + (b'hg',))
1301 repo._get(repo._state + (b'hg',))
1302 rev = repo._state[1]
1302 rev = repo._state[1]
1303 ctx = repo._repo[rev]
1303 ctx = repo._repo[rev]
1304
1304
1305 if ctx.node() is not None:
1305 if ctx.node() is not None:
1306 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1306 lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())
1307
1307
1308 def write(name, mode, islink, getdata):
1308 def write(name, mode, islink, getdata):
1309 # At this point, the standin has been replaced with the largefile name,
1309 # At this point, the standin has been replaced with the largefile name,
1310 # so the normal matcher works here without the lfutil variants.
1310 # so the normal matcher works here without the lfutil variants.
1311 if match and not match(f):
1311 if match and not match(f):
1312 return
1312 return
1313 data = getdata()
1313 data = getdata()
1314 if decode:
1314 if decode:
1315 data = repo._repo.wwritedata(name, data)
1315 data = repo._repo.wwritedata(name, data)
1316
1316
1317 archiver.addfile(prefix + name, mode, islink, data)
1317 archiver.addfile(prefix + name, mode, islink, data)
1318
1318
1319 for f in ctx:
1319 for f in ctx:
1320 ff = ctx.flags(f)
1320 ff = ctx.flags(f)
1321 getdata = ctx[f].data
1321 getdata = ctx[f].data
1322 lfile = lfutil.splitstandin(f)
1322 lfile = lfutil.splitstandin(f)
1323 if lfile is not None:
1323 if lfile is not None:
1324 if ctx.node() is not None:
1324 if ctx.node() is not None:
1325 path = lfutil.findfile(repo._repo, getdata().strip())
1325 path = lfutil.findfile(repo._repo, getdata().strip())
1326
1326
1327 if path is None:
1327 if path is None:
1328 raise error.Abort(
1328 raise error.Abort(
1329 _(
1329 _(
1330 b'largefile %s not found in repo store or system cache'
1330 b'largefile %s not found in repo store or system cache'
1331 )
1331 )
1332 % lfile
1332 % lfile
1333 )
1333 )
1334 else:
1334 else:
1335 path = lfile
1335 path = lfile
1336
1336
1337 f = lfile
1337 f = lfile
1338
1338
1339 getdata = lambda: util.readfile(os.path.join(prefix, path))
1339 getdata = lambda: util.readfile(os.path.join(prefix, path))
1340
1340
1341 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1341 write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)
1342
1342
1343 for subpath in sorted(ctx.substate):
1343 for subpath in sorted(ctx.substate):
1344 sub = ctx.workingsub(subpath)
1344 sub = ctx.workingsub(subpath)
1345 submatch = matchmod.subdirmatcher(subpath, match)
1345 submatch = matchmod.subdirmatcher(subpath, match)
1346 subprefix = prefix + subpath + b'/'
1346 subprefix = prefix + subpath + b'/'
1347 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1347 # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
1348 # infer and possibly set lfstatus at the top of this function. That
1348 # infer and possibly set lfstatus at the top of this function. That
1349 # would allow only hgsubrepos to set this, instead of the current scheme
1349 # would allow only hgsubrepos to set this, instead of the current scheme
1350 # where the parent sets this for the child.
1350 # where the parent sets this for the child.
1351 with (
1351 with (
1352 util.safehasattr(sub, '_repo')
1352 util.safehasattr(sub, '_repo')
1353 and lfstatus(sub._repo)
1353 and lfstatus(sub._repo)
1354 or util.nullcontextmanager()
1354 or util.nullcontextmanager()
1355 ):
1355 ):
1356 sub.archive(archiver, subprefix, submatch, decode)
1356 sub.archive(archiver, subprefix, submatch, decode)
1357
1357
1358
1358
1359 # If a largefile is modified, the change is not reflected in its
1359 # If a largefile is modified, the change is not reflected in its
1360 # standin until a commit. cmdutil.bailifchanged() raises an exception
1360 # standin until a commit. cmdutil.bailifchanged() raises an exception
1361 # if the repo has uncommitted changes. Wrap it to also check if
1361 # if the repo has uncommitted changes. Wrap it to also check if
1362 # largefiles were changed. This is used by bisect, backout and fetch.
1362 # largefiles were changed. This is used by bisect, backout and fetch.
1363 @eh.wrapfunction(cmdutil, b'bailifchanged')
1363 @eh.wrapfunction(cmdutil, b'bailifchanged')
1364 def overridebailifchanged(orig, repo, *args, **kwargs):
1364 def overridebailifchanged(orig, repo, *args, **kwargs):
1365 orig(repo, *args, **kwargs)
1365 orig(repo, *args, **kwargs)
1366 with lfstatus(repo):
1366 with lfstatus(repo):
1367 s = repo.status()
1367 s = repo.status()
1368 if s.modified or s.added or s.removed or s.deleted:
1368 if s.modified or s.added or s.removed or s.deleted:
1369 raise error.Abort(_(b'uncommitted changes'))
1369 raise error.Abort(_(b'uncommitted changes'))
1370
1370
1371
1371
1372 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1372 @eh.wrapfunction(cmdutil, b'postcommitstatus')
1373 def postcommitstatus(orig, repo, *args, **kwargs):
1373 def postcommitstatus(orig, repo, *args, **kwargs):
1374 with lfstatus(repo):
1374 with lfstatus(repo):
1375 return orig(repo, *args, **kwargs)
1375 return orig(repo, *args, **kwargs)
1376
1376
1377
1377
1378 @eh.wrapfunction(cmdutil, b'forget')
1378 @eh.wrapfunction(cmdutil, b'forget')
1379 def cmdutilforget(
1379 def cmdutilforget(
1380 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1380 orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
1381 ):
1381 ):
1382 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1382 normalmatcher = composenormalfilematcher(match, repo[None].manifest())
1383 bad, forgot = orig(
1383 bad, forgot = orig(
1384 ui,
1384 ui,
1385 repo,
1385 repo,
1386 normalmatcher,
1386 normalmatcher,
1387 prefix,
1387 prefix,
1388 uipathfn,
1388 uipathfn,
1389 explicitonly,
1389 explicitonly,
1390 dryrun,
1390 dryrun,
1391 interactive,
1391 interactive,
1392 )
1392 )
1393 m = composelargefilematcher(match, repo[None].manifest())
1393 m = composelargefilematcher(match, repo[None].manifest())
1394
1394
1395 with lfstatus(repo):
1395 with lfstatus(repo):
1396 s = repo.status(match=m, clean=True)
1396 s = repo.status(match=m, clean=True)
1397 manifest = repo[None].manifest()
1397 manifest = repo[None].manifest()
1398 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1398 forget = sorted(s.modified + s.added + s.deleted + s.clean)
1399 forget = [f for f in forget if lfutil.standin(f) in manifest]
1399 forget = [f for f in forget if lfutil.standin(f) in manifest]
1400
1400
1401 for f in forget:
1401 for f in forget:
1402 fstandin = lfutil.standin(f)
1402 fstandin = lfutil.standin(f)
1403 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1403 if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
1404 ui.warn(
1404 ui.warn(
1405 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1405 _(b'not removing %s: file is already untracked\n') % uipathfn(f)
1406 )
1406 )
1407 bad.append(f)
1407 bad.append(f)
1408
1408
1409 for f in forget:
1409 for f in forget:
1410 if ui.verbose or not m.exact(f):
1410 if ui.verbose or not m.exact(f):
1411 ui.status(_(b'removing %s\n') % uipathfn(f))
1411 ui.status(_(b'removing %s\n') % uipathfn(f))
1412
1412
1413 # Need to lock because standin files are deleted then removed from the
1413 # Need to lock because standin files are deleted then removed from the
1414 # repository and we could race in-between.
1414 # repository and we could race in-between.
1415 with repo.wlock():
1415 with repo.wlock():
1416 lfdirstate = lfutil.openlfdirstate(ui, repo)
1416 lfdirstate = lfutil.openlfdirstate(ui, repo)
1417 for f in forget:
1417 for f in forget:
1418 lfdirstate.set_untracked(f)
1418 lfdirstate.set_untracked(f)
1419 lfdirstate.write(repo.currenttransaction())
1419 lfdirstate.write(repo.currenttransaction())
1420 standins = [lfutil.standin(f) for f in forget]
1420 standins = [lfutil.standin(f) for f in forget]
1421 for f in standins:
1421 for f in standins:
1422 repo.wvfs.unlinkpath(f, ignoremissing=True)
1422 repo.wvfs.unlinkpath(f, ignoremissing=True)
1423 rejected = repo[None].forget(standins)
1423 rejected = repo[None].forget(standins)
1424
1424
1425 bad.extend(f for f in rejected if f in m.files())
1425 bad.extend(f for f in rejected if f in m.files())
1426 forgot.extend(f for f in forget if f not in rejected)
1426 forgot.extend(f for f in forget if f not in rejected)
1427 return bad, forgot
1427 return bad, forgot
1428
1428
1429
1429
1430 def _getoutgoings(repo, other, missing, addfunc):
1430 def _getoutgoings(repo, other, missing, addfunc):
1431 """get pairs of filename and largefile hash in outgoing revisions
1431 """get pairs of filename and largefile hash in outgoing revisions
1432 in 'missing'.
1432 in 'missing'.
1433
1433
1434 largefiles already existing on 'other' repository are ignored.
1434 largefiles already existing on 'other' repository are ignored.
1435
1435
1436 'addfunc' is invoked with each unique pairs of filename and
1436 'addfunc' is invoked with each unique pairs of filename and
1437 largefile hash value.
1437 largefile hash value.
1438 """
1438 """
1439 knowns = set()
1439 knowns = set()
1440 lfhashes = set()
1440 lfhashes = set()
1441
1441
1442 def dedup(fn, lfhash):
1442 def dedup(fn, lfhash):
1443 k = (fn, lfhash)
1443 k = (fn, lfhash)
1444 if k not in knowns:
1444 if k not in knowns:
1445 knowns.add(k)
1445 knowns.add(k)
1446 lfhashes.add(lfhash)
1446 lfhashes.add(lfhash)
1447
1447
1448 lfutil.getlfilestoupload(repo, missing, dedup)
1448 lfutil.getlfilestoupload(repo, missing, dedup)
1449 if lfhashes:
1449 if lfhashes:
1450 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1450 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1451 for fn, lfhash in knowns:
1451 for fn, lfhash in knowns:
1452 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1452 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1453 addfunc(fn, lfhash)
1453 addfunc(fn, lfhash)
1454
1454
1455
1455
1456 def outgoinghook(ui, repo, other, opts, missing):
1456 def outgoinghook(ui, repo, other, opts, missing):
1457 if opts.pop(b'large', None):
1457 if opts.pop(b'large', None):
1458 lfhashes = set()
1458 lfhashes = set()
1459 if ui.debugflag:
1459 if ui.debugflag:
1460 toupload = {}
1460 toupload = {}
1461
1461
1462 def addfunc(fn, lfhash):
1462 def addfunc(fn, lfhash):
1463 if fn not in toupload:
1463 if fn not in toupload:
1464 toupload[fn] = [] # pytype: disable=unsupported-operands
1464 toupload[fn] = [] # pytype: disable=unsupported-operands
1465 toupload[fn].append(lfhash)
1465 toupload[fn].append(lfhash)
1466 lfhashes.add(lfhash)
1466 lfhashes.add(lfhash)
1467
1467
1468 def showhashes(fn):
1468 def showhashes(fn):
1469 for lfhash in sorted(toupload[fn]):
1469 for lfhash in sorted(toupload[fn]):
1470 ui.debug(b' %s\n' % lfhash)
1470 ui.debug(b' %s\n' % lfhash)
1471
1471
1472 else:
1472 else:
1473 toupload = set()
1473 toupload = set()
1474
1474
1475 def addfunc(fn, lfhash):
1475 def addfunc(fn, lfhash):
1476 toupload.add(fn)
1476 toupload.add(fn)
1477 lfhashes.add(lfhash)
1477 lfhashes.add(lfhash)
1478
1478
1479 def showhashes(fn):
1479 def showhashes(fn):
1480 pass
1480 pass
1481
1481
1482 _getoutgoings(repo, other, missing, addfunc)
1482 _getoutgoings(repo, other, missing, addfunc)
1483
1483
1484 if not toupload:
1484 if not toupload:
1485 ui.status(_(b'largefiles: no files to upload\n'))
1485 ui.status(_(b'largefiles: no files to upload\n'))
1486 else:
1486 else:
1487 ui.status(
1487 ui.status(
1488 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1488 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1489 )
1489 )
1490 for file in sorted(toupload):
1490 for file in sorted(toupload):
1491 ui.status(lfutil.splitstandin(file) + b'\n')
1491 ui.status(lfutil.splitstandin(file) + b'\n')
1492 showhashes(file)
1492 showhashes(file)
1493 ui.status(b'\n')
1493 ui.status(b'\n')
1494
1494
1495
1495
1496 @eh.wrapcommand(
1496 @eh.wrapcommand(
1497 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1497 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1498 )
1498 )
1499 def _outgoingcmd(orig, *args, **kwargs):
1499 def _outgoingcmd(orig, *args, **kwargs):
1500 # Nothing to do here other than add the extra help option- the hook above
1500 # Nothing to do here other than add the extra help option- the hook above
1501 # processes it.
1501 # processes it.
1502 return orig(*args, **kwargs)
1502 return orig(*args, **kwargs)
1503
1503
1504
1504
1505 def summaryremotehook(ui, repo, opts, changes):
1505 def summaryremotehook(ui, repo, opts, changes):
1506 largeopt = opts.get(b'large', False)
1506 largeopt = opts.get(b'large', False)
1507 if changes is None:
1507 if changes is None:
1508 if largeopt:
1508 if largeopt:
1509 return (False, True) # only outgoing check is needed
1509 return (False, True) # only outgoing check is needed
1510 else:
1510 else:
1511 return (False, False)
1511 return (False, False)
1512 elif largeopt:
1512 elif largeopt:
1513 url, branch, peer, outgoing = changes[1]
1513 url, branch, peer, outgoing = changes[1]
1514 if peer is None:
1514 if peer is None:
1515 # i18n: column positioning for "hg summary"
1515 # i18n: column positioning for "hg summary"
1516 ui.status(_(b'largefiles: (no remote repo)\n'))
1516 ui.status(_(b'largefiles: (no remote repo)\n'))
1517 return
1517 return
1518
1518
1519 toupload = set()
1519 toupload = set()
1520 lfhashes = set()
1520 lfhashes = set()
1521
1521
1522 def addfunc(fn, lfhash):
1522 def addfunc(fn, lfhash):
1523 toupload.add(fn)
1523 toupload.add(fn)
1524 lfhashes.add(lfhash)
1524 lfhashes.add(lfhash)
1525
1525
1526 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1526 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1527
1527
1528 if not toupload:
1528 if not toupload:
1529 # i18n: column positioning for "hg summary"
1529 # i18n: column positioning for "hg summary"
1530 ui.status(_(b'largefiles: (no files to upload)\n'))
1530 ui.status(_(b'largefiles: (no files to upload)\n'))
1531 else:
1531 else:
1532 # i18n: column positioning for "hg summary"
1532 # i18n: column positioning for "hg summary"
1533 ui.status(
1533 ui.status(
1534 _(b'largefiles: %d entities for %d files to upload\n')
1534 _(b'largefiles: %d entities for %d files to upload\n')
1535 % (len(lfhashes), len(toupload))
1535 % (len(lfhashes), len(toupload))
1536 )
1536 )
1537
1537
1538
1538
1539 @eh.wrapcommand(
1539 @eh.wrapcommand(
1540 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1540 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1541 )
1541 )
1542 def overridesummary(orig, ui, repo, *pats, **opts):
1542 def overridesummary(orig, ui, repo, *pats, **opts):
1543 with lfstatus(repo):
1543 with lfstatus(repo):
1544 orig(ui, repo, *pats, **opts)
1544 orig(ui, repo, *pats, **opts)
1545
1545
1546
1546
1547 @eh.wrapfunction(scmutil, b'addremove')
1547 @eh.wrapfunction(scmutil, b'addremove')
1548 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1548 def scmutiladdremove(
1549 orig,
1550 repo,
1551 matcher,
1552 prefix,
1553 uipathfn,
1554 opts=None,
1555 open_tr=None,
1556 ):
1549 if opts is None:
1557 if opts is None:
1550 opts = {}
1558 opts = {}
1551 if not lfutil.islfilesrepo(repo):
1559 if not lfutil.islfilesrepo(repo):
1552 return orig(repo, matcher, prefix, uipathfn, opts)
1560 return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1553 # Get the list of missing largefiles so we can remove them
1561 # Get the list of missing largefiles so we can remove them
1554 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1562 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1555 unsure, s, mtime_boundary = lfdirstate.status(
1563 unsure, s, mtime_boundary = lfdirstate.status(
1556 matchmod.always(),
1564 matchmod.always(),
1557 subrepos=[],
1565 subrepos=[],
1558 ignored=False,
1566 ignored=False,
1559 clean=False,
1567 clean=False,
1560 unknown=False,
1568 unknown=False,
1561 )
1569 )
1562
1570
1571 # open the transaction and changing_files context
1572 if open_tr is not None:
1573 open_tr()
1574
1563 # Call into the normal remove code, but the removing of the standin, we want
1575 # Call into the normal remove code, but the removing of the standin, we want
1564 # to have handled by original addremove. Monkey patching here makes sure
1576 # to have handled by original addremove. Monkey patching here makes sure
1565 # we don't remove the standin in the largefiles code, preventing a very
1577 # we don't remove the standin in the largefiles code, preventing a very
1566 # confused state later.
1578 # confused state later.
1567 if s.deleted:
1579 if s.deleted:
1568 m = copy.copy(matcher)
1580 m = copy.copy(matcher)
1569
1581
1570 # The m._files and m._map attributes are not changed to the deleted list
1582 # The m._files and m._map attributes are not changed to the deleted list
1571 # because that affects the m.exact() test, which in turn governs whether
1583 # because that affects the m.exact() test, which in turn governs whether
1572 # or not the file name is printed, and how. Simply limit the original
1584 # or not the file name is printed, and how. Simply limit the original
1573 # matches to those in the deleted status list.
1585 # matches to those in the deleted status list.
1574 matchfn = m.matchfn
1586 matchfn = m.matchfn
1575 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1587 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1576
1588
1577 removelargefiles(
1589 removelargefiles(
1578 repo.ui,
1590 repo.ui,
1579 repo,
1591 repo,
1580 True,
1592 True,
1581 m,
1593 m,
1582 uipathfn,
1594 uipathfn,
1583 opts.get(b'dry_run'),
1595 opts.get(b'dry_run'),
1584 **pycompat.strkwargs(opts)
1596 **pycompat.strkwargs(opts)
1585 )
1597 )
1586 # Call into the normal add code, and any files that *should* be added as
1598 # Call into the normal add code, and any files that *should* be added as
1587 # largefiles will be
1599 # largefiles will be
1588 added, bad = addlargefiles(
1600 added, bad = addlargefiles(
1589 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1601 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1590 )
1602 )
1591 # Now that we've handled largefiles, hand off to the original addremove
1603 # Now that we've handled largefiles, hand off to the original addremove
1592 # function to take care of the rest. Make sure it doesn't do anything with
1604 # function to take care of the rest. Make sure it doesn't do anything with
1593 # largefiles by passing a matcher that will ignore them.
1605 # largefiles by passing a matcher that will ignore them.
1594 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1606 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1595 return orig(repo, matcher, prefix, uipathfn, opts)
1607
1608 return orig(repo, matcher, prefix, uipathfn, opts, open_tr=open_tr)
1596
1609
1597
1610
1598 # Calling purge with --all will cause the largefiles to be deleted.
1611 # Calling purge with --all will cause the largefiles to be deleted.
1599 # Override repo.status to prevent this from happening.
1612 # Override repo.status to prevent this from happening.
1600 @eh.wrapcommand(b'purge')
1613 @eh.wrapcommand(b'purge')
1601 def overridepurge(orig, ui, repo, *dirs, **opts):
1614 def overridepurge(orig, ui, repo, *dirs, **opts):
1602 # XXX Monkey patching a repoview will not work. The assigned attribute will
1615 # XXX Monkey patching a repoview will not work. The assigned attribute will
1603 # be set on the unfiltered repo, but we will only lookup attributes in the
1616 # be set on the unfiltered repo, but we will only lookup attributes in the
1604 # unfiltered repo if the lookup in the repoview object itself fails. As the
1617 # unfiltered repo if the lookup in the repoview object itself fails. As the
1605 # monkey patched method exists on the repoview class the lookup will not
1618 # monkey patched method exists on the repoview class the lookup will not
1606 # fail. As a result, the original version will shadow the monkey patched
1619 # fail. As a result, the original version will shadow the monkey patched
1607 # one, defeating the monkey patch.
1620 # one, defeating the monkey patch.
1608 #
1621 #
1609 # As a work around we use an unfiltered repo here. We should do something
1622 # As a work around we use an unfiltered repo here. We should do something
1610 # cleaner instead.
1623 # cleaner instead.
1611 repo = repo.unfiltered()
1624 repo = repo.unfiltered()
1612 oldstatus = repo.status
1625 oldstatus = repo.status
1613
1626
1614 def overridestatus(
1627 def overridestatus(
1615 node1=b'.',
1628 node1=b'.',
1616 node2=None,
1629 node2=None,
1617 match=None,
1630 match=None,
1618 ignored=False,
1631 ignored=False,
1619 clean=False,
1632 clean=False,
1620 unknown=False,
1633 unknown=False,
1621 listsubrepos=False,
1634 listsubrepos=False,
1622 ):
1635 ):
1623 r = oldstatus(
1636 r = oldstatus(
1624 node1, node2, match, ignored, clean, unknown, listsubrepos
1637 node1, node2, match, ignored, clean, unknown, listsubrepos
1625 )
1638 )
1626 lfdirstate = lfutil.openlfdirstate(ui, repo)
1639 lfdirstate = lfutil.openlfdirstate(ui, repo)
1627 unknown = [
1640 unknown = [
1628 f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
1641 f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
1629 ]
1642 ]
1630 ignored = [
1643 ignored = [
1631 f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
1644 f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
1632 ]
1645 ]
1633 return scmutil.status(
1646 return scmutil.status(
1634 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1647 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1635 )
1648 )
1636
1649
1637 repo.status = overridestatus
1650 repo.status = overridestatus
1638 orig(ui, repo, *dirs, **opts)
1651 orig(ui, repo, *dirs, **opts)
1639 repo.status = oldstatus
1652 repo.status = oldstatus
1640
1653
1641
1654
1642 @eh.wrapcommand(b'rollback')
1655 @eh.wrapcommand(b'rollback')
1643 def overriderollback(orig, ui, repo, **opts):
1656 def overriderollback(orig, ui, repo, **opts):
1644 with repo.wlock():
1657 with repo.wlock():
1645 before = repo.dirstate.parents()
1658 before = repo.dirstate.parents()
1646 orphans = {
1659 orphans = {
1647 f
1660 f
1648 for f in repo.dirstate
1661 for f in repo.dirstate
1649 if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
1662 if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
1650 }
1663 }
1651 result = orig(ui, repo, **opts)
1664 result = orig(ui, repo, **opts)
1652 after = repo.dirstate.parents()
1665 after = repo.dirstate.parents()
1653 if before == after:
1666 if before == after:
1654 return result # no need to restore standins
1667 return result # no need to restore standins
1655
1668
1656 pctx = repo[b'.']
1669 pctx = repo[b'.']
1657 for f in repo.dirstate:
1670 for f in repo.dirstate:
1658 if lfutil.isstandin(f):
1671 if lfutil.isstandin(f):
1659 orphans.discard(f)
1672 orphans.discard(f)
1660 if repo.dirstate.get_entry(f).removed:
1673 if repo.dirstate.get_entry(f).removed:
1661 repo.wvfs.unlinkpath(f, ignoremissing=True)
1674 repo.wvfs.unlinkpath(f, ignoremissing=True)
1662 elif f in pctx:
1675 elif f in pctx:
1663 fctx = pctx[f]
1676 fctx = pctx[f]
1664 repo.wwrite(f, fctx.data(), fctx.flags())
1677 repo.wwrite(f, fctx.data(), fctx.flags())
1665 else:
1678 else:
1666 # content of standin is not so important in 'a',
1679 # content of standin is not so important in 'a',
1667 # 'm' or 'n' (coming from the 2nd parent) cases
1680 # 'm' or 'n' (coming from the 2nd parent) cases
1668 lfutil.writestandin(repo, f, b'', False)
1681 lfutil.writestandin(repo, f, b'', False)
1669 for standin in orphans:
1682 for standin in orphans:
1670 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1683 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1671
1684
1672 return result
1685 return result
1673
1686
1674
1687
1675 @eh.wrapcommand(b'transplant', extension=b'transplant')
1688 @eh.wrapcommand(b'transplant', extension=b'transplant')
1676 def overridetransplant(orig, ui, repo, *revs, **opts):
1689 def overridetransplant(orig, ui, repo, *revs, **opts):
1677 resuming = opts.get('continue')
1690 resuming = opts.get('continue')
1678 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1691 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1679 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1692 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1680 try:
1693 try:
1681 result = orig(ui, repo, *revs, **opts)
1694 result = orig(ui, repo, *revs, **opts)
1682 finally:
1695 finally:
1683 repo._lfstatuswriters.pop()
1696 repo._lfstatuswriters.pop()
1684 repo._lfcommithooks.pop()
1697 repo._lfcommithooks.pop()
1685 return result
1698 return result
1686
1699
1687
1700
1688 @eh.wrapcommand(b'cat')
1701 @eh.wrapcommand(b'cat')
1689 def overridecat(orig, ui, repo, file1, *pats, **opts):
1702 def overridecat(orig, ui, repo, file1, *pats, **opts):
1690 opts = pycompat.byteskwargs(opts)
1703 opts = pycompat.byteskwargs(opts)
1691 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
1704 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
1692 err = 1
1705 err = 1
1693 notbad = set()
1706 notbad = set()
1694 m = scmutil.match(ctx, (file1,) + pats, opts)
1707 m = scmutil.match(ctx, (file1,) + pats, opts)
1695 origmatchfn = m.matchfn
1708 origmatchfn = m.matchfn
1696
1709
1697 def lfmatchfn(f):
1710 def lfmatchfn(f):
1698 if origmatchfn(f):
1711 if origmatchfn(f):
1699 return True
1712 return True
1700 lf = lfutil.splitstandin(f)
1713 lf = lfutil.splitstandin(f)
1701 if lf is None:
1714 if lf is None:
1702 return False
1715 return False
1703 notbad.add(lf)
1716 notbad.add(lf)
1704 return origmatchfn(lf)
1717 return origmatchfn(lf)
1705
1718
1706 m.matchfn = lfmatchfn
1719 m.matchfn = lfmatchfn
1707 origbadfn = m.bad
1720 origbadfn = m.bad
1708
1721
1709 def lfbadfn(f, msg):
1722 def lfbadfn(f, msg):
1710 if not f in notbad:
1723 if not f in notbad:
1711 origbadfn(f, msg)
1724 origbadfn(f, msg)
1712
1725
1713 m.bad = lfbadfn
1726 m.bad = lfbadfn
1714
1727
1715 origvisitdirfn = m.visitdir
1728 origvisitdirfn = m.visitdir
1716
1729
1717 def lfvisitdirfn(dir):
1730 def lfvisitdirfn(dir):
1718 if dir == lfutil.shortname:
1731 if dir == lfutil.shortname:
1719 return True
1732 return True
1720 ret = origvisitdirfn(dir)
1733 ret = origvisitdirfn(dir)
1721 if ret:
1734 if ret:
1722 return ret
1735 return ret
1723 lf = lfutil.splitstandin(dir)
1736 lf = lfutil.splitstandin(dir)
1724 if lf is None:
1737 if lf is None:
1725 return False
1738 return False
1726 return origvisitdirfn(lf)
1739 return origvisitdirfn(lf)
1727
1740
1728 m.visitdir = lfvisitdirfn
1741 m.visitdir = lfvisitdirfn
1729
1742
1730 for f in ctx.walk(m):
1743 for f in ctx.walk(m):
1731 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1744 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1732 lf = lfutil.splitstandin(f)
1745 lf = lfutil.splitstandin(f)
1733 if lf is None or origmatchfn(f):
1746 if lf is None or origmatchfn(f):
1734 # duplicating unreachable code from commands.cat
1747 # duplicating unreachable code from commands.cat
1735 data = ctx[f].data()
1748 data = ctx[f].data()
1736 if opts.get(b'decode'):
1749 if opts.get(b'decode'):
1737 data = repo.wwritedata(f, data)
1750 data = repo.wwritedata(f, data)
1738 fp.write(data)
1751 fp.write(data)
1739 else:
1752 else:
1740 hash = lfutil.readasstandin(ctx[f])
1753 hash = lfutil.readasstandin(ctx[f])
1741 if not lfutil.inusercache(repo.ui, hash):
1754 if not lfutil.inusercache(repo.ui, hash):
1742 store = storefactory.openstore(repo)
1755 store = storefactory.openstore(repo)
1743 success, missing = store.get([(lf, hash)])
1756 success, missing = store.get([(lf, hash)])
1744 if len(success) != 1:
1757 if len(success) != 1:
1745 raise error.Abort(
1758 raise error.Abort(
1746 _(
1759 _(
1747 b'largefile %s is not in cache and could not be '
1760 b'largefile %s is not in cache and could not be '
1748 b'downloaded'
1761 b'downloaded'
1749 )
1762 )
1750 % lf
1763 % lf
1751 )
1764 )
1752 path = lfutil.usercachepath(repo.ui, hash)
1765 path = lfutil.usercachepath(repo.ui, hash)
1753 with open(path, b"rb") as fpin:
1766 with open(path, b"rb") as fpin:
1754 for chunk in util.filechunkiter(fpin):
1767 for chunk in util.filechunkiter(fpin):
1755 fp.write(chunk)
1768 fp.write(chunk)
1756 err = 0
1769 err = 0
1757 return err
1770 return err
1758
1771
1759
1772
1760 @eh.wrapfunction(merge, b'_update')
1773 @eh.wrapfunction(merge, b'_update')
1761 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1774 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1762 matcher = kwargs.get('matcher', None)
1775 matcher = kwargs.get('matcher', None)
1763 # note if this is a partial update
1776 # note if this is a partial update
1764 partial = matcher and not matcher.always()
1777 partial = matcher and not matcher.always()
1765 with repo.wlock():
1778 with repo.wlock():
1766 # branch | | |
1779 # branch | | |
1767 # merge | force | partial | action
1780 # merge | force | partial | action
1768 # -------+-------+---------+--------------
1781 # -------+-------+---------+--------------
1769 # x | x | x | linear-merge
1782 # x | x | x | linear-merge
1770 # o | x | x | branch-merge
1783 # o | x | x | branch-merge
1771 # x | o | x | overwrite (as clean update)
1784 # x | o | x | overwrite (as clean update)
1772 # o | o | x | force-branch-merge (*1)
1785 # o | o | x | force-branch-merge (*1)
1773 # x | x | o | (*)
1786 # x | x | o | (*)
1774 # o | x | o | (*)
1787 # o | x | o | (*)
1775 # x | o | o | overwrite (as revert)
1788 # x | o | o | overwrite (as revert)
1776 # o | o | o | (*)
1789 # o | o | o | (*)
1777 #
1790 #
1778 # (*) don't care
1791 # (*) don't care
1779 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1792 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1780
1793
1781 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1794 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1782 unsure, s, mtime_boundary = lfdirstate.status(
1795 unsure, s, mtime_boundary = lfdirstate.status(
1783 matchmod.always(),
1796 matchmod.always(),
1784 subrepos=[],
1797 subrepos=[],
1785 ignored=False,
1798 ignored=False,
1786 clean=True,
1799 clean=True,
1787 unknown=False,
1800 unknown=False,
1788 )
1801 )
1789 oldclean = set(s.clean)
1802 oldclean = set(s.clean)
1790 pctx = repo[b'.']
1803 pctx = repo[b'.']
1791 dctx = repo[node]
1804 dctx = repo[node]
1792 for lfile in unsure + s.modified:
1805 for lfile in unsure + s.modified:
1793 lfileabs = repo.wvfs.join(lfile)
1806 lfileabs = repo.wvfs.join(lfile)
1794 if not repo.wvfs.exists(lfileabs):
1807 if not repo.wvfs.exists(lfileabs):
1795 continue
1808 continue
1796 lfhash = lfutil.hashfile(lfileabs)
1809 lfhash = lfutil.hashfile(lfileabs)
1797 standin = lfutil.standin(lfile)
1810 standin = lfutil.standin(lfile)
1798 lfutil.writestandin(
1811 lfutil.writestandin(
1799 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1812 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1800 )
1813 )
1801 if standin in pctx and lfhash == lfutil.readasstandin(
1814 if standin in pctx and lfhash == lfutil.readasstandin(
1802 pctx[standin]
1815 pctx[standin]
1803 ):
1816 ):
1804 oldclean.add(lfile)
1817 oldclean.add(lfile)
1805 for lfile in s.added:
1818 for lfile in s.added:
1806 fstandin = lfutil.standin(lfile)
1819 fstandin = lfutil.standin(lfile)
1807 if fstandin not in dctx:
1820 if fstandin not in dctx:
1808 # in this case, content of standin file is meaningless
1821 # in this case, content of standin file is meaningless
1809 # (in dctx, lfile is unknown, or normal file)
1822 # (in dctx, lfile is unknown, or normal file)
1810 continue
1823 continue
1811 lfutil.updatestandin(repo, lfile, fstandin)
1824 lfutil.updatestandin(repo, lfile, fstandin)
1812 # mark all clean largefiles as dirty, just in case the update gets
1825 # mark all clean largefiles as dirty, just in case the update gets
1813 # interrupted before largefiles and lfdirstate are synchronized
1826 # interrupted before largefiles and lfdirstate are synchronized
1814 for lfile in oldclean:
1827 for lfile in oldclean:
1815 lfdirstate.set_possibly_dirty(lfile)
1828 lfdirstate.set_possibly_dirty(lfile)
1816 lfdirstate.write(repo.currenttransaction())
1829 lfdirstate.write(repo.currenttransaction())
1817
1830
1818 oldstandins = lfutil.getstandinsstate(repo)
1831 oldstandins = lfutil.getstandinsstate(repo)
1819 wc = kwargs.get('wc')
1832 wc = kwargs.get('wc')
1820 if wc and wc.isinmemory():
1833 if wc and wc.isinmemory():
1821 # largefiles is not a good candidate for in-memory merge (large
1834 # largefiles is not a good candidate for in-memory merge (large
1822 # files, custom dirstate, matcher usage).
1835 # files, custom dirstate, matcher usage).
1823 raise error.ProgrammingError(
1836 raise error.ProgrammingError(
1824 b'largefiles is not compatible with in-memory merge'
1837 b'largefiles is not compatible with in-memory merge'
1825 )
1838 )
1826 with lfdirstate.changing_parents(repo):
1839 with lfdirstate.changing_parents(repo):
1827 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1840 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1828
1841
1829 newstandins = lfutil.getstandinsstate(repo)
1842 newstandins = lfutil.getstandinsstate(repo)
1830 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1843 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1831
1844
1832 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1845 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1833 # all the ones that didn't change as clean
1846 # all the ones that didn't change as clean
1834 for lfile in oldclean.difference(filelist):
1847 for lfile in oldclean.difference(filelist):
1835 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1848 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1836 lfdirstate.write(repo.currenttransaction())
1849 lfdirstate.write(repo.currenttransaction())
1837
1850
1838 if branchmerge or force or partial:
1851 if branchmerge or force or partial:
1839 filelist.extend(s.deleted + s.removed)
1852 filelist.extend(s.deleted + s.removed)
1840
1853
1841 lfcommands.updatelfiles(
1854 lfcommands.updatelfiles(
1842 repo.ui, repo, filelist=filelist, normallookup=partial
1855 repo.ui, repo, filelist=filelist, normallookup=partial
1843 )
1856 )
1844
1857
1845 return result
1858 return result
1846
1859
1847
1860
1848 @eh.wrapfunction(scmutil, b'marktouched')
1861 @eh.wrapfunction(scmutil, b'marktouched')
1849 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1862 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1850 result = orig(repo, files, *args, **kwargs)
1863 result = orig(repo, files, *args, **kwargs)
1851
1864
1852 filelist = []
1865 filelist = []
1853 for f in files:
1866 for f in files:
1854 lf = lfutil.splitstandin(f)
1867 lf = lfutil.splitstandin(f)
1855 if lf is not None:
1868 if lf is not None:
1856 filelist.append(lf)
1869 filelist.append(lf)
1857 if filelist:
1870 if filelist:
1858 lfcommands.updatelfiles(
1871 lfcommands.updatelfiles(
1859 repo.ui,
1872 repo.ui,
1860 repo,
1873 repo,
1861 filelist=filelist,
1874 filelist=filelist,
1862 printmessage=False,
1875 printmessage=False,
1863 normallookup=True,
1876 normallookup=True,
1864 )
1877 )
1865
1878
1866 return result
1879 return result
1867
1880
1868
1881
1869 @eh.wrapfunction(upgrade_actions, b'preservedrequirements')
1882 @eh.wrapfunction(upgrade_actions, b'preservedrequirements')
1870 @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
1883 @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
1871 def upgraderequirements(orig, repo):
1884 def upgraderequirements(orig, repo):
1872 reqs = orig(repo)
1885 reqs = orig(repo)
1873 if b'largefiles' in repo.requirements:
1886 if b'largefiles' in repo.requirements:
1874 reqs.add(b'largefiles')
1887 reqs.add(b'largefiles')
1875 return reqs
1888 return reqs
1876
1889
1877
1890
1878 _lfscheme = b'largefile://'
1891 _lfscheme = b'largefile://'
1879
1892
1880
1893
1881 @eh.wrapfunction(urlmod, b'open')
1894 @eh.wrapfunction(urlmod, b'open')
1882 def openlargefile(orig, ui, url_, data=None, **kwargs):
1895 def openlargefile(orig, ui, url_, data=None, **kwargs):
1883 if url_.startswith(_lfscheme):
1896 if url_.startswith(_lfscheme):
1884 if data:
1897 if data:
1885 msg = b"cannot use data on a 'largefile://' url"
1898 msg = b"cannot use data on a 'largefile://' url"
1886 raise error.ProgrammingError(msg)
1899 raise error.ProgrammingError(msg)
1887 lfid = url_[len(_lfscheme) :]
1900 lfid = url_[len(_lfscheme) :]
1888 return storefactory.getlfile(ui, lfid)
1901 return storefactory.getlfile(ui, lfid)
1889 else:
1902 else:
1890 return orig(ui, url_, data=data, **kwargs)
1903 return orig(ui, url_, data=data, **kwargs)
@@ -1,4009 +1,4102 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import copy as copymod
9 import copy as copymod
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 hex,
16 hex,
17 nullrev,
17 nullrev,
18 short,
18 short,
19 )
19 )
20 from .pycompat import (
20 from .pycompat import (
21 getattr,
21 getattr,
22 open,
22 open,
23 setattr,
23 setattr,
24 )
24 )
25 from .thirdparty import attr
25 from .thirdparty import attr
26
26
27 from . import (
27 from . import (
28 bookmarks,
28 bookmarks,
29 changelog,
29 changelog,
30 copies,
30 copies,
31 crecord as crecordmod,
31 crecord as crecordmod,
32 dirstateguard,
33 encoding,
32 encoding,
34 error,
33 error,
35 formatter,
34 formatter,
36 logcmdutil,
35 logcmdutil,
37 match as matchmod,
36 match as matchmod,
38 merge as mergemod,
37 merge as mergemod,
39 mergestate as mergestatemod,
38 mergestate as mergestatemod,
40 mergeutil,
39 mergeutil,
41 obsolete,
40 obsolete,
42 patch,
41 patch,
43 pathutil,
42 pathutil,
44 phases,
43 phases,
45 pycompat,
44 pycompat,
46 repair,
45 repair,
47 revlog,
46 revlog,
48 rewriteutil,
47 rewriteutil,
49 scmutil,
48 scmutil,
50 state as statemod,
49 state as statemod,
51 subrepoutil,
50 subrepoutil,
52 templatekw,
51 templatekw,
53 templater,
52 templater,
54 util,
53 util,
55 vfs as vfsmod,
54 vfs as vfsmod,
56 )
55 )
57
56
58 from .utils import (
57 from .utils import (
59 dateutil,
58 dateutil,
60 stringutil,
59 stringutil,
61 )
60 )
62
61
63 from .revlogutils import (
62 from .revlogutils import (
64 constants as revlog_constants,
63 constants as revlog_constants,
65 )
64 )
66
65
67 if pycompat.TYPE_CHECKING:
66 if pycompat.TYPE_CHECKING:
68 from typing import (
67 from typing import (
69 Any,
68 Any,
70 Dict,
69 Dict,
71 )
70 )
72
71
73 for t in (Any, Dict):
72 for t in (Any, Dict):
74 assert t
73 assert t
75
74
76 stringio = util.stringio
75 stringio = util.stringio
77
76
78 # templates of common command options
77 # templates of common command options
79
78
80 dryrunopts = [
79 dryrunopts = [
81 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
80 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
82 ]
81 ]
83
82
84 confirmopts = [
83 confirmopts = [
85 (b'', b'confirm', None, _(b'ask before applying actions')),
84 (b'', b'confirm', None, _(b'ask before applying actions')),
86 ]
85 ]
87
86
88 remoteopts = [
87 remoteopts = [
89 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
88 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
90 (
89 (
91 b'',
90 b'',
92 b'remotecmd',
91 b'remotecmd',
93 b'',
92 b'',
94 _(b'specify hg command to run on the remote side'),
93 _(b'specify hg command to run on the remote side'),
95 _(b'CMD'),
94 _(b'CMD'),
96 ),
95 ),
97 (
96 (
98 b'',
97 b'',
99 b'insecure',
98 b'insecure',
100 None,
99 None,
101 _(b'do not verify server certificate (ignoring web.cacerts config)'),
100 _(b'do not verify server certificate (ignoring web.cacerts config)'),
102 ),
101 ),
103 ]
102 ]
104
103
105 walkopts = [
104 walkopts = [
106 (
105 (
107 b'I',
106 b'I',
108 b'include',
107 b'include',
109 [],
108 [],
110 _(b'include names matching the given patterns'),
109 _(b'include names matching the given patterns'),
111 _(b'PATTERN'),
110 _(b'PATTERN'),
112 ),
111 ),
113 (
112 (
114 b'X',
113 b'X',
115 b'exclude',
114 b'exclude',
116 [],
115 [],
117 _(b'exclude names matching the given patterns'),
116 _(b'exclude names matching the given patterns'),
118 _(b'PATTERN'),
117 _(b'PATTERN'),
119 ),
118 ),
120 ]
119 ]
121
120
122 commitopts = [
121 commitopts = [
123 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
122 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
124 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
123 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
125 ]
124 ]
126
125
127 commitopts2 = [
126 commitopts2 = [
128 (
127 (
129 b'd',
128 b'd',
130 b'date',
129 b'date',
131 b'',
130 b'',
132 _(b'record the specified date as commit date'),
131 _(b'record the specified date as commit date'),
133 _(b'DATE'),
132 _(b'DATE'),
134 ),
133 ),
135 (
134 (
136 b'u',
135 b'u',
137 b'user',
136 b'user',
138 b'',
137 b'',
139 _(b'record the specified user as committer'),
138 _(b'record the specified user as committer'),
140 _(b'USER'),
139 _(b'USER'),
141 ),
140 ),
142 ]
141 ]
143
142
144 commitopts3 = [
143 commitopts3 = [
145 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
144 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
146 (b'U', b'currentuser', None, _(b'record the current user as committer')),
145 (b'U', b'currentuser', None, _(b'record the current user as committer')),
147 ]
146 ]
148
147
149 formatteropts = [
148 formatteropts = [
150 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
149 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
151 ]
150 ]
152
151
153 templateopts = [
152 templateopts = [
154 (
153 (
155 b'',
154 b'',
156 b'style',
155 b'style',
157 b'',
156 b'',
158 _(b'display using template map file (DEPRECATED)'),
157 _(b'display using template map file (DEPRECATED)'),
159 _(b'STYLE'),
158 _(b'STYLE'),
160 ),
159 ),
161 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
160 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
162 ]
161 ]
163
162
164 logopts = [
163 logopts = [
165 (b'p', b'patch', None, _(b'show patch')),
164 (b'p', b'patch', None, _(b'show patch')),
166 (b'g', b'git', None, _(b'use git extended diff format')),
165 (b'g', b'git', None, _(b'use git extended diff format')),
167 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
166 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
168 (b'M', b'no-merges', None, _(b'do not show merges')),
167 (b'M', b'no-merges', None, _(b'do not show merges')),
169 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
168 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
170 (b'G', b'graph', None, _(b"show the revision DAG")),
169 (b'G', b'graph', None, _(b"show the revision DAG")),
171 ] + templateopts
170 ] + templateopts
172
171
173 diffopts = [
172 diffopts = [
174 (b'a', b'text', None, _(b'treat all files as text')),
173 (b'a', b'text', None, _(b'treat all files as text')),
175 (
174 (
176 b'g',
175 b'g',
177 b'git',
176 b'git',
178 None,
177 None,
179 _(b'use git extended diff format (DEFAULT: diff.git)'),
178 _(b'use git extended diff format (DEFAULT: diff.git)'),
180 ),
179 ),
181 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
180 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
182 (b'', b'nodates', None, _(b'omit dates from diff headers')),
181 (b'', b'nodates', None, _(b'omit dates from diff headers')),
183 ]
182 ]
184
183
185 diffwsopts = [
184 diffwsopts = [
186 (
185 (
187 b'w',
186 b'w',
188 b'ignore-all-space',
187 b'ignore-all-space',
189 None,
188 None,
190 _(b'ignore white space when comparing lines'),
189 _(b'ignore white space when comparing lines'),
191 ),
190 ),
192 (
191 (
193 b'b',
192 b'b',
194 b'ignore-space-change',
193 b'ignore-space-change',
195 None,
194 None,
196 _(b'ignore changes in the amount of white space'),
195 _(b'ignore changes in the amount of white space'),
197 ),
196 ),
198 (
197 (
199 b'B',
198 b'B',
200 b'ignore-blank-lines',
199 b'ignore-blank-lines',
201 None,
200 None,
202 _(b'ignore changes whose lines are all blank'),
201 _(b'ignore changes whose lines are all blank'),
203 ),
202 ),
204 (
203 (
205 b'Z',
204 b'Z',
206 b'ignore-space-at-eol',
205 b'ignore-space-at-eol',
207 None,
206 None,
208 _(b'ignore changes in whitespace at EOL'),
207 _(b'ignore changes in whitespace at EOL'),
209 ),
208 ),
210 ]
209 ]
211
210
212 diffopts2 = (
211 diffopts2 = (
213 [
212 [
214 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
213 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
215 (
214 (
216 b'p',
215 b'p',
217 b'show-function',
216 b'show-function',
218 None,
217 None,
219 _(
218 _(
220 b'show which function each change is in (DEFAULT: diff.showfunc)'
219 b'show which function each change is in (DEFAULT: diff.showfunc)'
221 ),
220 ),
222 ),
221 ),
223 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
222 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
224 ]
223 ]
225 + diffwsopts
224 + diffwsopts
226 + [
225 + [
227 (
226 (
228 b'U',
227 b'U',
229 b'unified',
228 b'unified',
230 b'',
229 b'',
231 _(b'number of lines of context to show'),
230 _(b'number of lines of context to show'),
232 _(b'NUM'),
231 _(b'NUM'),
233 ),
232 ),
234 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
233 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
235 (
234 (
236 b'',
235 b'',
237 b'root',
236 b'root',
238 b'',
237 b'',
239 _(b'produce diffs relative to subdirectory'),
238 _(b'produce diffs relative to subdirectory'),
240 _(b'DIR'),
239 _(b'DIR'),
241 ),
240 ),
242 ]
241 ]
243 )
242 )
244
243
245 mergetoolopts = [
244 mergetoolopts = [
246 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
245 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
247 ]
246 ]
248
247
249 similarityopts = [
248 similarityopts = [
250 (
249 (
251 b's',
250 b's',
252 b'similarity',
251 b'similarity',
253 b'',
252 b'',
254 _(b'guess renamed files by similarity (0<=s<=100)'),
253 _(b'guess renamed files by similarity (0<=s<=100)'),
255 _(b'SIMILARITY'),
254 _(b'SIMILARITY'),
256 )
255 )
257 ]
256 ]
258
257
259 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
258 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
260
259
261 debugrevlogopts = [
260 debugrevlogopts = [
262 (b'c', b'changelog', False, _(b'open changelog')),
261 (b'c', b'changelog', False, _(b'open changelog')),
263 (b'm', b'manifest', False, _(b'open manifest')),
262 (b'm', b'manifest', False, _(b'open manifest')),
264 (b'', b'dir', b'', _(b'open directory manifest')),
263 (b'', b'dir', b'', _(b'open directory manifest')),
265 ]
264 ]
266
265
267 # special string such that everything below this line will be ingored in the
266 # special string such that everything below this line will be ingored in the
268 # editor text
267 # editor text
269 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
268 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
270
269
271
270
272 def check_at_most_one_arg(opts, *args):
271 def check_at_most_one_arg(opts, *args):
273 """abort if more than one of the arguments are in opts
272 """abort if more than one of the arguments are in opts
274
273
275 Returns the unique argument or None if none of them were specified.
274 Returns the unique argument or None if none of them were specified.
276 """
275 """
277
276
278 def to_display(name):
277 def to_display(name):
279 return pycompat.sysbytes(name).replace(b'_', b'-')
278 return pycompat.sysbytes(name).replace(b'_', b'-')
280
279
281 previous = None
280 previous = None
282 for x in args:
281 for x in args:
283 if opts.get(x):
282 if opts.get(x):
284 if previous:
283 if previous:
285 raise error.InputError(
284 raise error.InputError(
286 _(b'cannot specify both --%s and --%s')
285 _(b'cannot specify both --%s and --%s')
287 % (to_display(previous), to_display(x))
286 % (to_display(previous), to_display(x))
288 )
287 )
289 previous = x
288 previous = x
290 return previous
289 return previous
291
290
292
291
293 def check_incompatible_arguments(opts, first, others):
292 def check_incompatible_arguments(opts, first, others):
294 """abort if the first argument is given along with any of the others
293 """abort if the first argument is given along with any of the others
295
294
296 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
295 Unlike check_at_most_one_arg(), `others` are not mutually exclusive
297 among themselves, and they're passed as a single collection.
296 among themselves, and they're passed as a single collection.
298 """
297 """
299 for other in others:
298 for other in others:
300 check_at_most_one_arg(opts, first, other)
299 check_at_most_one_arg(opts, first, other)
301
300
302
301
def resolve_commit_options(ui, opts):
    """modify commit options dict to handle related options

    The return value indicates that ``rewrite.update-timestamp`` is the reason
    the ``date`` option is set.
    """
    check_at_most_one_arg(opts, 'date', 'currentdate')
    check_at_most_one_arg(opts, 'user', 'currentuser')

    # becomes True only when the date is injected purely because of the
    # rewrite.update-timestamp config knob
    datemaydiffer = False

    if opts.get('currentdate'):
        opts['date'] = b'%d %d' % dateutil.makedate()
    else:
        # no explicit date, config asks for refreshed timestamps, and
        # --currentdate was not touched at all (None, not merely falsy)
        auto_date = (
            not opts.get('date')
            and ui.configbool(b'rewrite', b'update-timestamp')
            and opts.get('currentdate') is None
        )
        if auto_date:
            opts['date'] = b'%d %d' % dateutil.makedate()
            datemaydiffer = True

    if opts.get('currentuser'):
        opts['user'] = ui.username()

    return datemaydiffer
328
327
329
328
def check_note_size(opts):
    """make sure note is of valid format"""

    note = opts.get('note')
    if note:
        # notes are capped at 255 bytes and must stay on one line
        if len(note) > 255:
            raise error.InputError(
                _(b"cannot store a note of more than 255 bytes")
            )
        if b'\n' in note:
            raise error.InputError(_(b"note cannot contain a newline"))
341
340
342
341
def ishunk(x):
    """Return True when `x` is a record hunk (curses or plain variant)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
346
345
347
346
def isheader(x):
    """Return True when `x` is a patch header (curses or plain variant)."""
    return isinstance(x, (crecordmod.uiheader, patch.header))
351
350
352
351
def newandmodified(chunks):
    """Collect filenames of newly-added files from selected patch chunks.

    Returns a pair of sets: the newly added (and possibly modified) file
    names, and the extra files that must be restored (e.g. rename sources
    other than the new file itself).
    """
    added = set()
    restore = set()
    for piece in chunks:
        if not (isheader(piece) and piece.isnewfile()):
            continue
        target = piece.filename()
        added.add(target)
        restore.update(set(piece.files()) - {target})
    return added, restore
361
360
362
361
def parsealiases(cmd):
    """Expand a b"name|alias|..." command spec into its list of aliases.

    For every alias containing b'-', a dash-less variant is appended at
    the end (skipping variants that already appear in the spec).
    """
    aliases = cmd.split(b"|")
    known = set(aliases)
    dashless = []
    for name in aliases:
        if b'-' not in name:
            continue
        folded = name.replace(b'-', b'')
        if folded not in known:
            known.add(folded)
            dashless.append(folded)
    # dash-less variants keep their relative order, after the explicit ones
    aliases.extend(dashless)
    return aliases
375
374
376
375
def setupwrapcolorwrite(ui):
    """Replace ui.write with a diff-labeling wrapper; return the old write.

    Output routed through the wrapper is split by patch.difflabel so diff
    chunks can be colorized.
    """

    def labeledwrite(origwrite, *args, **kw):
        label = kw.pop('label', b'')
        for chunk, chunklabel in patch.difflabel(lambda: args):
            origwrite(chunk, label=label + chunklabel)

    realwrite = ui.write

    def wrapped(*args, **kwargs):
        return labeledwrite(realwrite, *args, **kwargs)

    setattr(ui, 'write', wrapped)
    return realwrite
391
390
392
391
def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
    """Run the hunk selector, via curses when requested.

    Falls back to the text-mode selector when the curses selector raises
    a fallback error (or when curses was not requested at all).
    """
    if usecurses:
        try:
            if testfile:
                # test harness drives the chunk selector from a script file
                selector = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector
                )
            else:
                selector = crecordmod.chunkselector
            return crecordmod.filterpatch(
                ui, originalhunks, selector, operation
            )
        except crecordmod.fallbackerror as e:
            ui.warn(b'%s\n' % e)
            ui.warn(_(b'falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, match, operation)
411
410
412
411
def recordfilter(ui, originalhunks, match, operation=None):
    """Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used for to build ui messages to indicate the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    curses_ok = crecordmod.checkcurses(ui)
    testfile = ui.config(b'experimental', b'crecordtest')
    oldwrite = setupwrapcolorwrite(ui)
    try:
        chunks, chosenopts = filterchunks(
            ui, originalhunks, curses_ok, testfile, match, operation
        )
    finally:
        # always restore the unwrapped ui.write, even when selection fails
        ui.write = oldwrite
    return chunks, chosenopts
430
429
431
430
def dorecord(
    ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
):
    """Interactively select changes and hand them to ``commitfunc``.

    ``filterfn`` is the hunk-selection UI (called as
    ``filterfn(ui, headers, match)``), ``cmdsuggest`` names the
    non-interactive command to suggest when the ui is not interactive, and
    ``backupall`` forces backing up every changed file rather than only
    those needed for restoration.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _(b'running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _(b'running non-interactively')
        raise error.InputError(msg)

    # make sure username is set before going interactive
    if not opts.get(b'user'):
        ui.username()  # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        if not opts.get(b'interactive-unshelve'):
            checkunfinished(repo, commit=True)
        wctx = repo[None]
        # partial commit of a merge is not supported: refuse early
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.InputError(
                _(
                    b'cannot partially commit a merge '
                    b'(use "hg commit" instead)'
                )
            )

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        force = opts.get(b'force')
        if not force:
            match = matchmod.badmatch(match, fail)

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status
            tmpstatus = scmutil.status(
                copymod.copy(status.modified),
                copymod.copy(status.added),
                copymod.copy(status.removed),
                copymod.copy(status.deleted),
                copymod.copy(status.unknown),
                copymod.copy(status.ignored),
                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
            )

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True
            )
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, match, status, fail)
        diffopts = patch.difffeatureopts(
            ui,
            opts=opts,
            whitespace=True,
            section=b'commands',
            configprefix=b'commit.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        original_headers = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, original_headers, match)
        except error.PatchParseError as err:
            raise error.InputError(_(b'error parsing patch: %s') % err)
        except error.PatchApplicationError as err:
            raise error.StateError(_(b'error applying patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir. We also will need to restore
        # files that were the sources of renames so that the patch application
        # works.
        newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks)
        contenders = set()
        for h in chunks:
            if isheader(h):
                contenders.update(set(h.files()))

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_(b'no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            # only files whose on-disk content will be touched by the
            # revert/patch dance below need a backup
            tobackup = [
                f
                for f in newfiles
                if f in modified or f in newlyaddedandmodifiedfiles
            ]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join(b'record-backups')
            try:
                os.mkdir(backupdir)
            except FileExistsError:
                pass
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(
                    prefix=os.path.basename(f) + b'.', dir=backupdir
                )
                os.close(fd)
                ui.debug(b'backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # assemble the selected hunks into one patch to apply later
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get(b'review', False):
                patchtext = (
                    crecordmod.diffhelptext
                    + crecordmod.patchhelptext
                    + fp.read()
                )
                reviewedpatch = ui.edit(
                    patchtext, b"", action=b"diff", repopath=repo.path
                )
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
                mergemod.revert_to(repo[b'.'], matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug(b'applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchParseError as err:
                    raise error.InputError(pycompat.bytestr(err))
                except error.PatchApplicationError as err:
                    raise error.StateError(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.items():
                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))

                    if dirstate.get_entry(realname).maybe_clean:
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified

                        # XXX-PENDINGCHANGE: We should clarify the context in
                        # which this function is called to make sure it
                        # already called within a `pendingchange`, However we
                        # are taking a shortcut here in order to be able to
                        # quickly deprecated the older API.
                        with dirstate.changing_parents(repo):
                            dirstate.update_file(
                                realname,
                                p1_tracked=True,
                                wc_tracked=True,
                                possibly_dirty=True,
                            )

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup: a failed restore must not mask the
                # primary result/exception of the record operation
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
666
665
667
666
class dirnode:
    """
    Represent a directory in user working copy with information required for
    the purpose of tersing its status.

    path is the path to the directory, without a trailing '/'

    statuses is a set of statuses of all files in this directory (this includes
    all the files in all the subdirectories too)

    files is a list of files which are direct child of this directory

    subdirs is a dictionary of sub-directory name as the key and it's own
    dirnode object as the value
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not direct child of this directory, we traverse to the
        directory of which this file is a direct child of and add the file
        there.
        """
        if b'/' not in filename:
            # no path separator: the file lives directly in this directory
            self._addfileindir(filename, status)
        else:
            # descend into the first path component, creating the child
            # dirnode on demand
            child, remainder = filename.split(b'/', 1)
            if child not in self.subdirs:
                childpath = pathutil.join(self.path, child)
                self.subdirs[child] = dirnode(childpath)
            self.subdirs[child].addfile(remainder, status)

        # record the status seen anywhere below this directory
        self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, st in self.files:
            yield st, pathutil.join(self.path, name)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to
        terse that status. -> yield (status, dirpath). dirpath ends in '/'.

        2) Otherwise: yield (status, filepath) for the direct children of
        this directory, then recurse into each subdirectory.
        """
        if len(self.statuses) == 1:
            # NOTE: pop() empties the set; later walks over this node would
            # no longer see the status — matches upstream single-pass usage
            lone = self.statuses.pop()

            # terse only when this status was requested via --terse
            if lone in terseargs:
                yield lone, self.path + b'/'
                return

        # not tersable: emit our own files first...
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # ...then everything from the subdirectories
        for child in self.subdirs.values():
            for st, fpath in child.tersewalk(terseargs):
                yield st, fpath
767
766
768
767
def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory shares the same status.

    statuslist is scmutil.status() object which contains a list of files for
    each status.
    terseargs is string which is passed by the user as the argument to `--terse`
    flag.

    The function makes a tree of objects of dirnode class, and at each node it
    stores the information required to know whether we can terse a certain
    directory or not.
    """
    # the order matters here as that is used to produce final list
    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # reject unknown terse flags up front
    for flag in pycompat.bytestr(terseargs):
        if flag not in allst:
            raise error.InputError(_(b"'%s' not recognized") % flag)

    # build the dirnode tree rooted at the repository root
    rootobj = dirnode(b'')
    pstatus = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    tersedict = {}
    for attrname in pstatus:
        # single-letter status code is the first byte of the attribute name
        statuschar = attrname[0:1]
        for fname in getattr(statuslist, attrname):
            rootobj.addfile(fname, statuschar)
        tersedict[statuschar] = []

    # the root directory itself is never tersed: list its files verbatim
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # walk each subdirectory, tersing wherever possible
    for child in rootobj.subdirs.values():
        for st, fpath in child.tersewalk(terseargs):
            tersedict[st].append(fpath)

    # emit sorted per-status lists in the canonical m/a/r/d/u/i/c order
    return scmutil.status(*(sorted(tersedict[st]) for st in allst))
824
823
825
824
826 def _commentlines(raw):
825 def _commentlines(raw):
827 '''Surround lineswith a comment char and a new line'''
826 '''Surround lineswith a comment char and a new line'''
828 lines = raw.splitlines()
827 lines = raw.splitlines()
829 commentedlines = [b'# %s' % line for line in lines]
828 commentedlines = [b'# %s' % line for line in lines]
830 return b'\n'.join(commentedlines) + b'\n'
829 return b'\n'.join(commentedlines) + b'\n'
831
830
832
831
@attr.s(frozen=True)
class morestatus:
    """Extra information shown by `hg status` about unfinished operations.

    Instances carry the name/help text of any interrupted multi-step
    command and the list of unresolved merge conflicts, and know how to
    emit them through a formatter.
    """

    repo = attr.ib()
    unfinishedop = attr.ib()
    unfinishedmsg = attr.ib()
    activemerge = attr.ib()
    unresolvedpaths = attr.ib()
    # Paths already emitted via formatfile(); consulted by
    # _formatconflicts() to avoid duplicate items.  Use a per-instance
    # factory: the previous `default=set()` built ONE set at
    # class-definition time that was shared by every instance, so entries
    # recorded by one morestatus object leaked into later ones within the
    # same process (e.g. a long-lived command server).
    _formattedpaths = attr.ib(init=False, default=attr.Factory(set))
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        """Record that ``path`` was output; tag it when it is unresolved."""
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        """Emit the trailing unfinished-state summary through ``fm``."""
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

            if self.unfinishedop:
                fm.data(unfinished=self.unfinishedop)
                statemsg = (
                    _(b'The repository is in an unfinished *%s* state.')
                    % self.unfinishedop
                )
                fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
            if self.unfinishedmsg:
                fm.data(unfinishedmsg=self.unfinishedmsg)

            # May also start new data items.
            self._formatconflicts(fm)

            if self.unfinishedmsg:
                fm.plain(
                    b'%s\n' % _commentlines(self.unfinishedmsg),
                    label=self._label,
                )

    def _formatconflicts(self, fm):
        # Only relevant while a merge is in progress.
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b' %s'
                    % util.pathto(self.repo.root, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    b'''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                fm.context(repo=self.repo)
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
911
910
912
911
def readmorestatus(repo):
    """Returns a morestatus object if the repo has unfinished state."""
    # Interrupted multi-step command (rebase, graft, ...), if any.
    statetuple = statemod.getrepostate(repo)
    mergestate = mergestatemod.mergestate.read(repo)
    activemerge = mergestate.active()
    if not statetuple and not activemerge:
        # Nothing in progress: callers expect None, not an empty object.
        return None

    unfinishedop = unfinishedmsg = unresolved = None
    if statetuple:
        unfinishedop, unfinishedmsg = statetuple
    if activemerge:
        # Sorted for stable, user-friendly output ordering.
        unresolved = sorted(mergestate.unresolved())
    return morestatus(
        repo, unfinishedop, unfinishedmsg, activemerge, unresolved
    )
929
928
930
929
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    normal = {}
    debug = {}

    # An exact table-key hit short-circuits the scan, so that the plain
    # "log" alias beats the "log|history" entry.
    entries = [cmd] if cmd in table else table.keys()

    allcmds = []
    for key in entries:
        aliases = parsealiases(key)
        allcmds.extend(aliases)
        if cmd in aliases:
            match = cmd
        elif strict:
            match = None
        else:
            # Accept the first alias that has cmd as a prefix.
            match = next((a for a in aliases if a.startswith(cmd)), None)
        if match is None:
            continue
        isdebug = aliases[0].startswith(b"debug") or match.startswith(b"debug")
        bucket = debug if isdebug else normal
        bucket[match] = (aliases, table[key])

    # Debug commands are only offered when nothing normal matched.
    if not normal and debug:
        normal = debug

    return normal, allcmds
968
967
969
968
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    matches, allcmds = findpossible(cmd, table, strict)

    # An exact name match wins outright, even with several candidates.
    try:
        return matches[cmd]
    except KeyError:
        pass

    if len(matches) > 1:
        raise error.AmbiguousCommand(cmd, sorted(matches))

    if matches:
        # Exactly one prefix match: hand back its entry.
        return next(iter(matches.values()))

    raise error.UnknownCommand(cmd, allcmds)
985
984
986
985
def changebranch(ui, repo, revs, label, opts):
    """Change the branch name of given revs to label

    Rewrites each changeset in ``revs`` (which must form a linear,
    topologically-headed stack) as a new changeset on branch ``label``,
    then obsoletes the originals and moves bookmarks/working copy.
    """

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = logcmdutil.revrange(repo, revs)
        if not revs:
            raise error.InputError(b"empty revision set")
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            # Multiple roots would mean rewriting disjoint stacks.
            raise error.InputError(
                _(b"cannot change branch of non-linear revisions")
            )
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        # Branches of the root's parents: moving "back" onto one of those
        # is allowed even when the branch name already exists.
        rpb = {parent.branch() for parent in root.parents()}
        if (
            not opts.get(b'force')
            and label not in rpb
            and label in repo.branchmap()
        ):
            raise error.InputError(
                _(b"a branch of the same name already exists")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.InputError(
                _(b"cannot change branch in middle of a stack")
            )

        # old node -> (new node,) for every changeset we rewrite
        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            def filectxfn(repo, newctx, path):
                # Reuse the original file contents; missing paths signal
                # removal to memctx.
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            # Record provenance of the rewrite in the changeset extras.
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1088
1087
1089
1088
def findrepo(p):
    """Walk up from directory ``p`` looking for a ``.hg`` directory.

    Returns the repository root, or None when the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, b".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() is a fixed point only at the root: nothing found.
            return None
        p = parent
    return p
1097
1096
1098
1097
def bailifchanged(repo, merge=True, hint=None):
    """enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """
    # A second dirstate parent means an uncommitted merge is pending.
    if merge and repo.dirstate.p2() != repo.nullid:
        raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)

    status = repo.status()
    dirty = (
        status.modified or status.added or status.removed or status.deleted
    )
    if dirty:
        raise error.StateError(_(b'uncommitted changes'), hint=hint)

    # Recurse into subrepositories: their changes also count as dirt.
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
1116
1115
1117
1116
def logmessage(ui, opts):
    """get the log message according to -m and -l option"""
    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    if message or not logfile:
        # Either -m was given, or neither option was: nothing to read.
        return message

    # No -m: take the message from the log file ('-' means stdin).
    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        return b'\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(
            _(b"can't read commit message '%s': %s")
            % (logfile, encoding.strtolocal(inst.strerror))
        )
1138
1137
1139
1138
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # A changeset with two parents is a merge.
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (b".merge" if ismerge else b".normal")
1156
1155
1157
1156
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        # Editing was requested (or MQ-specific hooks are in play):
        # always force the interactive editor.
        def forceeditor(r, c, s):
            return commitforceeditor(
                r,
                c,
                s,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return forceeditor

    if editform:
        # Plain editor, but with the requested editform threaded through.
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)

    return commiteditor
1188
1187
1189
1188
def _escapecommandtemplate(tmpl):
    """Backslash-escape the literal segments of template ``tmpl``."""

    def _piece(typ, start, end):
        segment = tmpl[start:end]
        # Only literal string segments need escaping; template keywords
        # and functions pass through untouched.
        if typ == b'string':
            return stringutil.escapestr(segment)
        return segment

    return b''.join(
        _piece(typ, start, end)
        for typ, start, end in templater.scantemplate(tmpl, raw=True)
    )
1198
1197
1199
1198
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        # Preserve falsy input (empty bytes / None) unchanged.
        return tmpl
    # Escape backslashes in literal segments before compiling, so Windows
    # path separators survive template parsing.
    t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
    return t.renderdefault(props)
1217
1216
1218
1217
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    resources = formatter.templateresources(repo.ui, repo)
    templ = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords, resources=resources
    )
    # Seed the mapping with the changeset; caller props are applied on
    # top and may therefore override it.
    mapping = {b'ctx': ctx}
    if props:
        mapping.update(props)
    return templ.renderdefault(mapping)
1234
1233
1235
1234
def format_changeset_summary(ui, ctx, command=None, default_spec=None):
    """Format a changeset summary (one line)."""
    # Template lookup order: per-command config, generic config, caller's
    # default_spec, then the built-in fallback below.
    spec = None
    if command:
        spec = ui.config(
            b'command-templates', b'oneline-summary.%s' % command, None
        )
    if not spec:
        spec = ui.config(b'command-templates', b'oneline-summary')
    if not spec:
        spec = default_spec
    if not spec:
        # Built-in fallback: "rev:shortnode [names] \"first desc line\"",
        # skipping the branches namespace.
        spec = (
            b'{separate(" ", '
            b'label("oneline-summary.changeset", "{rev}:{node|short}")'
            b', '
            b'join(filter(namespaces % "{ifeq(namespace, "branches", "", join(names % "{label("oneline-summary.{namespace}", name)}", " "))}"), " ")'
            b')} '
            b'"{label("oneline-summary.desc", desc|firstline)}"'
        )
    text = rendertemplate(ctx, spec)
    # Guarantee a single line even if the template expanded to several.
    return text.split(b'\n')[0]
1258
1257
1259
1258
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # Map of %-spec characters to their template-language equivalents.
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    # Some specs are only valid when the caller supplied the value.
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # Both known: zero-pad the sequence number to the total's width.
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            # Already template syntax: pass through untouched.
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # No more %-specs in this segment: escape the rest.
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                # '%' is the last character: there is no spec letter.
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            c = pat[n + 1 : n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % c
                )
    return b''.join(newname)
1329
1328
1330
1329
def makefilename(ctx, pat, **props):
    """Expand an old-style %-format output filename pattern for ``ctx``."""
    if not pat:
        return pat
    # Translate the legacy %-spec pattern into template syntax first.
    tmpl = _buildfntemplate(pat, **props)
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
1339
1338
1340
1339
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    # An empty/None pattern and the conventional '-' both mean stdio.
    if pat:
        return pat == b'-'
    return True
1344
1343
1345
1344
class _unclosablefile:
    """Proxy around a file object whose close() is a no-op.

    Used to hand out shared streams (e.g. the ui's stdio) without letting
    callers close the underlying file.
    """

    def __init__(self, fp):
        self._fp = fp

    def close(self):
        # Deliberately ignore close requests: the wrapped stream is shared.
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        # Delegate every other attribute to the wrapped file object.
        return getattr(self._fp, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Close nothing on context exit; propagate any exception.
        pass
1364
1363
1365
1364
1366 def makefileobj(ctx, pat, mode=b'wb', **props):
1365 def makefileobj(ctx, pat, mode=b'wb', **props):
1367 writable = mode not in (b'r', b'rb')
1366 writable = mode not in (b'r', b'rb')
1368
1367
1369 if isstdiofilename(pat):
1368 if isstdiofilename(pat):
1370 repo = ctx.repo()
1369 repo = ctx.repo()
1371 if writable:
1370 if writable:
1372 fp = repo.ui.fout
1371 fp = repo.ui.fout
1373 else:
1372 else:
1374 fp = repo.ui.fin
1373 fp = repo.ui.fin
1375 return _unclosablefile(fp)
1374 return _unclosablefile(fp)
1376 fn = makefilename(ctx, pat, **props)
1375 fn = makefilename(ctx, pat, **props)
1377 return open(fn, mode)
1376 return open(fn, mode)
1378
1377
1379
1378
1380 def openstorage(repo, cmd, file_, opts, returnrevlog=False):
1379 def openstorage(repo, cmd, file_, opts, returnrevlog=False):
1381 """opens the changelog, manifest, a filelog or a given revlog"""
1380 """opens the changelog, manifest, a filelog or a given revlog"""
1382 cl = opts[b'changelog']
1381 cl = opts[b'changelog']
1383 mf = opts[b'manifest']
1382 mf = opts[b'manifest']
1384 dir = opts[b'dir']
1383 dir = opts[b'dir']
1385 msg = None
1384 msg = None
1386 if cl and mf:
1385 if cl and mf:
1387 msg = _(b'cannot specify --changelog and --manifest at the same time')
1386 msg = _(b'cannot specify --changelog and --manifest at the same time')
1388 elif cl and dir:
1387 elif cl and dir:
1389 msg = _(b'cannot specify --changelog and --dir at the same time')
1388 msg = _(b'cannot specify --changelog and --dir at the same time')
1390 elif cl or mf or dir:
1389 elif cl or mf or dir:
1391 if file_:
1390 if file_:
1392 msg = _(b'cannot specify filename with --changelog or --manifest')
1391 msg = _(b'cannot specify filename with --changelog or --manifest')
1393 elif not repo:
1392 elif not repo:
1394 msg = _(
1393 msg = _(
1395 b'cannot specify --changelog or --manifest or --dir '
1394 b'cannot specify --changelog or --manifest or --dir '
1396 b'without a repository'
1395 b'without a repository'
1397 )
1396 )
1398 if msg:
1397 if msg:
1399 raise error.InputError(msg)
1398 raise error.InputError(msg)
1400
1399
1401 r = None
1400 r = None
1402 if repo:
1401 if repo:
1403 if cl:
1402 if cl:
1404 r = repo.unfiltered().changelog
1403 r = repo.unfiltered().changelog
1405 elif dir:
1404 elif dir:
1406 if not scmutil.istreemanifest(repo):
1405 if not scmutil.istreemanifest(repo):
1407 raise error.InputError(
1406 raise error.InputError(
1408 _(
1407 _(
1409 b"--dir can only be used on repos with "
1408 b"--dir can only be used on repos with "
1410 b"treemanifest enabled"
1409 b"treemanifest enabled"
1411 )
1410 )
1412 )
1411 )
1413 if not dir.endswith(b'/'):
1412 if not dir.endswith(b'/'):
1414 dir = dir + b'/'
1413 dir = dir + b'/'
1415 dirlog = repo.manifestlog.getstorage(dir)
1414 dirlog = repo.manifestlog.getstorage(dir)
1416 if len(dirlog):
1415 if len(dirlog):
1417 r = dirlog
1416 r = dirlog
1418 elif mf:
1417 elif mf:
1419 r = repo.manifestlog.getstorage(b'')
1418 r = repo.manifestlog.getstorage(b'')
1420 elif file_:
1419 elif file_:
1421 filelog = repo.file(file_)
1420 filelog = repo.file(file_)
1422 if len(filelog):
1421 if len(filelog):
1423 r = filelog
1422 r = filelog
1424
1423
1425 # Not all storage may be revlogs. If requested, try to return an actual
1424 # Not all storage may be revlogs. If requested, try to return an actual
1426 # revlog instance.
1425 # revlog instance.
1427 if returnrevlog:
1426 if returnrevlog:
1428 if isinstance(r, revlog.revlog):
1427 if isinstance(r, revlog.revlog):
1429 pass
1428 pass
1430 elif util.safehasattr(r, b'_revlog'):
1429 elif util.safehasattr(r, b'_revlog'):
1431 r = r._revlog # pytype: disable=attribute-error
1430 r = r._revlog # pytype: disable=attribute-error
1432 elif r is not None:
1431 elif r is not None:
1433 raise error.InputError(
1432 raise error.InputError(
1434 _(b'%r does not appear to be a revlog') % r
1433 _(b'%r does not appear to be a revlog') % r
1435 )
1434 )
1436
1435
1437 if not r:
1436 if not r:
1438 if not returnrevlog:
1437 if not returnrevlog:
1439 raise error.InputError(_(b'cannot give path to non-revlog'))
1438 raise error.InputError(_(b'cannot give path to non-revlog'))
1440
1439
1441 if not file_:
1440 if not file_:
1442 raise error.CommandError(cmd, _(b'invalid arguments'))
1441 raise error.CommandError(cmd, _(b'invalid arguments'))
1443 if not os.path.isfile(file_):
1442 if not os.path.isfile(file_):
1444 raise error.InputError(_(b"revlog '%s' not found") % file_)
1443 raise error.InputError(_(b"revlog '%s' not found") % file_)
1445
1444
1446 target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_)
1445 target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_)
1447 r = revlog.revlog(
1446 r = revlog.revlog(
1448 vfsmod.vfs(encoding.getcwd(), audit=False),
1447 vfsmod.vfs(encoding.getcwd(), audit=False),
1449 target=target,
1448 target=target,
1450 radix=file_[:-2],
1449 radix=file_[:-2],
1451 )
1450 )
1452 return r
1451 return r
1453
1452
1454
1453
1455 def openrevlog(repo, cmd, file_, opts):
1454 def openrevlog(repo, cmd, file_, opts):
1456 """Obtain a revlog backing storage of an item.
1455 """Obtain a revlog backing storage of an item.
1457
1456
1458 This is similar to ``openstorage()`` except it always returns a revlog.
1457 This is similar to ``openstorage()`` except it always returns a revlog.
1459
1458
1460 In most cases, a caller cares about the main storage object - not the
1459 In most cases, a caller cares about the main storage object - not the
1461 revlog backing it. Therefore, this function should only be used by code
1460 revlog backing it. Therefore, this function should only be used by code
1462 that needs to examine low-level revlog implementation details. e.g. debug
1461 that needs to examine low-level revlog implementation details. e.g. debug
1463 commands.
1462 commands.
1464 """
1463 """
1465 return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1464 return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1466
1465
1467
1466
1468 def copy(ui, repo, pats, opts, rename=False):
1467 def copy(ui, repo, pats, opts, rename=False):
1469 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1468 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1470
1469
1471 # called with the repo lock held
1470 # called with the repo lock held
1472 #
1471 #
1473 # hgsep => pathname that uses "/" to separate directories
1472 # hgsep => pathname that uses "/" to separate directories
1474 # ossep => pathname that uses os.sep to separate directories
1473 # ossep => pathname that uses os.sep to separate directories
1475 cwd = repo.getcwd()
1474 cwd = repo.getcwd()
1476 targets = {}
1475 targets = {}
1477 forget = opts.get(b"forget")
1476 forget = opts.get(b"forget")
1478 after = opts.get(b"after")
1477 after = opts.get(b"after")
1479 dryrun = opts.get(b"dry_run")
1478 dryrun = opts.get(b"dry_run")
1480 rev = opts.get(b'at_rev')
1479 rev = opts.get(b'at_rev')
1481 if rev:
1480 if rev:
1482 if not forget and not after:
1481 if not forget and not after:
1483 # TODO: Remove this restriction and make it also create the copy
1482 # TODO: Remove this restriction and make it also create the copy
1484 # targets (and remove the rename source if rename==True).
1483 # targets (and remove the rename source if rename==True).
1485 raise error.InputError(_(b'--at-rev requires --after'))
1484 raise error.InputError(_(b'--at-rev requires --after'))
1486 ctx = logcmdutil.revsingle(repo, rev)
1485 ctx = logcmdutil.revsingle(repo, rev)
1487 if len(ctx.parents()) > 1:
1486 if len(ctx.parents()) > 1:
1488 raise error.InputError(
1487 raise error.InputError(
1489 _(b'cannot mark/unmark copy in merge commit')
1488 _(b'cannot mark/unmark copy in merge commit')
1490 )
1489 )
1491 else:
1490 else:
1492 ctx = repo[None]
1491 ctx = repo[None]
1493
1492
1494 pctx = ctx.p1()
1493 pctx = ctx.p1()
1495
1494
1496 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1495 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1497
1496
1498 if forget:
1497 if forget:
1499 if ctx.rev() is None:
1498 if ctx.rev() is None:
1500 new_ctx = ctx
1499 new_ctx = ctx
1501 else:
1500 else:
1502 if len(ctx.parents()) > 1:
1501 if len(ctx.parents()) > 1:
1503 raise error.InputError(_(b'cannot unmark copy in merge commit'))
1502 raise error.InputError(_(b'cannot unmark copy in merge commit'))
1504 # avoid cycle context -> subrepo -> cmdutil
1503 # avoid cycle context -> subrepo -> cmdutil
1505 from . import context
1504 from . import context
1506
1505
1507 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1506 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1508 new_ctx = context.overlayworkingctx(repo)
1507 new_ctx = context.overlayworkingctx(repo)
1509 new_ctx.setbase(ctx.p1())
1508 new_ctx.setbase(ctx.p1())
1510 mergemod.graft(repo, ctx, wctx=new_ctx)
1509 mergemod.graft(repo, ctx, wctx=new_ctx)
1511
1510
1512 match = scmutil.match(ctx, pats, opts)
1511 match = scmutil.match(ctx, pats, opts)
1513
1512
1514 current_copies = ctx.p1copies()
1513 current_copies = ctx.p1copies()
1515 current_copies.update(ctx.p2copies())
1514 current_copies.update(ctx.p2copies())
1516
1515
1517 uipathfn = scmutil.getuipathfn(repo)
1516 uipathfn = scmutil.getuipathfn(repo)
1518 for f in ctx.walk(match):
1517 for f in ctx.walk(match):
1519 if f in current_copies:
1518 if f in current_copies:
1520 new_ctx[f].markcopied(None)
1519 new_ctx[f].markcopied(None)
1521 elif match.exact(f):
1520 elif match.exact(f):
1522 ui.warn(
1521 ui.warn(
1523 _(
1522 _(
1524 b'%s: not unmarking as copy - file is not marked as copied\n'
1523 b'%s: not unmarking as copy - file is not marked as copied\n'
1525 )
1524 )
1526 % uipathfn(f)
1525 % uipathfn(f)
1527 )
1526 )
1528
1527
1529 if ctx.rev() is not None:
1528 if ctx.rev() is not None:
1530 with repo.lock():
1529 with repo.lock():
1531 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1530 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1532 new_node = mem_ctx.commit()
1531 new_node = mem_ctx.commit()
1533
1532
1534 if repo.dirstate.p1() == ctx.node():
1533 if repo.dirstate.p1() == ctx.node():
1535 with repo.dirstate.changing_parents(repo):
1534 with repo.dirstate.changing_parents(repo):
1536 scmutil.movedirstate(repo, repo[new_node])
1535 scmutil.movedirstate(repo, repo[new_node])
1537 replacements = {ctx.node(): [new_node]}
1536 replacements = {ctx.node(): [new_node]}
1538 scmutil.cleanupnodes(
1537 scmutil.cleanupnodes(
1539 repo, replacements, b'uncopy', fixphase=True
1538 repo, replacements, b'uncopy', fixphase=True
1540 )
1539 )
1541
1540
1542 return
1541 return
1543
1542
1544 pats = scmutil.expandpats(pats)
1543 pats = scmutil.expandpats(pats)
1545 if not pats:
1544 if not pats:
1546 raise error.InputError(_(b'no source or destination specified'))
1545 raise error.InputError(_(b'no source or destination specified'))
1547 if len(pats) == 1:
1546 if len(pats) == 1:
1548 raise error.InputError(_(b'no destination specified'))
1547 raise error.InputError(_(b'no destination specified'))
1549 dest = pats.pop()
1548 dest = pats.pop()
1550
1549
1551 def walkpat(pat):
1550 def walkpat(pat):
1552 srcs = []
1551 srcs = []
1553 # TODO: Inline and simplify the non-working-copy version of this code
1552 # TODO: Inline and simplify the non-working-copy version of this code
1554 # since it shares very little with the working-copy version of it.
1553 # since it shares very little with the working-copy version of it.
1555 ctx_to_walk = ctx if ctx.rev() is None else pctx
1554 ctx_to_walk = ctx if ctx.rev() is None else pctx
1556 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1555 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1557 for abs in ctx_to_walk.walk(m):
1556 for abs in ctx_to_walk.walk(m):
1558 rel = uipathfn(abs)
1557 rel = uipathfn(abs)
1559 exact = m.exact(abs)
1558 exact = m.exact(abs)
1560 if abs not in ctx:
1559 if abs not in ctx:
1561 if abs in pctx:
1560 if abs in pctx:
1562 if not after:
1561 if not after:
1563 if exact:
1562 if exact:
1564 ui.warn(
1563 ui.warn(
1565 _(
1564 _(
1566 b'%s: not copying - file has been marked '
1565 b'%s: not copying - file has been marked '
1567 b'for remove\n'
1566 b'for remove\n'
1568 )
1567 )
1569 % rel
1568 % rel
1570 )
1569 )
1571 continue
1570 continue
1572 else:
1571 else:
1573 if exact:
1572 if exact:
1574 ui.warn(
1573 ui.warn(
1575 _(b'%s: not copying - file is not managed\n') % rel
1574 _(b'%s: not copying - file is not managed\n') % rel
1576 )
1575 )
1577 continue
1576 continue
1578
1577
1579 # abs: hgsep
1578 # abs: hgsep
1580 # rel: ossep
1579 # rel: ossep
1581 srcs.append((abs, rel, exact))
1580 srcs.append((abs, rel, exact))
1582 return srcs
1581 return srcs
1583
1582
1584 if ctx.rev() is not None:
1583 if ctx.rev() is not None:
1585 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1584 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1586 absdest = pathutil.canonpath(repo.root, cwd, dest)
1585 absdest = pathutil.canonpath(repo.root, cwd, dest)
1587 if ctx.hasdir(absdest):
1586 if ctx.hasdir(absdest):
1588 raise error.InputError(
1587 raise error.InputError(
1589 _(b'%s: --at-rev does not support a directory as destination')
1588 _(b'%s: --at-rev does not support a directory as destination')
1590 % uipathfn(absdest)
1589 % uipathfn(absdest)
1591 )
1590 )
1592 if absdest not in ctx:
1591 if absdest not in ctx:
1593 raise error.InputError(
1592 raise error.InputError(
1594 _(b'%s: copy destination does not exist in %s')
1593 _(b'%s: copy destination does not exist in %s')
1595 % (uipathfn(absdest), ctx)
1594 % (uipathfn(absdest), ctx)
1596 )
1595 )
1597
1596
1598 # avoid cycle context -> subrepo -> cmdutil
1597 # avoid cycle context -> subrepo -> cmdutil
1599 from . import context
1598 from . import context
1600
1599
1601 copylist = []
1600 copylist = []
1602 for pat in pats:
1601 for pat in pats:
1603 srcs = walkpat(pat)
1602 srcs = walkpat(pat)
1604 if not srcs:
1603 if not srcs:
1605 continue
1604 continue
1606 for abs, rel, exact in srcs:
1605 for abs, rel, exact in srcs:
1607 copylist.append(abs)
1606 copylist.append(abs)
1608
1607
1609 if not copylist:
1608 if not copylist:
1610 raise error.InputError(_(b'no files to copy'))
1609 raise error.InputError(_(b'no files to copy'))
1611 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1610 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1612 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1611 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1613 # existing functions below.
1612 # existing functions below.
1614 if len(copylist) != 1:
1613 if len(copylist) != 1:
1615 raise error.InputError(_(b'--at-rev requires a single source'))
1614 raise error.InputError(_(b'--at-rev requires a single source'))
1616
1615
1617 new_ctx = context.overlayworkingctx(repo)
1616 new_ctx = context.overlayworkingctx(repo)
1618 new_ctx.setbase(ctx.p1())
1617 new_ctx.setbase(ctx.p1())
1619 mergemod.graft(repo, ctx, wctx=new_ctx)
1618 mergemod.graft(repo, ctx, wctx=new_ctx)
1620
1619
1621 new_ctx.markcopied(absdest, copylist[0])
1620 new_ctx.markcopied(absdest, copylist[0])
1622
1621
1623 with repo.lock():
1622 with repo.lock():
1624 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1623 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1625 new_node = mem_ctx.commit()
1624 new_node = mem_ctx.commit()
1626
1625
1627 if repo.dirstate.p1() == ctx.node():
1626 if repo.dirstate.p1() == ctx.node():
1628 with repo.dirstate.changing_parents(repo):
1627 with repo.dirstate.changing_parents(repo):
1629 scmutil.movedirstate(repo, repo[new_node])
1628 scmutil.movedirstate(repo, repo[new_node])
1630 replacements = {ctx.node(): [new_node]}
1629 replacements = {ctx.node(): [new_node]}
1631 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1630 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1632
1631
1633 return
1632 return
1634
1633
1635 # abssrc: hgsep
1634 # abssrc: hgsep
1636 # relsrc: ossep
1635 # relsrc: ossep
1637 # otarget: ossep
1636 # otarget: ossep
1638 def copyfile(abssrc, relsrc, otarget, exact):
1637 def copyfile(abssrc, relsrc, otarget, exact):
1639 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1638 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1640 if b'/' in abstarget:
1639 if b'/' in abstarget:
1641 # We cannot normalize abstarget itself, this would prevent
1640 # We cannot normalize abstarget itself, this would prevent
1642 # case only renames, like a => A.
1641 # case only renames, like a => A.
1643 abspath, absname = abstarget.rsplit(b'/', 1)
1642 abspath, absname = abstarget.rsplit(b'/', 1)
1644 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1643 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1645 reltarget = repo.pathto(abstarget, cwd)
1644 reltarget = repo.pathto(abstarget, cwd)
1646 target = repo.wjoin(abstarget)
1645 target = repo.wjoin(abstarget)
1647 src = repo.wjoin(abssrc)
1646 src = repo.wjoin(abssrc)
1648 entry = repo.dirstate.get_entry(abstarget)
1647 entry = repo.dirstate.get_entry(abstarget)
1649
1648
1650 already_commited = entry.tracked and not entry.added
1649 already_commited = entry.tracked and not entry.added
1651
1650
1652 scmutil.checkportable(ui, abstarget)
1651 scmutil.checkportable(ui, abstarget)
1653
1652
1654 # check for collisions
1653 # check for collisions
1655 prevsrc = targets.get(abstarget)
1654 prevsrc = targets.get(abstarget)
1656 if prevsrc is not None:
1655 if prevsrc is not None:
1657 ui.warn(
1656 ui.warn(
1658 _(b'%s: not overwriting - %s collides with %s\n')
1657 _(b'%s: not overwriting - %s collides with %s\n')
1659 % (
1658 % (
1660 reltarget,
1659 reltarget,
1661 repo.pathto(abssrc, cwd),
1660 repo.pathto(abssrc, cwd),
1662 repo.pathto(prevsrc, cwd),
1661 repo.pathto(prevsrc, cwd),
1663 )
1662 )
1664 )
1663 )
1665 return True # report a failure
1664 return True # report a failure
1666
1665
1667 # check for overwrites
1666 # check for overwrites
1668 exists = os.path.lexists(target)
1667 exists = os.path.lexists(target)
1669 samefile = False
1668 samefile = False
1670 if exists and abssrc != abstarget:
1669 if exists and abssrc != abstarget:
1671 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1670 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1672 abstarget
1671 abstarget
1673 ):
1672 ):
1674 if not rename:
1673 if not rename:
1675 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1674 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1676 return True # report a failure
1675 return True # report a failure
1677 exists = False
1676 exists = False
1678 samefile = True
1677 samefile = True
1679
1678
1680 if not after and exists or after and already_commited:
1679 if not after and exists or after and already_commited:
1681 if not opts[b'force']:
1680 if not opts[b'force']:
1682 if already_commited:
1681 if already_commited:
1683 msg = _(b'%s: not overwriting - file already committed\n')
1682 msg = _(b'%s: not overwriting - file already committed\n')
1684 # Check if if the target was added in the parent and the
1683 # Check if if the target was added in the parent and the
1685 # source already existed in the grandparent.
1684 # source already existed in the grandparent.
1686 looks_like_copy_in_pctx = abstarget in pctx and any(
1685 looks_like_copy_in_pctx = abstarget in pctx and any(
1687 abssrc in gpctx and abstarget not in gpctx
1686 abssrc in gpctx and abstarget not in gpctx
1688 for gpctx in pctx.parents()
1687 for gpctx in pctx.parents()
1689 )
1688 )
1690 if looks_like_copy_in_pctx:
1689 if looks_like_copy_in_pctx:
1691 if rename:
1690 if rename:
1692 hint = _(
1691 hint = _(
1693 b"('hg rename --at-rev .' to record the rename "
1692 b"('hg rename --at-rev .' to record the rename "
1694 b"in the parent of the working copy)\n"
1693 b"in the parent of the working copy)\n"
1695 )
1694 )
1696 else:
1695 else:
1697 hint = _(
1696 hint = _(
1698 b"('hg copy --at-rev .' to record the copy in "
1697 b"('hg copy --at-rev .' to record the copy in "
1699 b"the parent of the working copy)\n"
1698 b"the parent of the working copy)\n"
1700 )
1699 )
1701 else:
1700 else:
1702 if after:
1701 if after:
1703 flags = b'--after --force'
1702 flags = b'--after --force'
1704 else:
1703 else:
1705 flags = b'--force'
1704 flags = b'--force'
1706 if rename:
1705 if rename:
1707 hint = (
1706 hint = (
1708 _(
1707 _(
1709 b"('hg rename %s' to replace the file by "
1708 b"('hg rename %s' to replace the file by "
1710 b'recording a rename)\n'
1709 b'recording a rename)\n'
1711 )
1710 )
1712 % flags
1711 % flags
1713 )
1712 )
1714 else:
1713 else:
1715 hint = (
1714 hint = (
1716 _(
1715 _(
1717 b"('hg copy %s' to replace the file by "
1716 b"('hg copy %s' to replace the file by "
1718 b'recording a copy)\n'
1717 b'recording a copy)\n'
1719 )
1718 )
1720 % flags
1719 % flags
1721 )
1720 )
1722 else:
1721 else:
1723 msg = _(b'%s: not overwriting - file exists\n')
1722 msg = _(b'%s: not overwriting - file exists\n')
1724 if rename:
1723 if rename:
1725 hint = _(
1724 hint = _(
1726 b"('hg rename --after' to record the rename)\n"
1725 b"('hg rename --after' to record the rename)\n"
1727 )
1726 )
1728 else:
1727 else:
1729 hint = _(b"('hg copy --after' to record the copy)\n")
1728 hint = _(b"('hg copy --after' to record the copy)\n")
1730 ui.warn(msg % reltarget)
1729 ui.warn(msg % reltarget)
1731 ui.warn(hint)
1730 ui.warn(hint)
1732 return True # report a failure
1731 return True # report a failure
1733
1732
1734 if after:
1733 if after:
1735 if not exists:
1734 if not exists:
1736 if rename:
1735 if rename:
1737 ui.warn(
1736 ui.warn(
1738 _(b'%s: not recording move - %s does not exist\n')
1737 _(b'%s: not recording move - %s does not exist\n')
1739 % (relsrc, reltarget)
1738 % (relsrc, reltarget)
1740 )
1739 )
1741 else:
1740 else:
1742 ui.warn(
1741 ui.warn(
1743 _(b'%s: not recording copy - %s does not exist\n')
1742 _(b'%s: not recording copy - %s does not exist\n')
1744 % (relsrc, reltarget)
1743 % (relsrc, reltarget)
1745 )
1744 )
1746 return True # report a failure
1745 return True # report a failure
1747 elif not dryrun:
1746 elif not dryrun:
1748 try:
1747 try:
1749 if exists:
1748 if exists:
1750 os.unlink(target)
1749 os.unlink(target)
1751 targetdir = os.path.dirname(target) or b'.'
1750 targetdir = os.path.dirname(target) or b'.'
1752 if not os.path.isdir(targetdir):
1751 if not os.path.isdir(targetdir):
1753 os.makedirs(targetdir)
1752 os.makedirs(targetdir)
1754 if samefile:
1753 if samefile:
1755 tmp = target + b"~hgrename"
1754 tmp = target + b"~hgrename"
1756 os.rename(src, tmp)
1755 os.rename(src, tmp)
1757 os.rename(tmp, target)
1756 os.rename(tmp, target)
1758 else:
1757 else:
1759 # Preserve stat info on renames, not on copies; this matches
1758 # Preserve stat info on renames, not on copies; this matches
1760 # Linux CLI behavior.
1759 # Linux CLI behavior.
1761 util.copyfile(src, target, copystat=rename)
1760 util.copyfile(src, target, copystat=rename)
1762 srcexists = True
1761 srcexists = True
1763 except IOError as inst:
1762 except IOError as inst:
1764 if inst.errno == errno.ENOENT:
1763 if inst.errno == errno.ENOENT:
1765 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1764 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1766 srcexists = False
1765 srcexists = False
1767 else:
1766 else:
1768 ui.warn(
1767 ui.warn(
1769 _(b'%s: cannot copy - %s\n')
1768 _(b'%s: cannot copy - %s\n')
1770 % (relsrc, encoding.strtolocal(inst.strerror))
1769 % (relsrc, encoding.strtolocal(inst.strerror))
1771 )
1770 )
1772 return True # report a failure
1771 return True # report a failure
1773
1772
1774 if ui.verbose or not exact:
1773 if ui.verbose or not exact:
1775 if rename:
1774 if rename:
1776 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1775 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1777 else:
1776 else:
1778 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1777 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1779
1778
1780 targets[abstarget] = abssrc
1779 targets[abstarget] = abssrc
1781
1780
1782 # fix up dirstate
1781 # fix up dirstate
1783 scmutil.dirstatecopy(
1782 scmutil.dirstatecopy(
1784 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1783 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1785 )
1784 )
1786 if rename and not dryrun:
1785 if rename and not dryrun:
1787 if not after and srcexists and not samefile:
1786 if not after and srcexists and not samefile:
1788 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1787 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1789 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1788 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1790 ctx.forget([abssrc])
1789 ctx.forget([abssrc])
1791
1790
1792 # pat: ossep
1791 # pat: ossep
1793 # dest ossep
1792 # dest ossep
1794 # srcs: list of (hgsep, hgsep, ossep, bool)
1793 # srcs: list of (hgsep, hgsep, ossep, bool)
1795 # return: function that takes hgsep and returns ossep
1794 # return: function that takes hgsep and returns ossep
1796 def targetpathfn(pat, dest, srcs):
1795 def targetpathfn(pat, dest, srcs):
1797 if os.path.isdir(pat):
1796 if os.path.isdir(pat):
1798 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1797 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1799 abspfx = util.localpath(abspfx)
1798 abspfx = util.localpath(abspfx)
1800 if destdirexists:
1799 if destdirexists:
1801 striplen = len(os.path.split(abspfx)[0])
1800 striplen = len(os.path.split(abspfx)[0])
1802 else:
1801 else:
1803 striplen = len(abspfx)
1802 striplen = len(abspfx)
1804 if striplen:
1803 if striplen:
1805 striplen += len(pycompat.ossep)
1804 striplen += len(pycompat.ossep)
1806 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1805 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1807 elif destdirexists:
1806 elif destdirexists:
1808 res = lambda p: os.path.join(
1807 res = lambda p: os.path.join(
1809 dest, os.path.basename(util.localpath(p))
1808 dest, os.path.basename(util.localpath(p))
1810 )
1809 )
1811 else:
1810 else:
1812 res = lambda p: dest
1811 res = lambda p: dest
1813 return res
1812 return res
1814
1813
1815 # pat: ossep
1814 # pat: ossep
1816 # dest ossep
1815 # dest ossep
1817 # srcs: list of (hgsep, hgsep, ossep, bool)
1816 # srcs: list of (hgsep, hgsep, ossep, bool)
1818 # return: function that takes hgsep and returns ossep
1817 # return: function that takes hgsep and returns ossep
1819 def targetpathafterfn(pat, dest, srcs):
1818 def targetpathafterfn(pat, dest, srcs):
1820 if matchmod.patkind(pat):
1819 if matchmod.patkind(pat):
1821 # a mercurial pattern
1820 # a mercurial pattern
1822 res = lambda p: os.path.join(
1821 res = lambda p: os.path.join(
1823 dest, os.path.basename(util.localpath(p))
1822 dest, os.path.basename(util.localpath(p))
1824 )
1823 )
1825 else:
1824 else:
1826 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1825 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1827 if len(abspfx) < len(srcs[0][0]):
1826 if len(abspfx) < len(srcs[0][0]):
1828 # A directory. Either the target path contains the last
1827 # A directory. Either the target path contains the last
1829 # component of the source path or it does not.
1828 # component of the source path or it does not.
1830 def evalpath(striplen):
1829 def evalpath(striplen):
1831 score = 0
1830 score = 0
1832 for s in srcs:
1831 for s in srcs:
1833 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1832 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1834 if os.path.lexists(t):
1833 if os.path.lexists(t):
1835 score += 1
1834 score += 1
1836 return score
1835 return score
1837
1836
1838 abspfx = util.localpath(abspfx)
1837 abspfx = util.localpath(abspfx)
1839 striplen = len(abspfx)
1838 striplen = len(abspfx)
1840 if striplen:
1839 if striplen:
1841 striplen += len(pycompat.ossep)
1840 striplen += len(pycompat.ossep)
1842 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1841 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1843 score = evalpath(striplen)
1842 score = evalpath(striplen)
1844 striplen1 = len(os.path.split(abspfx)[0])
1843 striplen1 = len(os.path.split(abspfx)[0])
1845 if striplen1:
1844 if striplen1:
1846 striplen1 += len(pycompat.ossep)
1845 striplen1 += len(pycompat.ossep)
1847 if evalpath(striplen1) > score:
1846 if evalpath(striplen1) > score:
1848 striplen = striplen1
1847 striplen = striplen1
1849 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1848 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1850 else:
1849 else:
1851 # a file
1850 # a file
1852 if destdirexists:
1851 if destdirexists:
1853 res = lambda p: os.path.join(
1852 res = lambda p: os.path.join(
1854 dest, os.path.basename(util.localpath(p))
1853 dest, os.path.basename(util.localpath(p))
1855 )
1854 )
1856 else:
1855 else:
1857 res = lambda p: dest
1856 res = lambda p: dest
1858 return res
1857 return res
1859
1858
1860 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1859 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1861 if not destdirexists:
1860 if not destdirexists:
1862 if len(pats) > 1 or matchmod.patkind(pats[0]):
1861 if len(pats) > 1 or matchmod.patkind(pats[0]):
1863 raise error.InputError(
1862 raise error.InputError(
1864 _(
1863 _(
1865 b'with multiple sources, destination must be an '
1864 b'with multiple sources, destination must be an '
1866 b'existing directory'
1865 b'existing directory'
1867 )
1866 )
1868 )
1867 )
1869 if util.endswithsep(dest):
1868 if util.endswithsep(dest):
1870 raise error.InputError(
1869 raise error.InputError(
1871 _(b'destination %s is not a directory') % dest
1870 _(b'destination %s is not a directory') % dest
1872 )
1871 )
1873
1872
1874 tfn = targetpathfn
1873 tfn = targetpathfn
1875 if after:
1874 if after:
1876 tfn = targetpathafterfn
1875 tfn = targetpathafterfn
1877 copylist = []
1876 copylist = []
1878 for pat in pats:
1877 for pat in pats:
1879 srcs = walkpat(pat)
1878 srcs = walkpat(pat)
1880 if not srcs:
1879 if not srcs:
1881 continue
1880 continue
1882 copylist.append((tfn(pat, dest, srcs), srcs))
1881 copylist.append((tfn(pat, dest, srcs), srcs))
1883 if not copylist:
1882 if not copylist:
1884 hint = None
1883 hint = None
1885 if rename:
1884 if rename:
1886 hint = _(b'maybe you meant to use --after --at-rev=.')
1885 hint = _(b'maybe you meant to use --after --at-rev=.')
1887 raise error.InputError(_(b'no files to copy'), hint=hint)
1886 raise error.InputError(_(b'no files to copy'), hint=hint)
1888
1887
1889 errors = 0
1888 errors = 0
1890 for targetpath, srcs in copylist:
1889 for targetpath, srcs in copylist:
1891 for abssrc, relsrc, exact in srcs:
1890 for abssrc, relsrc, exact in srcs:
1892 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1891 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1893 errors += 1
1892 errors += 1
1894
1893
1895 return errors != 0
1894 return errors != 0
1896
1895
1897
1896
## facility to let extension process additional data into an import patch
# list of identifier to be executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass an ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1918
1917
1919
1918
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple: a status message, the node of
    the created commit (or None), and whether partial application left
    rejects behind.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # Pull the pieces we need out of the parsed patch data; command-line
    # options take precedence over values recorded in the patch header.
    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    # no patch file extracted: nothing to do
    if not tmpname:
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    if len(parents) == 1:
        parents.append(repo[nullrev])
    if opts.get(b'exact'):
        if not nodeid or not p1:
            raise error.InputError(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullrev]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullrev]
        except error.RepoError:
            # recorded parents unknown in this repo: fall back to the
            # working-directory parents
            p1, p2 = parents
        if p2.rev() == nullrev:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    if update:
        # normal path: apply the patch to the working directory
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchParseError as e:
            raise error.InputError(
                pycompat.bytestr(e),
                hint=_(
                    b'check that whitespace in the patch has not been mangled'
                ),
            )
        except error.PatchApplicationError as e:
            # with --partial a failed hunk becomes a .rej file instead of
            # aborting the whole import
            if not partial:
                raise error.StateError(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            # let extensions inject extra changeset metadata before commit
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                # a partial import may apply no hunk at all; still commit
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass: build the commit in memory without touching the
        # working directory
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchParseError as e:
                raise error.InputError(
                    stringutil.forcebytestr(e),
                    hint=_(
                        b'check that whitespace in the patch has not been mangled'
                    ),
                )
            except error.PatchApplicationError as e:
                raise error.StateError(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )

            overrides = {}
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects
2135
2134
2136
2135
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# each function has to return a string to be added to the header, or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
2144
2143
2145
2144
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Emit one changeset as an "HG changeset patch" through formatter ``fm``.

    ``seqno`` is the 1-based position of the patch in the exported series
    (forwarded to extension header hooks).  When ``switch_parent`` is true,
    the parent list is reversed so the diff base becomes the second parent.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        parents.reverse()

    # diff base is the (possibly switched) first parent; a parentless
    # changeset diffs against the null revision
    if parents:
        prev = parents[0]
    else:
        prev = repo.nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    # only advertise non-default branches in the header
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        # plain output: stream labeled diff chunks as they are produced
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        # structured output: store the whole diff as a single blob
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
2189
2188
2190
2189
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file.

    ``dest`` names the destination for logging purposes only; pseudo
    destinations (starting with ``b'<'``, e.g. ``b'<unnamed>'``) are not
    echoed to the user.
    """
    announce = not dest.startswith(b'<')
    seqno = 0
    for rev in revs:
        seqno += 1
        if announce:
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(
            repo, repo[rev], fm, match, switch_parent, seqno, diffopts
        )
2199
2198
2200
2199
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets to possibly multiple files.

    Each revision's destination file name is produced from ``fntemplate``;
    several revisions may map to the same file, in which case they are
    written in sequence to that file.
    """
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    # group (seqno, rev) pairs per generated file name, preserving the
    # order in which file names are first seen
    filemap = util.sortdict()
    for seqno, rev in enumerate(revs, 1):
        dest = makefilename(
            repo[rev], fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        filemap.setdefault(dest, []).append((seqno, rev))

    for dest, entries in filemap.items():
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note(b"%s\n" % dest)
            for seqno, rev in entries:
                fm.startitem()
                _exportsingle(
                    repo, repo[rev], fm, match, switch_parent, seqno, diffopts
                )
2225
2224
2226
2225
def _prefetchchangedfiles(repo, revs, match):
    """Prefetch, in one batch, every file touched by ``revs``.

    The optional ``match`` narrows the set of files considered; a falsy
    matcher means "all files".
    """
    touched = {
        f
        for rev in revs
        for f in repo[rev].files()
        if not match or match(f)
    }
    matcher = scmutil.matchfiles(repo, touched)
    scmutil.prefetchfiles(repo, [(rev, matcher) for rev in revs])
2236
2235
2237
2236
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    """export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
        Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
          the given template.
        Otherwise: All revs will be written to basefm.
    """
    # warm the file cache before any diff generation
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        # route each revision to a template-derived file name
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
    else:
        # everything goes through the base formatter
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
2279
2278
2280
2279
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream."""
    # stream objects without a name (e.g. BytesIO) get a placeholder label
    dest = getattr(fp, 'name', b'<unnamed>')
    _prefetchchangedfiles(repo, revs, match)

    with formatter.formatter(repo.ui, fp, b'export', {}) as fm:
        _exportfile(repo, revs, fm, dest, switch_parent, opts, match)
2288
2287
2289
2288
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    successors = marker.succnodes()
    fm.condwrite(
        successors,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, successors), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        formatted_parents = fm.formatlist(
            map(hex, parentnodes), name=b'node', sep=b', '
        )
        fm.write(b'parentnodes', b'{%s} ', formatted_parents)
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    # the date is displayed separately above, so drop it from the metadata
    meta = marker.metadata().copy()
    meta.pop(b'date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
2320
2319
2321
2320
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec."""
    matching = repo.revs(b'date(%s)', date)
    try:
        # max() raises ValueError on an empty revset
        rev = matching.max()
    except ValueError:
        raise error.InputError(_(b"revision matching date not found"))

    when = dateutil.datestr(repo[rev].date())
    ui.status(_(b"found revision %d from %s\n") % (rev, when))
    return b'%d' % rev
2335
2334
2336
2335
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule files matching ``match`` for addition, recursing into subrepos.

    ``explicitonly`` restricts implicit (pattern-matched) additions to files
    listed explicitly.  Returns the list of files that could not be added
    (including failures reported by subrepositories, prefixed with their
    subrepo path).
    """
    # files the matcher flagged as bad (e.g. nonexistent explicit paths)
    bad = []

    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit added names for case collisions on case-insensitive setups
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        # explicit names are always added; pattern matches only when the
        # file is untracked and actually exists on disk
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    # recurse into subrepositories; with --subrepos their implicit matches
    # count too, otherwise only explicit names are honored
    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            if opts.get('subrepos'):
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        # only report rejections for files the user named explicitly
        bad.extend(f for f in rejected if f in match.files())
    return bad
2395
2394
2396
2395
def addwebdirpath(repo, serverpath, webconf):
    """Register ``repo`` (and its subrepos) under ``serverpath`` in ``webconf``."""
    webconf[serverpath] = repo.root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))

    # every revision that ever carried a .hgsub may reference subrepos
    for rev in repo.revs(b'filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2405
2404
2406
2405
2407 def forget(
2406 def forget(
2408 ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
2407 ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
2409 ):
2408 ):
2410 if dryrun and interactive:
2409 if dryrun and interactive:
2411 raise error.InputError(
2410 raise error.InputError(
2412 _(b"cannot specify both --dry-run and --interactive")
2411 _(b"cannot specify both --dry-run and --interactive")
2413 )
2412 )
2414 bad = []
2413 bad = []
2415 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2414 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2416 wctx = repo[None]
2415 wctx = repo[None]
2417 forgot = []
2416 forgot = []
2418
2417
2419 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2418 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2420 forget = sorted(s.modified + s.added + s.deleted + s.clean)
2419 forget = sorted(s.modified + s.added + s.deleted + s.clean)
2421 if explicitonly:
2420 if explicitonly:
2422 forget = [f for f in forget if match.exact(f)]
2421 forget = [f for f in forget if match.exact(f)]
2423
2422
2424 for subpath in sorted(wctx.substate):
2423 for subpath in sorted(wctx.substate):
2425 sub = wctx.sub(subpath)
2424 sub = wctx.sub(subpath)
2426 submatch = matchmod.subdirmatcher(subpath, match)
2425 submatch = matchmod.subdirmatcher(subpath, match)
2427 subprefix = repo.wvfs.reljoin(prefix, subpath)
2426 subprefix = repo.wvfs.reljoin(prefix, subpath)
2428 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2427 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2429 try:
2428 try:
2430 subbad, subforgot = sub.forget(
2429 subbad, subforgot = sub.forget(
2431 submatch,
2430 submatch,
2432 subprefix,
2431 subprefix,
2433 subuipathfn,
2432 subuipathfn,
2434 dryrun=dryrun,
2433 dryrun=dryrun,
2435 interactive=interactive,
2434 interactive=interactive,
2436 )
2435 )
2437 bad.extend([subpath + b'/' + f for f in subbad])
2436 bad.extend([subpath + b'/' + f for f in subbad])
2438 forgot.extend([subpath + b'/' + f for f in subforgot])
2437 forgot.extend([subpath + b'/' + f for f in subforgot])
2439 except error.LookupError:
2438 except error.LookupError:
2440 ui.status(
2439 ui.status(
2441 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2440 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2442 )
2441 )
2443
2442
2444 if not explicitonly:
2443 if not explicitonly:
2445 for f in match.files():
2444 for f in match.files():
2446 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2445 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2447 if f not in forgot:
2446 if f not in forgot:
2448 if repo.wvfs.exists(f):
2447 if repo.wvfs.exists(f):
2449 # Don't complain if the exact case match wasn't given.
2448 # Don't complain if the exact case match wasn't given.
2450 # But don't do this until after checking 'forgot', so
2449 # But don't do this until after checking 'forgot', so
2451 # that subrepo files aren't normalized, and this op is
2450 # that subrepo files aren't normalized, and this op is
2452 # purely from data cached by the status walk above.
2451 # purely from data cached by the status walk above.
2453 if repo.dirstate.normalize(f) in repo.dirstate:
2452 if repo.dirstate.normalize(f) in repo.dirstate:
2454 continue
2453 continue
2455 ui.warn(
2454 ui.warn(
2456 _(
2455 _(
2457 b'not removing %s: '
2456 b'not removing %s: '
2458 b'file is already untracked\n'
2457 b'file is already untracked\n'
2459 )
2458 )
2460 % uipathfn(f)
2459 % uipathfn(f)
2461 )
2460 )
2462 bad.append(f)
2461 bad.append(f)
2463
2462
2464 if interactive:
2463 if interactive:
2465 responses = _(
2464 responses = _(
2466 b'[Ynsa?]'
2465 b'[Ynsa?]'
2467 b'$$ &Yes, forget this file'
2466 b'$$ &Yes, forget this file'
2468 b'$$ &No, skip this file'
2467 b'$$ &No, skip this file'
2469 b'$$ &Skip remaining files'
2468 b'$$ &Skip remaining files'
2470 b'$$ Include &all remaining files'
2469 b'$$ Include &all remaining files'
2471 b'$$ &? (display help)'
2470 b'$$ &? (display help)'
2472 )
2471 )
2473 for filename in forget[:]:
2472 for filename in forget[:]:
2474 r = ui.promptchoice(
2473 r = ui.promptchoice(
2475 _(b'forget %s %s') % (uipathfn(filename), responses)
2474 _(b'forget %s %s') % (uipathfn(filename), responses)
2476 )
2475 )
2477 if r == 4: # ?
2476 if r == 4: # ?
2478 while r == 4:
2477 while r == 4:
2479 for c, t in ui.extractchoices(responses)[1]:
2478 for c, t in ui.extractchoices(responses)[1]:
2480 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
2479 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
2481 r = ui.promptchoice(
2480 r = ui.promptchoice(
2482 _(b'forget %s %s') % (uipathfn(filename), responses)
2481 _(b'forget %s %s') % (uipathfn(filename), responses)
2483 )
2482 )
2484 if r == 0: # yes
2483 if r == 0: # yes
2485 continue
2484 continue
2486 elif r == 1: # no
2485 elif r == 1: # no
2487 forget.remove(filename)
2486 forget.remove(filename)
2488 elif r == 2: # Skip
2487 elif r == 2: # Skip
2489 fnindex = forget.index(filename)
2488 fnindex = forget.index(filename)
2490 del forget[fnindex:]
2489 del forget[fnindex:]
2491 break
2490 break
2492 elif r == 3: # All
2491 elif r == 3: # All
2493 break
2492 break
2494
2493
2495 for f in forget:
2494 for f in forget:
2496 if ui.verbose or not match.exact(f) or interactive:
2495 if ui.verbose or not match.exact(f) or interactive:
2497 ui.status(
2496 ui.status(
2498 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2497 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2499 )
2498 )
2500
2499
2501 if not dryrun:
2500 if not dryrun:
2502 rejected = wctx.forget(forget, prefix)
2501 rejected = wctx.forget(forget, prefix)
2503 bad.extend(f for f in rejected if f in match.files())
2502 bad.extend(f for f in rejected if f in match.files())
2504 forgot.extend(f for f in forget if f not in rejected)
2503 forgot.extend(f for f in forget if f not in rejected)
2505 return bad, forgot
2504 return bad, forgot
2506
2505
2507
2506
2508 def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
2507 def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
2509 ret = 1
2508 ret = 1
2510
2509
2511 needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
2510 needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
2512 if fm.isplain() and not needsfctx:
2511 if fm.isplain() and not needsfctx:
2513 # Fast path. The speed-up comes from skipping the formatter, and batching
2512 # Fast path. The speed-up comes from skipping the formatter, and batching
2514 # calls to ui.write.
2513 # calls to ui.write.
2515 buf = []
2514 buf = []
2516 for f in ctx.matches(m):
2515 for f in ctx.matches(m):
2517 buf.append(fmt % uipathfn(f))
2516 buf.append(fmt % uipathfn(f))
2518 if len(buf) > 100:
2517 if len(buf) > 100:
2519 ui.write(b''.join(buf))
2518 ui.write(b''.join(buf))
2520 del buf[:]
2519 del buf[:]
2521 ret = 0
2520 ret = 0
2522 if buf:
2521 if buf:
2523 ui.write(b''.join(buf))
2522 ui.write(b''.join(buf))
2524 else:
2523 else:
2525 for f in ctx.matches(m):
2524 for f in ctx.matches(m):
2526 fm.startitem()
2525 fm.startitem()
2527 fm.context(ctx=ctx)
2526 fm.context(ctx=ctx)
2528 if needsfctx:
2527 if needsfctx:
2529 fc = ctx[f]
2528 fc = ctx[f]
2530 fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
2529 fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
2531 fm.data(path=f)
2530 fm.data(path=f)
2532 fm.plain(fmt % uipathfn(f))
2531 fm.plain(fmt % uipathfn(f))
2533 ret = 0
2532 ret = 0
2534
2533
2535 for subpath in sorted(ctx.substate):
2534 for subpath in sorted(ctx.substate):
2536 submatch = matchmod.subdirmatcher(subpath, m)
2535 submatch = matchmod.subdirmatcher(subpath, m)
2537 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2536 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2538 if subrepos or m.exact(subpath) or any(submatch.files()):
2537 if subrepos or m.exact(subpath) or any(submatch.files()):
2539 sub = ctx.sub(subpath)
2538 sub = ctx.sub(subpath)
2540 try:
2539 try:
2541 recurse = m.exact(subpath) or subrepos
2540 recurse = m.exact(subpath) or subrepos
2542 if (
2541 if (
2543 sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
2542 sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
2544 == 0
2543 == 0
2545 ):
2544 ):
2546 ret = 0
2545 ret = 0
2547 except error.LookupError:
2546 except error.LookupError:
2548 ui.status(
2547 ui.status(
2549 _(b"skipping missing subrepository: %s\n")
2548 _(b"skipping missing subrepository: %s\n")
2550 % uipathfn(subpath)
2549 % uipathfn(subpath)
2551 )
2550 )
2552
2551
2553 return ret
2552 return ret
2554
2553
2555
2554
2556 def remove(
2555 def remove(
2557 ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
2556 ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
2558 ):
2557 ):
2559 ret = 0
2558 ret = 0
2560 s = repo.status(match=m, clean=True)
2559 s = repo.status(match=m, clean=True)
2561 modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean
2560 modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean
2562
2561
2563 wctx = repo[None]
2562 wctx = repo[None]
2564
2563
2565 if warnings is None:
2564 if warnings is None:
2566 warnings = []
2565 warnings = []
2567 warn = True
2566 warn = True
2568 else:
2567 else:
2569 warn = False
2568 warn = False
2570
2569
2571 subs = sorted(wctx.substate)
2570 subs = sorted(wctx.substate)
2572 progress = ui.makeprogress(
2571 progress = ui.makeprogress(
2573 _(b'searching'), total=len(subs), unit=_(b'subrepos')
2572 _(b'searching'), total=len(subs), unit=_(b'subrepos')
2574 )
2573 )
2575 for subpath in subs:
2574 for subpath in subs:
2576 submatch = matchmod.subdirmatcher(subpath, m)
2575 submatch = matchmod.subdirmatcher(subpath, m)
2577 subprefix = repo.wvfs.reljoin(prefix, subpath)
2576 subprefix = repo.wvfs.reljoin(prefix, subpath)
2578 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2577 subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
2579 if subrepos or m.exact(subpath) or any(submatch.files()):
2578 if subrepos or m.exact(subpath) or any(submatch.files()):
2580 progress.increment()
2579 progress.increment()
2581 sub = wctx.sub(subpath)
2580 sub = wctx.sub(subpath)
2582 try:
2581 try:
2583 if sub.removefiles(
2582 if sub.removefiles(
2584 submatch,
2583 submatch,
2585 subprefix,
2584 subprefix,
2586 subuipathfn,
2585 subuipathfn,
2587 after,
2586 after,
2588 force,
2587 force,
2589 subrepos,
2588 subrepos,
2590 dryrun,
2589 dryrun,
2591 warnings,
2590 warnings,
2592 ):
2591 ):
2593 ret = 1
2592 ret = 1
2594 except error.LookupError:
2593 except error.LookupError:
2595 warnings.append(
2594 warnings.append(
2596 _(b"skipping missing subrepository: %s\n")
2595 _(b"skipping missing subrepository: %s\n")
2597 % uipathfn(subpath)
2596 % uipathfn(subpath)
2598 )
2597 )
2599 progress.complete()
2598 progress.complete()
2600
2599
2601 # warn about failure to delete explicit files/dirs
2600 # warn about failure to delete explicit files/dirs
2602 deleteddirs = pathutil.dirs(deleted)
2601 deleteddirs = pathutil.dirs(deleted)
2603 files = m.files()
2602 files = m.files()
2604 progress = ui.makeprogress(
2603 progress = ui.makeprogress(
2605 _(b'deleting'), total=len(files), unit=_(b'files')
2604 _(b'deleting'), total=len(files), unit=_(b'files')
2606 )
2605 )
2607 for f in files:
2606 for f in files:
2608
2607
2609 def insubrepo():
2608 def insubrepo():
2610 for subpath in wctx.substate:
2609 for subpath in wctx.substate:
2611 if f.startswith(subpath + b'/'):
2610 if f.startswith(subpath + b'/'):
2612 return True
2611 return True
2613 return False
2612 return False
2614
2613
2615 progress.increment()
2614 progress.increment()
2616 isdir = f in deleteddirs or wctx.hasdir(f)
2615 isdir = f in deleteddirs or wctx.hasdir(f)
2617 if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
2616 if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
2618 continue
2617 continue
2619
2618
2620 if repo.wvfs.exists(f):
2619 if repo.wvfs.exists(f):
2621 if repo.wvfs.isdir(f):
2620 if repo.wvfs.isdir(f):
2622 warnings.append(
2621 warnings.append(
2623 _(b'not removing %s: no tracked files\n') % uipathfn(f)
2622 _(b'not removing %s: no tracked files\n') % uipathfn(f)
2624 )
2623 )
2625 else:
2624 else:
2626 warnings.append(
2625 warnings.append(
2627 _(b'not removing %s: file is untracked\n') % uipathfn(f)
2626 _(b'not removing %s: file is untracked\n') % uipathfn(f)
2628 )
2627 )
2629 # missing files will generate a warning elsewhere
2628 # missing files will generate a warning elsewhere
2630 ret = 1
2629 ret = 1
2631 progress.complete()
2630 progress.complete()
2632
2631
2633 if force:
2632 if force:
2634 list = modified + deleted + clean + added
2633 list = modified + deleted + clean + added
2635 elif after:
2634 elif after:
2636 list = deleted
2635 list = deleted
2637 remaining = modified + added + clean
2636 remaining = modified + added + clean
2638 progress = ui.makeprogress(
2637 progress = ui.makeprogress(
2639 _(b'skipping'), total=len(remaining), unit=_(b'files')
2638 _(b'skipping'), total=len(remaining), unit=_(b'files')
2640 )
2639 )
2641 for f in remaining:
2640 for f in remaining:
2642 progress.increment()
2641 progress.increment()
2643 if ui.verbose or (f in files):
2642 if ui.verbose or (f in files):
2644 warnings.append(
2643 warnings.append(
2645 _(b'not removing %s: file still exists\n') % uipathfn(f)
2644 _(b'not removing %s: file still exists\n') % uipathfn(f)
2646 )
2645 )
2647 ret = 1
2646 ret = 1
2648 progress.complete()
2647 progress.complete()
2649 else:
2648 else:
2650 list = deleted + clean
2649 list = deleted + clean
2651 progress = ui.makeprogress(
2650 progress = ui.makeprogress(
2652 _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
2651 _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
2653 )
2652 )
2654 for f in modified:
2653 for f in modified:
2655 progress.increment()
2654 progress.increment()
2656 warnings.append(
2655 warnings.append(
2657 _(
2656 _(
2658 b'not removing %s: file is modified (use -f'
2657 b'not removing %s: file is modified (use -f'
2659 b' to force removal)\n'
2658 b' to force removal)\n'
2660 )
2659 )
2661 % uipathfn(f)
2660 % uipathfn(f)
2662 )
2661 )
2663 ret = 1
2662 ret = 1
2664 for f in added:
2663 for f in added:
2665 progress.increment()
2664 progress.increment()
2666 warnings.append(
2665 warnings.append(
2667 _(
2666 _(
2668 b"not removing %s: file has been marked for add"
2667 b"not removing %s: file has been marked for add"
2669 b" (use 'hg forget' to undo add)\n"
2668 b" (use 'hg forget' to undo add)\n"
2670 )
2669 )
2671 % uipathfn(f)
2670 % uipathfn(f)
2672 )
2671 )
2673 ret = 1
2672 ret = 1
2674 progress.complete()
2673 progress.complete()
2675
2674
2676 list = sorted(list)
2675 list = sorted(list)
2677 progress = ui.makeprogress(
2676 progress = ui.makeprogress(
2678 _(b'deleting'), total=len(list), unit=_(b'files')
2677 _(b'deleting'), total=len(list), unit=_(b'files')
2679 )
2678 )
2680 for f in list:
2679 for f in list:
2681 if ui.verbose or not m.exact(f):
2680 if ui.verbose or not m.exact(f):
2682 progress.increment()
2681 progress.increment()
2683 ui.status(
2682 ui.status(
2684 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2683 _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
2685 )
2684 )
2686 progress.complete()
2685 progress.complete()
2687
2686
2688 if not dryrun:
2687 if not dryrun:
2689 with repo.wlock():
2688 with repo.wlock():
2690 if not after:
2689 if not after:
2691 for f in list:
2690 for f in list:
2692 if f in added:
2691 if f in added:
2693 continue # we never unlink added files on remove
2692 continue # we never unlink added files on remove
2694 rmdir = repo.ui.configbool(
2693 rmdir = repo.ui.configbool(
2695 b'experimental', b'removeemptydirs'
2694 b'experimental', b'removeemptydirs'
2696 )
2695 )
2697 repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
2696 repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
2698 repo[None].forget(list)
2697 repo[None].forget(list)
2699
2698
2700 if warn:
2699 if warn:
2701 for warning in warnings:
2700 for warning in warnings:
2702 ui.warn(warning)
2701 ui.warn(warning)
2703
2702
2704 return ret
2703 return ret
2705
2704
2706
2705
2707 def _catfmtneedsdata(fm):
2706 def _catfmtneedsdata(fm):
2708 return not fm.datahint() or b'data' in fm.datahint()
2707 return not fm.datahint() or b'data' in fm.datahint()
2709
2708
2710
2709
2711 def _updatecatformatter(fm, ctx, matcher, path, decode):
2710 def _updatecatformatter(fm, ctx, matcher, path, decode):
2712 """Hook for adding data to the formatter used by ``hg cat``.
2711 """Hook for adding data to the formatter used by ``hg cat``.
2713
2712
2714 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2713 Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
2715 this method first."""
2714 this method first."""
2716
2715
2717 # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
2716 # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
2718 # wasn't requested.
2717 # wasn't requested.
2719 data = b''
2718 data = b''
2720 if _catfmtneedsdata(fm):
2719 if _catfmtneedsdata(fm):
2721 data = ctx[path].data()
2720 data = ctx[path].data()
2722 if decode:
2721 if decode:
2723 data = ctx.repo().wwritedata(path, data)
2722 data = ctx.repo().wwritedata(path, data)
2724 fm.startitem()
2723 fm.startitem()
2725 fm.context(ctx=ctx)
2724 fm.context(ctx=ctx)
2726 fm.write(b'data', b'%s', data)
2725 fm.write(b'data', b'%s', data)
2727 fm.data(path=path)
2726 fm.data(path=path)
2728
2727
2729
2728
2730 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
2729 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
2731 err = 1
2730 err = 1
2732 opts = pycompat.byteskwargs(opts)
2731 opts = pycompat.byteskwargs(opts)
2733
2732
2734 def write(path):
2733 def write(path):
2735 filename = None
2734 filename = None
2736 if fntemplate:
2735 if fntemplate:
2737 filename = makefilename(
2736 filename = makefilename(
2738 ctx, fntemplate, pathname=os.path.join(prefix, path)
2737 ctx, fntemplate, pathname=os.path.join(prefix, path)
2739 )
2738 )
2740 # attempt to create the directory if it does not already exist
2739 # attempt to create the directory if it does not already exist
2741 try:
2740 try:
2742 os.makedirs(os.path.dirname(filename))
2741 os.makedirs(os.path.dirname(filename))
2743 except OSError:
2742 except OSError:
2744 pass
2743 pass
2745 with formatter.maybereopen(basefm, filename) as fm:
2744 with formatter.maybereopen(basefm, filename) as fm:
2746 _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))
2745 _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))
2747
2746
2748 # Automation often uses hg cat on single files, so special case it
2747 # Automation often uses hg cat on single files, so special case it
2749 # for performance to avoid the cost of parsing the manifest.
2748 # for performance to avoid the cost of parsing the manifest.
2750 if len(matcher.files()) == 1 and not matcher.anypats():
2749 if len(matcher.files()) == 1 and not matcher.anypats():
2751 file = matcher.files()[0]
2750 file = matcher.files()[0]
2752 mfl = repo.manifestlog
2751 mfl = repo.manifestlog
2753 mfnode = ctx.manifestnode()
2752 mfnode = ctx.manifestnode()
2754 try:
2753 try:
2755 if mfnode and mfl[mfnode].find(file)[0]:
2754 if mfnode and mfl[mfnode].find(file)[0]:
2756 if _catfmtneedsdata(basefm):
2755 if _catfmtneedsdata(basefm):
2757 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
2756 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
2758 write(file)
2757 write(file)
2759 return 0
2758 return 0
2760 except KeyError:
2759 except KeyError:
2761 pass
2760 pass
2762
2761
2763 if _catfmtneedsdata(basefm):
2762 if _catfmtneedsdata(basefm):
2764 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
2763 scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
2765
2764
2766 for abs in ctx.walk(matcher):
2765 for abs in ctx.walk(matcher):
2767 write(abs)
2766 write(abs)
2768 err = 0
2767 err = 0
2769
2768
2770 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
2769 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
2771 for subpath in sorted(ctx.substate):
2770 for subpath in sorted(ctx.substate):
2772 sub = ctx.sub(subpath)
2771 sub = ctx.sub(subpath)
2773 try:
2772 try:
2774 submatch = matchmod.subdirmatcher(subpath, matcher)
2773 submatch = matchmod.subdirmatcher(subpath, matcher)
2775 subprefix = os.path.join(prefix, subpath)
2774 subprefix = os.path.join(prefix, subpath)
2776 if not sub.cat(
2775 if not sub.cat(
2777 submatch,
2776 submatch,
2778 basefm,
2777 basefm,
2779 fntemplate,
2778 fntemplate,
2780 subprefix,
2779 subprefix,
2781 **pycompat.strkwargs(opts)
2780 **pycompat.strkwargs(opts)
2782 ):
2781 ):
2783 err = 0
2782 err = 0
2784 except error.RepoLookupError:
2783 except error.RepoLookupError:
2785 ui.status(
2784 ui.status(
2786 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2785 _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
2787 )
2786 )
2788
2787
2789 return err
2788 return err
2790
2789
2791
2790
2791 class _AddRemoveContext:
2792 """a small (hacky) context to deal with lazy opening of context
2793
2794 This is to be used in the `commit` function right below. This deals with
2795 lazily open a `changing_files` context inside a `transaction` that span the
2796 full commit operation.
2797
2798 We need :
2799 - a `changing_files` context to wrap the dirstate change within the
2800 "addremove" operation,
2801 - a transaction to make sure these change are not written right after the
2802 addremove, but when the commit operation succeed.
2803
2804 However it get complicated because:
2805 - opening a transaction "this early" shuffle hooks order, especially the
2806 `precommit` one happening after the `pretxtopen` one which I am not too
2807 enthusiastic about.
2808 - the `mq` extensions + the `record` extension stacks many layers of call
2809 to implement `qrefresh --interactive` and this result with `mq` calling a
2810 `strip` in the middle of this function. Which prevent the existence of
2811 transaction wrapping all of its function code. (however, `qrefresh` never
2812 call the `addremove` bits.
2813 - the largefile extensions (and maybe other extensions?) wraps `addremove`
2814 so slicing `addremove` in smaller bits is a complex endeavour.
2815
2816 So I eventually took a this shortcut that open the transaction if we
2817 actually needs it, not disturbing much of the rest of the code.
2818
2819 It will result in some hooks order change for `hg commit --addremove`,
2820 however it seems a corner case enough to ignore that for now (hopefully).
2821
2822 Notes that None of the above problems seems insurmountable, however I have
2823 been fighting with this specific piece of code for a couple of day already
2824 and I need a solution to keep moving forward on the bigger work around
2825 `changing_files` context that is being introduced at the same time as this
2826 hack.
2827
2828 Each problem seems to have a solution:
2829 - the hook order issue could be solved by refactoring the many-layer stack
2830 that currently composes a commit and calling them earlier,
2831 - the mq issue could be solved by refactoring `mq` so that the final strip
2832 is done after transaction closure. Be warned that the mq code is quite
2833 antic however.
2834 - large-file could be reworked in parallel of the `addremove` to be
2835 friendlier to this.
2836
2837 However each of these tasks are too much a diversion right now. In addition
2838 they will be much easier to undertake when the `changing_files` dust has
2839 settled."""
2840
2841 def __init__(self, repo):
2842 self._repo = repo
2843 self._transaction = None
2844 self._dirstate_context = None
2845 self._state = None
2846
2847 def __enter__(self):
2848 assert self._state is None
2849 self._state = True
2850 return self
2851
2852 def open_transaction(self):
2853 """open a `transaction` and `changing_files` context
2854
2855 Call this when you know that change to the dirstate will be needed and
2856 we need to open the transaction early
2857
2858 This will also open the dirstate `changing_files` context, so you should
2859 call `close_dirstate_context` when the distate changes are done.
2860 """
2861 assert self._state is not None
2862 if self._transaction is None:
2863 self._transaction = self._repo.transaction(b'commit')
2864 self._transaction.__enter__()
2865 if self._dirstate_context is None:
2866 self._dirstate_context = self._repo.dirstate.changing_files(
2867 self._repo
2868 )
2869 self._dirstate_context.__enter__()
2870
2871 def close_dirstate_context(self):
2872 """close the change_files if any
2873
2874 Call this after the (potential) `open_transaction` call to close the
2875 (potential) changing_files context.
2876 """
2877 if self._dirstate_context is not None:
2878 self._dirstate_context.__exit__(None, None, None)
2879 self._dirstate_context = None
2880
2881 def __exit__(self, *args):
2882 if self._dirstate_context is not None:
2883 self._dirstate_context.__exit__(*args)
2884 if self._transaction is not None:
2885 self._transaction.__exit__(*args)
2886
2887
2792 def commit(ui, repo, commitfunc, pats, opts):
2888 def commit(ui, repo, commitfunc, pats, opts):
2793 '''commit the specified files or all outstanding changes'''
2889 '''commit the specified files or all outstanding changes'''
2794 date = opts.get(b'date')
2890 date = opts.get(b'date')
2795 if date:
2891 if date:
2796 opts[b'date'] = dateutil.parsedate(date)
2892 opts[b'date'] = dateutil.parsedate(date)
2797
2893
2798 dsguard = None
2894 with repo.wlock(), repo.lock():
2799 # extract addremove carefully -- this function can be called from a command
2800 # that doesn't support addremove
2801 if opts.get(b'addremove'):
2802 dsguard = dirstateguard.dirstateguard(repo, b'commit')
2803 with dsguard or util.nullcontextmanager():
2804 message = logmessage(ui, opts)
2895 message = logmessage(ui, opts)
2805 matcher = scmutil.match(repo[None], pats, opts)
2896 matcher = scmutil.match(repo[None], pats, opts)
2806 if True:
2897
2898 with _AddRemoveContext(repo) as c:
2807 # extract addremove carefully -- this function can be called from a
2899 # extract addremove carefully -- this function can be called from a
2808 # command that doesn't support addremove
2900 # command that doesn't support addremove
2809 if opts.get(b'addremove'):
2901 if opts.get(b'addremove'):
2810 relative = scmutil.anypats(pats, opts)
2902 relative = scmutil.anypats(pats, opts)
2811 uipathfn = scmutil.getuipathfn(
2903 uipathfn = scmutil.getuipathfn(
2812 repo,
2904 repo,
2813 legacyrelativevalue=relative,
2905 legacyrelativevalue=relative,
2814 )
2906 )
2815 r = scmutil.addremove(
2907 r = scmutil.addremove(
2816 repo,
2908 repo,
2817 matcher,
2909 matcher,
2818 b"",
2910 b"",
2819 uipathfn,
2911 uipathfn,
2820 opts,
2912 opts,
2913 open_tr=c.open_transaction,
2821 )
2914 )
2822 m = _(b"failed to mark all new/missing files as added/removed")
2915 m = _(b"failed to mark all new/missing files as added/removed")
2823 if r != 0:
2916 if r != 0:
2824 raise error.Abort(m)
2917 raise error.Abort(m)
2825
2918 c.close_dirstate_context()
2826 return commitfunc(ui, repo, message, matcher, opts)
2919 return commitfunc(ui, repo, message, matcher, opts)
2827
2920
2828
2921
2829 def samefile(f, ctx1, ctx2):
2922 def samefile(f, ctx1, ctx2):
2830 if f in ctx1.manifest():
2923 if f in ctx1.manifest():
2831 a = ctx1.filectx(f)
2924 a = ctx1.filectx(f)
2832 if f in ctx2.manifest():
2925 if f in ctx2.manifest():
2833 b = ctx2.filectx(f)
2926 b = ctx2.filectx(f)
2834 return not a.cmp(b) and a.flags() == b.flags()
2927 return not a.cmp(b) and a.flags() == b.flags()
2835 else:
2928 else:
2836 return False
2929 return False
2837 else:
2930 else:
2838 return f not in ctx2.manifest()
2931 return f not in ctx2.manifest()
2839
2932
2840
2933
2841 def amend(ui, repo, old, extra, pats, opts):
2934 def amend(ui, repo, old, extra, pats, opts):
2842 # avoid cycle context -> subrepo -> cmdutil
2935 # avoid cycle context -> subrepo -> cmdutil
2843 from . import context
2936 from . import context
2844
2937
2845 # amend will reuse the existing user if not specified, but the obsolete
2938 # amend will reuse the existing user if not specified, but the obsolete
2846 # marker creation requires that the current user's name is specified.
2939 # marker creation requires that the current user's name is specified.
2847 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2940 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2848 ui.username() # raise exception if username not set
2941 ui.username() # raise exception if username not set
2849
2942
2850 ui.note(_(b'amending changeset %s\n') % old)
2943 ui.note(_(b'amending changeset %s\n') % old)
2851 base = old.p1()
2944 base = old.p1()
2852
2945
2853 with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
2946 with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
2854 # Participating changesets:
2947 # Participating changesets:
2855 #
2948 #
2856 # wctx o - workingctx that contains changes from working copy
2949 # wctx o - workingctx that contains changes from working copy
2857 # | to go into amending commit
2950 # | to go into amending commit
2858 # |
2951 # |
2859 # old o - changeset to amend
2952 # old o - changeset to amend
2860 # |
2953 # |
2861 # base o - first parent of the changeset to amend
2954 # base o - first parent of the changeset to amend
2862 wctx = repo[None]
2955 wctx = repo[None]
2863
2956
2864 # Copy to avoid mutating input
2957 # Copy to avoid mutating input
2865 extra = extra.copy()
2958 extra = extra.copy()
2866 # Update extra dict from amended commit (e.g. to preserve graft
2959 # Update extra dict from amended commit (e.g. to preserve graft
2867 # source)
2960 # source)
2868 extra.update(old.extra())
2961 extra.update(old.extra())
2869
2962
2870 # Also update it from the from the wctx
2963 # Also update it from the from the wctx
2871 extra.update(wctx.extra())
2964 extra.update(wctx.extra())
2872
2965
2873 # date-only change should be ignored?
2966 # date-only change should be ignored?
2874 datemaydiffer = resolve_commit_options(ui, opts)
2967 datemaydiffer = resolve_commit_options(ui, opts)
2875 opts = pycompat.byteskwargs(opts)
2968 opts = pycompat.byteskwargs(opts)
2876
2969
2877 date = old.date()
2970 date = old.date()
2878 if opts.get(b'date'):
2971 if opts.get(b'date'):
2879 date = dateutil.parsedate(opts.get(b'date'))
2972 date = dateutil.parsedate(opts.get(b'date'))
2880 user = opts.get(b'user') or old.user()
2973 user = opts.get(b'user') or old.user()
2881
2974
2882 if len(old.parents()) > 1:
2975 if len(old.parents()) > 1:
2883 # ctx.files() isn't reliable for merges, so fall back to the
2976 # ctx.files() isn't reliable for merges, so fall back to the
2884 # slower repo.status() method
2977 # slower repo.status() method
2885 st = base.status(old)
2978 st = base.status(old)
2886 files = set(st.modified) | set(st.added) | set(st.removed)
2979 files = set(st.modified) | set(st.added) | set(st.removed)
2887 else:
2980 else:
2888 files = set(old.files())
2981 files = set(old.files())
2889
2982
2890 # add/remove the files to the working copy if the "addremove" option
2983 # add/remove the files to the working copy if the "addremove" option
2891 # was specified.
2984 # was specified.
2892 matcher = scmutil.match(wctx, pats, opts)
2985 matcher = scmutil.match(wctx, pats, opts)
2893 relative = scmutil.anypats(pats, opts)
2986 relative = scmutil.anypats(pats, opts)
2894 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
2987 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
2895 if opts.get(b'addremove') and scmutil.addremove(
2988 if opts.get(b'addremove') and scmutil.addremove(
2896 repo, matcher, b"", uipathfn, opts
2989 repo, matcher, b"", uipathfn, opts
2897 ):
2990 ):
2898 raise error.Abort(
2991 raise error.Abort(
2899 _(b"failed to mark all new/missing files as added/removed")
2992 _(b"failed to mark all new/missing files as added/removed")
2900 )
2993 )
2901
2994
2902 # Check subrepos. This depends on in-place wctx._status update in
2995 # Check subrepos. This depends on in-place wctx._status update in
2903 # subrepo.precommit(). To minimize the risk of this hack, we do
2996 # subrepo.precommit(). To minimize the risk of this hack, we do
2904 # nothing if .hgsub does not exist.
2997 # nothing if .hgsub does not exist.
2905 if b'.hgsub' in wctx or b'.hgsub' in old:
2998 if b'.hgsub' in wctx or b'.hgsub' in old:
2906 subs, commitsubs, newsubstate = subrepoutil.precommit(
2999 subs, commitsubs, newsubstate = subrepoutil.precommit(
2907 ui, wctx, wctx._status, matcher
3000 ui, wctx, wctx._status, matcher
2908 )
3001 )
2909 # amend should abort if commitsubrepos is enabled
3002 # amend should abort if commitsubrepos is enabled
2910 assert not commitsubs
3003 assert not commitsubs
2911 if subs:
3004 if subs:
2912 subrepoutil.writestate(repo, newsubstate)
3005 subrepoutil.writestate(repo, newsubstate)
2913
3006
2914 ms = mergestatemod.mergestate.read(repo)
3007 ms = mergestatemod.mergestate.read(repo)
2915 mergeutil.checkunresolved(ms)
3008 mergeutil.checkunresolved(ms)
2916
3009
2917 filestoamend = {f for f in wctx.files() if matcher(f)}
3010 filestoamend = {f for f in wctx.files() if matcher(f)}
2918
3011
2919 changes = len(filestoamend) > 0
3012 changes = len(filestoamend) > 0
2920 changeset_copies = (
3013 changeset_copies = (
2921 repo.ui.config(b'experimental', b'copies.read-from')
3014 repo.ui.config(b'experimental', b'copies.read-from')
2922 != b'filelog-only'
3015 != b'filelog-only'
2923 )
3016 )
2924 # If there are changes to amend or if copy information needs to be read
3017 # If there are changes to amend or if copy information needs to be read
2925 # from the changeset extras, we cannot take the fast path of using
3018 # from the changeset extras, we cannot take the fast path of using
2926 # filectxs from the old commit.
3019 # filectxs from the old commit.
2927 if changes or changeset_copies:
3020 if changes or changeset_copies:
2928 # Recompute copies (avoid recording a -> b -> a)
3021 # Recompute copies (avoid recording a -> b -> a)
2929 copied = copies.pathcopies(base, wctx)
3022 copied = copies.pathcopies(base, wctx)
2930 if old.p2():
3023 if old.p2():
2931 copied.update(copies.pathcopies(old.p2(), wctx))
3024 copied.update(copies.pathcopies(old.p2(), wctx))
2932
3025
2933 # Prune files which were reverted by the updates: if old
3026 # Prune files which were reverted by the updates: if old
2934 # introduced file X and the file was renamed in the working
3027 # introduced file X and the file was renamed in the working
2935 # copy, then those two files are the same and
3028 # copy, then those two files are the same and
2936 # we can discard X from our list of files. Likewise if X
3029 # we can discard X from our list of files. Likewise if X
2937 # was removed, it's no longer relevant. If X is missing (aka
3030 # was removed, it's no longer relevant. If X is missing (aka
2938 # deleted), old X must be preserved.
3031 # deleted), old X must be preserved.
2939 files.update(filestoamend)
3032 files.update(filestoamend)
2940 files = [
3033 files = [
2941 f
3034 f
2942 for f in files
3035 for f in files
2943 if (f not in filestoamend or not samefile(f, wctx, base))
3036 if (f not in filestoamend or not samefile(f, wctx, base))
2944 ]
3037 ]
2945
3038
2946 def filectxfn(repo, ctx_, path):
3039 def filectxfn(repo, ctx_, path):
2947 try:
3040 try:
2948 # If the file being considered is not amongst the files
3041 # If the file being considered is not amongst the files
2949 # to be amended, we should use the file context from the
3042 # to be amended, we should use the file context from the
2950 # old changeset. This avoids issues when only some files in
3043 # old changeset. This avoids issues when only some files in
2951 # the working copy are being amended but there are also
3044 # the working copy are being amended but there are also
2952 # changes to other files from the old changeset.
3045 # changes to other files from the old changeset.
2953 if path in filestoamend:
3046 if path in filestoamend:
2954 # Return None for removed files.
3047 # Return None for removed files.
2955 if path in wctx.removed():
3048 if path in wctx.removed():
2956 return None
3049 return None
2957 fctx = wctx[path]
3050 fctx = wctx[path]
2958 else:
3051 else:
2959 fctx = old.filectx(path)
3052 fctx = old.filectx(path)
2960 flags = fctx.flags()
3053 flags = fctx.flags()
2961 mctx = context.memfilectx(
3054 mctx = context.memfilectx(
2962 repo,
3055 repo,
2963 ctx_,
3056 ctx_,
2964 fctx.path(),
3057 fctx.path(),
2965 fctx.data(),
3058 fctx.data(),
2966 islink=b'l' in flags,
3059 islink=b'l' in flags,
2967 isexec=b'x' in flags,
3060 isexec=b'x' in flags,
2968 copysource=copied.get(path),
3061 copysource=copied.get(path),
2969 )
3062 )
2970 return mctx
3063 return mctx
2971 except KeyError:
3064 except KeyError:
2972 return None
3065 return None
2973
3066
2974 else:
3067 else:
2975 ui.note(_(b'copying changeset %s to %s\n') % (old, base))
3068 ui.note(_(b'copying changeset %s to %s\n') % (old, base))
2976
3069
2977 # Use version of files as in the old cset
3070 # Use version of files as in the old cset
2978 def filectxfn(repo, ctx_, path):
3071 def filectxfn(repo, ctx_, path):
2979 try:
3072 try:
2980 return old.filectx(path)
3073 return old.filectx(path)
2981 except KeyError:
3074 except KeyError:
2982 return None
3075 return None
2983
3076
2984 # See if we got a message from -m or -l, if not, open the editor with
3077 # See if we got a message from -m or -l, if not, open the editor with
2985 # the message of the changeset to amend.
3078 # the message of the changeset to amend.
2986 message = logmessage(ui, opts)
3079 message = logmessage(ui, opts)
2987
3080
2988 editform = mergeeditform(old, b'commit.amend')
3081 editform = mergeeditform(old, b'commit.amend')
2989
3082
2990 if not message:
3083 if not message:
2991 message = old.description()
3084 message = old.description()
2992 # Default if message isn't provided and --edit is not passed is to
3085 # Default if message isn't provided and --edit is not passed is to
2993 # invoke editor, but allow --no-edit. If somehow we don't have any
3086 # invoke editor, but allow --no-edit. If somehow we don't have any
2994 # description, let's always start the editor.
3087 # description, let's always start the editor.
2995 doedit = not message or opts.get(b'edit') in [True, None]
3088 doedit = not message or opts.get(b'edit') in [True, None]
2996 else:
3089 else:
2997 # Default if message is provided is to not invoke editor, but allow
3090 # Default if message is provided is to not invoke editor, but allow
2998 # --edit.
3091 # --edit.
2999 doedit = opts.get(b'edit') is True
3092 doedit = opts.get(b'edit') is True
3000 editor = getcommiteditor(edit=doedit, editform=editform)
3093 editor = getcommiteditor(edit=doedit, editform=editform)
3001
3094
3002 pureextra = extra.copy()
3095 pureextra = extra.copy()
3003 extra[b'amend_source'] = old.hex()
3096 extra[b'amend_source'] = old.hex()
3004
3097
3005 new = context.memctx(
3098 new = context.memctx(
3006 repo,
3099 repo,
3007 parents=[base.node(), old.p2().node()],
3100 parents=[base.node(), old.p2().node()],
3008 text=message,
3101 text=message,
3009 files=files,
3102 files=files,
3010 filectxfn=filectxfn,
3103 filectxfn=filectxfn,
3011 user=user,
3104 user=user,
3012 date=date,
3105 date=date,
3013 extra=extra,
3106 extra=extra,
3014 editor=editor,
3107 editor=editor,
3015 )
3108 )
3016
3109
3017 newdesc = changelog.stripdesc(new.description())
3110 newdesc = changelog.stripdesc(new.description())
3018 if (
3111 if (
3019 (not changes)
3112 (not changes)
3020 and newdesc == old.description()
3113 and newdesc == old.description()
3021 and user == old.user()
3114 and user == old.user()
3022 and (date == old.date() or datemaydiffer)
3115 and (date == old.date() or datemaydiffer)
3023 and pureextra == old.extra()
3116 and pureextra == old.extra()
3024 ):
3117 ):
3025 # nothing changed. continuing here would create a new node
3118 # nothing changed. continuing here would create a new node
3026 # anyway because of the amend_source noise.
3119 # anyway because of the amend_source noise.
3027 #
3120 #
3028 # This not what we expect from amend.
3121 # This not what we expect from amend.
3029 return old.node()
3122 return old.node()
3030
3123
3031 commitphase = None
3124 commitphase = None
3032 if opts.get(b'secret'):
3125 if opts.get(b'secret'):
3033 commitphase = phases.secret
3126 commitphase = phases.secret
3034 elif opts.get(b'draft'):
3127 elif opts.get(b'draft'):
3035 commitphase = phases.draft
3128 commitphase = phases.draft
3036 newid = repo.commitctx(new)
3129 newid = repo.commitctx(new)
3037 ms.reset()
3130 ms.reset()
3038
3131
3039 with repo.dirstate.changing_parents(repo):
3132 with repo.dirstate.changing_parents(repo):
3040 # Reroute the working copy parent to the new changeset
3133 # Reroute the working copy parent to the new changeset
3041 repo.setparents(newid, repo.nullid)
3134 repo.setparents(newid, repo.nullid)
3042
3135
3043 # Fixing the dirstate because localrepo.commitctx does not update
3136 # Fixing the dirstate because localrepo.commitctx does not update
3044 # it. This is rather convenient because we did not need to update
3137 # it. This is rather convenient because we did not need to update
3045 # the dirstate for all the files in the new commit which commitctx
3138 # the dirstate for all the files in the new commit which commitctx
3046 # could have done if it updated the dirstate. Now, we can
3139 # could have done if it updated the dirstate. Now, we can
3047 # selectively update the dirstate only for the amended files.
3140 # selectively update the dirstate only for the amended files.
3048 dirstate = repo.dirstate
3141 dirstate = repo.dirstate
3049
3142
3050 # Update the state of the files which were added and modified in the
3143 # Update the state of the files which were added and modified in the
3051 # amend to "normal" in the dirstate. We need to use "normallookup" since
3144 # amend to "normal" in the dirstate. We need to use "normallookup" since
3052 # the files may have changed since the command started; using "normal"
3145 # the files may have changed since the command started; using "normal"
3053 # would mark them as clean but with uncommitted contents.
3146 # would mark them as clean but with uncommitted contents.
3054 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3147 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3055 for f in normalfiles:
3148 for f in normalfiles:
3056 dirstate.update_file(
3149 dirstate.update_file(
3057 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
3150 f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
3058 )
3151 )
3059
3152
3060 # Update the state of files which were removed in the amend
3153 # Update the state of files which were removed in the amend
3061 # to "removed" in the dirstate.
3154 # to "removed" in the dirstate.
3062 removedfiles = set(wctx.removed()) & filestoamend
3155 removedfiles = set(wctx.removed()) & filestoamend
3063 for f in removedfiles:
3156 for f in removedfiles:
3064 dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
3157 dirstate.update_file(f, p1_tracked=False, wc_tracked=False)
3065
3158
3066 mapping = {old.node(): (newid,)}
3159 mapping = {old.node(): (newid,)}
3067 obsmetadata = None
3160 obsmetadata = None
3068 if opts.get(b'note'):
3161 if opts.get(b'note'):
3069 obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
3162 obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
3070 backup = ui.configbool(b'rewrite', b'backup-bundle')
3163 backup = ui.configbool(b'rewrite', b'backup-bundle')
3071 scmutil.cleanupnodes(
3164 scmutil.cleanupnodes(
3072 repo,
3165 repo,
3073 mapping,
3166 mapping,
3074 b'amend',
3167 b'amend',
3075 metadata=obsmetadata,
3168 metadata=obsmetadata,
3076 fixphase=True,
3169 fixphase=True,
3077 targetphase=commitphase,
3170 targetphase=commitphase,
3078 backup=backup,
3171 backup=backup,
3079 )
3172 )
3080
3173
3081 return newid
3174 return newid
3082
3175
3083
3176
3084 def commiteditor(repo, ctx, subs, editform=b''):
3177 def commiteditor(repo, ctx, subs, editform=b''):
3085 if ctx.description():
3178 if ctx.description():
3086 return ctx.description()
3179 return ctx.description()
3087 return commitforceeditor(
3180 return commitforceeditor(
3088 repo, ctx, subs, editform=editform, unchangedmessagedetection=True
3181 repo, ctx, subs, editform=editform, unchangedmessagedetection=True
3089 )
3182 )
3090
3183
3091
3184
3092 def commitforceeditor(
3185 def commitforceeditor(
3093 repo,
3186 repo,
3094 ctx,
3187 ctx,
3095 subs,
3188 subs,
3096 finishdesc=None,
3189 finishdesc=None,
3097 extramsg=None,
3190 extramsg=None,
3098 editform=b'',
3191 editform=b'',
3099 unchangedmessagedetection=False,
3192 unchangedmessagedetection=False,
3100 ):
3193 ):
3101 if not extramsg:
3194 if not extramsg:
3102 extramsg = _(b"Leave message empty to abort commit.")
3195 extramsg = _(b"Leave message empty to abort commit.")
3103
3196
3104 forms = [e for e in editform.split(b'.') if e]
3197 forms = [e for e in editform.split(b'.') if e]
3105 forms.insert(0, b'changeset')
3198 forms.insert(0, b'changeset')
3106 templatetext = None
3199 templatetext = None
3107 while forms:
3200 while forms:
3108 ref = b'.'.join(forms)
3201 ref = b'.'.join(forms)
3109 if repo.ui.config(b'committemplate', ref):
3202 if repo.ui.config(b'committemplate', ref):
3110 templatetext = committext = buildcommittemplate(
3203 templatetext = committext = buildcommittemplate(
3111 repo, ctx, subs, extramsg, ref
3204 repo, ctx, subs, extramsg, ref
3112 )
3205 )
3113 break
3206 break
3114 forms.pop()
3207 forms.pop()
3115 else:
3208 else:
3116 committext = buildcommittext(repo, ctx, subs, extramsg)
3209 committext = buildcommittext(repo, ctx, subs, extramsg)
3117
3210
3118 # run editor in the repository root
3211 # run editor in the repository root
3119 olddir = encoding.getcwd()
3212 olddir = encoding.getcwd()
3120 os.chdir(repo.root)
3213 os.chdir(repo.root)
3121
3214
3122 # make in-memory changes visible to external process
3215 # make in-memory changes visible to external process
3123 tr = repo.currenttransaction()
3216 tr = repo.currenttransaction()
3124 repo.dirstate.write(tr)
3217 repo.dirstate.write(tr)
3125 pending = tr and tr.writepending() and repo.root
3218 pending = tr and tr.writepending() and repo.root
3126
3219
3127 editortext = repo.ui.edit(
3220 editortext = repo.ui.edit(
3128 committext,
3221 committext,
3129 ctx.user(),
3222 ctx.user(),
3130 ctx.extra(),
3223 ctx.extra(),
3131 editform=editform,
3224 editform=editform,
3132 pending=pending,
3225 pending=pending,
3133 repopath=repo.path,
3226 repopath=repo.path,
3134 action=b'commit',
3227 action=b'commit',
3135 )
3228 )
3136 text = editortext
3229 text = editortext
3137
3230
3138 # strip away anything below this special string (used for editors that want
3231 # strip away anything below this special string (used for editors that want
3139 # to display the diff)
3232 # to display the diff)
3140 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3233 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3141 if stripbelow:
3234 if stripbelow:
3142 text = text[: stripbelow.start()]
3235 text = text[: stripbelow.start()]
3143
3236
3144 text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
3237 text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
3145 os.chdir(olddir)
3238 os.chdir(olddir)
3146
3239
3147 if finishdesc:
3240 if finishdesc:
3148 text = finishdesc(text)
3241 text = finishdesc(text)
3149 if not text.strip():
3242 if not text.strip():
3150 raise error.InputError(_(b"empty commit message"))
3243 raise error.InputError(_(b"empty commit message"))
3151 if unchangedmessagedetection and editortext == templatetext:
3244 if unchangedmessagedetection and editortext == templatetext:
3152 raise error.InputError(_(b"commit message unchanged"))
3245 raise error.InputError(_(b"commit message unchanged"))
3153
3246
3154 return text
3247 return text
3155
3248
3156
3249
3157 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3250 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3158 ui = repo.ui
3251 ui = repo.ui
3159 spec = formatter.reference_templatespec(ref)
3252 spec = formatter.reference_templatespec(ref)
3160 t = logcmdutil.changesettemplater(ui, repo, spec)
3253 t = logcmdutil.changesettemplater(ui, repo, spec)
3161 t.t.cache.update(
3254 t.t.cache.update(
3162 (k, templater.unquotestring(v))
3255 (k, templater.unquotestring(v))
3163 for k, v in repo.ui.configitems(b'committemplate')
3256 for k, v in repo.ui.configitems(b'committemplate')
3164 )
3257 )
3165
3258
3166 if not extramsg:
3259 if not extramsg:
3167 extramsg = b'' # ensure that extramsg is string
3260 extramsg = b'' # ensure that extramsg is string
3168
3261
3169 ui.pushbuffer()
3262 ui.pushbuffer()
3170 t.show(ctx, extramsg=extramsg)
3263 t.show(ctx, extramsg=extramsg)
3171 return ui.popbuffer()
3264 return ui.popbuffer()
3172
3265
3173
3266
3174 def hgprefix(msg):
3267 def hgprefix(msg):
3175 return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a])
3268 return b"\n".join([b"HG: %s" % a for a in msg.split(b"\n") if a])
3176
3269
3177
3270
3178 def buildcommittext(repo, ctx, subs, extramsg):
3271 def buildcommittext(repo, ctx, subs, extramsg):
3179 edittext = []
3272 edittext = []
3180 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3273 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3181 if ctx.description():
3274 if ctx.description():
3182 edittext.append(ctx.description())
3275 edittext.append(ctx.description())
3183 edittext.append(b"")
3276 edittext.append(b"")
3184 edittext.append(b"") # Empty line between message and comments.
3277 edittext.append(b"") # Empty line between message and comments.
3185 edittext.append(
3278 edittext.append(
3186 hgprefix(
3279 hgprefix(
3187 _(
3280 _(
3188 b"Enter commit message."
3281 b"Enter commit message."
3189 b" Lines beginning with 'HG:' are removed."
3282 b" Lines beginning with 'HG:' are removed."
3190 )
3283 )
3191 )
3284 )
3192 )
3285 )
3193 edittext.append(hgprefix(extramsg))
3286 edittext.append(hgprefix(extramsg))
3194 edittext.append(b"HG: --")
3287 edittext.append(b"HG: --")
3195 edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
3288 edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
3196 if ctx.p2():
3289 if ctx.p2():
3197 edittext.append(hgprefix(_(b"branch merge")))
3290 edittext.append(hgprefix(_(b"branch merge")))
3198 if ctx.branch():
3291 if ctx.branch():
3199 edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
3292 edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
3200 if bookmarks.isactivewdirparent(repo):
3293 if bookmarks.isactivewdirparent(repo):
3201 edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
3294 edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
3202 edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs])
3295 edittext.extend([hgprefix(_(b"subrepo %s") % s) for s in subs])
3203 edittext.extend([hgprefix(_(b"added %s") % f) for f in added])
3296 edittext.extend([hgprefix(_(b"added %s") % f) for f in added])
3204 edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified])
3297 edittext.extend([hgprefix(_(b"changed %s") % f) for f in modified])
3205 edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed])
3298 edittext.extend([hgprefix(_(b"removed %s") % f) for f in removed])
3206 if not added and not modified and not removed:
3299 if not added and not modified and not removed:
3207 edittext.append(hgprefix(_(b"no files changed")))
3300 edittext.append(hgprefix(_(b"no files changed")))
3208 edittext.append(b"")
3301 edittext.append(b"")
3209
3302
3210 return b"\n".join(edittext)
3303 return b"\n".join(edittext)
3211
3304
3212
3305
3213 def commitstatus(repo, node, branch, bheads=None, tip=None, opts=None):
3306 def commitstatus(repo, node, branch, bheads=None, tip=None, opts=None):
3214 if opts is None:
3307 if opts is None:
3215 opts = {}
3308 opts = {}
3216 ctx = repo[node]
3309 ctx = repo[node]
3217 parents = ctx.parents()
3310 parents = ctx.parents()
3218
3311
3219 if tip is not None and repo.changelog.tip() == tip:
3312 if tip is not None and repo.changelog.tip() == tip:
3220 # avoid reporting something like "committed new head" when
3313 # avoid reporting something like "committed new head" when
3221 # recommitting old changesets, and issue a helpful warning
3314 # recommitting old changesets, and issue a helpful warning
3222 # for most instances
3315 # for most instances
3223 repo.ui.warn(_(b"warning: commit already existed in the repository!\n"))
3316 repo.ui.warn(_(b"warning: commit already existed in the repository!\n"))
3224 elif (
3317 elif (
3225 not opts.get(b'amend')
3318 not opts.get(b'amend')
3226 and bheads
3319 and bheads
3227 and node not in bheads
3320 and node not in bheads
3228 and not any(
3321 and not any(
3229 p.node() in bheads and p.branch() == branch for p in parents
3322 p.node() in bheads and p.branch() == branch for p in parents
3230 )
3323 )
3231 ):
3324 ):
3232 repo.ui.status(_(b'created new head\n'))
3325 repo.ui.status(_(b'created new head\n'))
3233 # The message is not printed for initial roots. For the other
3326 # The message is not printed for initial roots. For the other
3234 # changesets, it is printed in the following situations:
3327 # changesets, it is printed in the following situations:
3235 #
3328 #
3236 # Par column: for the 2 parents with ...
3329 # Par column: for the 2 parents with ...
3237 # N: null or no parent
3330 # N: null or no parent
3238 # B: parent is on another named branch
3331 # B: parent is on another named branch
3239 # C: parent is a regular non head changeset
3332 # C: parent is a regular non head changeset
3240 # H: parent was a branch head of the current branch
3333 # H: parent was a branch head of the current branch
3241 # Msg column: whether we print "created new head" message
3334 # Msg column: whether we print "created new head" message
3242 # In the following, it is assumed that there already exists some
3335 # In the following, it is assumed that there already exists some
3243 # initial branch heads of the current branch, otherwise nothing is
3336 # initial branch heads of the current branch, otherwise nothing is
3244 # printed anyway.
3337 # printed anyway.
3245 #
3338 #
3246 # Par Msg Comment
3339 # Par Msg Comment
3247 # N N y additional topo root
3340 # N N y additional topo root
3248 #
3341 #
3249 # B N y additional branch root
3342 # B N y additional branch root
3250 # C N y additional topo head
3343 # C N y additional topo head
3251 # H N n usual case
3344 # H N n usual case
3252 #
3345 #
3253 # B B y weird additional branch root
3346 # B B y weird additional branch root
3254 # C B y branch merge
3347 # C B y branch merge
3255 # H B n merge with named branch
3348 # H B n merge with named branch
3256 #
3349 #
3257 # C C y additional head from merge
3350 # C C y additional head from merge
3258 # C H n merge with a head
3351 # C H n merge with a head
3259 #
3352 #
3260 # H H n head merge: head count decreases
3353 # H H n head merge: head count decreases
3261
3354
3262 if not opts.get(b'close_branch'):
3355 if not opts.get(b'close_branch'):
3263 for r in parents:
3356 for r in parents:
3264 if r.closesbranch() and r.branch() == branch:
3357 if r.closesbranch() and r.branch() == branch:
3265 repo.ui.status(
3358 repo.ui.status(
3266 _(b'reopening closed branch head %d\n') % r.rev()
3359 _(b'reopening closed branch head %d\n') % r.rev()
3267 )
3360 )
3268
3361
3269 if repo.ui.debugflag:
3362 if repo.ui.debugflag:
3270 repo.ui.write(
3363 repo.ui.write(
3271 _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
3364 _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
3272 )
3365 )
3273 elif repo.ui.verbose:
3366 elif repo.ui.verbose:
3274 repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3367 repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3275
3368
3276
3369
3277 def postcommitstatus(repo, pats, opts):
3370 def postcommitstatus(repo, pats, opts):
3278 return repo.status(match=scmutil.match(repo[None], pats, opts))
3371 return repo.status(match=scmutil.match(repo[None], pats, opts))
3279
3372
3280
3373
3281 def revert(ui, repo, ctx, *pats, **opts):
3374 def revert(ui, repo, ctx, *pats, **opts):
3282 opts = pycompat.byteskwargs(opts)
3375 opts = pycompat.byteskwargs(opts)
3283 parent, p2 = repo.dirstate.parents()
3376 parent, p2 = repo.dirstate.parents()
3284 node = ctx.node()
3377 node = ctx.node()
3285
3378
3286 mf = ctx.manifest()
3379 mf = ctx.manifest()
3287 if node == p2:
3380 if node == p2:
3288 parent = p2
3381 parent = p2
3289
3382
3290 # need all matching names in dirstate and manifest of target rev,
3383 # need all matching names in dirstate and manifest of target rev,
3291 # so have to walk both. do not print errors if files exist in one
3384 # so have to walk both. do not print errors if files exist in one
3292 # but not other. in both cases, filesets should be evaluated against
3385 # but not other. in both cases, filesets should be evaluated against
3293 # workingctx to get consistent result (issue4497). this means 'set:**'
3386 # workingctx to get consistent result (issue4497). this means 'set:**'
3294 # cannot be used to select missing files from target rev.
3387 # cannot be used to select missing files from target rev.
3295
3388
3296 # `names` is a mapping for all elements in working copy and target revision
3389 # `names` is a mapping for all elements in working copy and target revision
3297 # The mapping is in the form:
3390 # The mapping is in the form:
3298 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3391 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3299 names = {}
3392 names = {}
3300 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3393 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
3301
3394
3302 with repo.wlock():
3395 with repo.wlock():
3303 ## filling of the `names` mapping
3396 ## filling of the `names` mapping
3304 # walk dirstate to fill `names`
3397 # walk dirstate to fill `names`
3305
3398
3306 interactive = opts.get(b'interactive', False)
3399 interactive = opts.get(b'interactive', False)
3307 wctx = repo[None]
3400 wctx = repo[None]
3308 m = scmutil.match(wctx, pats, opts)
3401 m = scmutil.match(wctx, pats, opts)
3309
3402
3310 # we'll need this later
3403 # we'll need this later
3311 targetsubs = sorted(s for s in wctx.substate if m(s))
3404 targetsubs = sorted(s for s in wctx.substate if m(s))
3312
3405
3313 if not m.always():
3406 if not m.always():
3314 matcher = matchmod.badmatch(m, lambda x, y: False)
3407 matcher = matchmod.badmatch(m, lambda x, y: False)
3315 for abs in wctx.walk(matcher):
3408 for abs in wctx.walk(matcher):
3316 names[abs] = m.exact(abs)
3409 names[abs] = m.exact(abs)
3317
3410
3318 # walk target manifest to fill `names`
3411 # walk target manifest to fill `names`
3319
3412
3320 def badfn(path, msg):
3413 def badfn(path, msg):
3321 if path in names:
3414 if path in names:
3322 return
3415 return
3323 if path in ctx.substate:
3416 if path in ctx.substate:
3324 return
3417 return
3325 path_ = path + b'/'
3418 path_ = path + b'/'
3326 for f in names:
3419 for f in names:
3327 if f.startswith(path_):
3420 if f.startswith(path_):
3328 return
3421 return
3329 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3422 ui.warn(b"%s: %s\n" % (uipathfn(path), msg))
3330
3423
3331 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3424 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3332 if abs not in names:
3425 if abs not in names:
3333 names[abs] = m.exact(abs)
3426 names[abs] = m.exact(abs)
3334
3427
3335 # Find status of all file in `names`.
3428 # Find status of all file in `names`.
3336 m = scmutil.matchfiles(repo, names)
3429 m = scmutil.matchfiles(repo, names)
3337
3430
3338 changes = repo.status(
3431 changes = repo.status(
3339 node1=node, match=m, unknown=True, ignored=True, clean=True
3432 node1=node, match=m, unknown=True, ignored=True, clean=True
3340 )
3433 )
3341 else:
3434 else:
3342 changes = repo.status(node1=node, match=m)
3435 changes = repo.status(node1=node, match=m)
3343 for kind in changes:
3436 for kind in changes:
3344 for abs in kind:
3437 for abs in kind:
3345 names[abs] = m.exact(abs)
3438 names[abs] = m.exact(abs)
3346
3439
3347 m = scmutil.matchfiles(repo, names)
3440 m = scmutil.matchfiles(repo, names)
3348
3441
3349 modified = set(changes.modified)
3442 modified = set(changes.modified)
3350 added = set(changes.added)
3443 added = set(changes.added)
3351 removed = set(changes.removed)
3444 removed = set(changes.removed)
3352 _deleted = set(changes.deleted)
3445 _deleted = set(changes.deleted)
3353 unknown = set(changes.unknown)
3446 unknown = set(changes.unknown)
3354 unknown.update(changes.ignored)
3447 unknown.update(changes.ignored)
3355 clean = set(changes.clean)
3448 clean = set(changes.clean)
3356 modadded = set()
3449 modadded = set()
3357
3450
3358 # We need to account for the state of the file in the dirstate,
3451 # We need to account for the state of the file in the dirstate,
3359 # even when we revert against something else than parent. This will
3452 # even when we revert against something else than parent. This will
3360 # slightly alter the behavior of revert (doing back up or not, delete
3453 # slightly alter the behavior of revert (doing back up or not, delete
3361 # or just forget etc).
3454 # or just forget etc).
3362 if parent == node:
3455 if parent == node:
3363 dsmodified = modified
3456 dsmodified = modified
3364 dsadded = added
3457 dsadded = added
3365 dsremoved = removed
3458 dsremoved = removed
3366 # store all local modifications, useful later for rename detection
3459 # store all local modifications, useful later for rename detection
3367 localchanges = dsmodified | dsadded
3460 localchanges = dsmodified | dsadded
3368 modified, added, removed = set(), set(), set()
3461 modified, added, removed = set(), set(), set()
3369 else:
3462 else:
3370 changes = repo.status(node1=parent, match=m)
3463 changes = repo.status(node1=parent, match=m)
3371 dsmodified = set(changes.modified)
3464 dsmodified = set(changes.modified)
3372 dsadded = set(changes.added)
3465 dsadded = set(changes.added)
3373 dsremoved = set(changes.removed)
3466 dsremoved = set(changes.removed)
3374 # store all local modifications, useful later for rename detection
3467 # store all local modifications, useful later for rename detection
3375 localchanges = dsmodified | dsadded
3468 localchanges = dsmodified | dsadded
3376
3469
3377 # only take into account for removes between wc and target
3470 # only take into account for removes between wc and target
3378 clean |= dsremoved - removed
3471 clean |= dsremoved - removed
3379 dsremoved &= removed
3472 dsremoved &= removed
3380 # distinct between dirstate remove and other
3473 # distinct between dirstate remove and other
3381 removed -= dsremoved
3474 removed -= dsremoved
3382
3475
3383 modadded = added & dsmodified
3476 modadded = added & dsmodified
3384 added -= modadded
3477 added -= modadded
3385
3478
3386 # tell newly modified apart.
3479 # tell newly modified apart.
3387 dsmodified &= modified
3480 dsmodified &= modified
3388 dsmodified |= modified & dsadded # dirstate added may need backup
3481 dsmodified |= modified & dsadded # dirstate added may need backup
3389 modified -= dsmodified
3482 modified -= dsmodified
3390
3483
3391 # We need to wait for some post-processing to update this set
3484 # We need to wait for some post-processing to update this set
3392 # before making the distinction. The dirstate will be used for
3485 # before making the distinction. The dirstate will be used for
3393 # that purpose.
3486 # that purpose.
3394 dsadded = added
3487 dsadded = added
3395
3488
3396 # in case of merge, files that are actually added can be reported as
3489 # in case of merge, files that are actually added can be reported as
3397 # modified, we need to post process the result
3490 # modified, we need to post process the result
3398 if p2 != repo.nullid:
3491 if p2 != repo.nullid:
3399 mergeadd = set(dsmodified)
3492 mergeadd = set(dsmodified)
3400 for path in dsmodified:
3493 for path in dsmodified:
3401 if path in mf:
3494 if path in mf:
3402 mergeadd.remove(path)
3495 mergeadd.remove(path)
3403 dsadded |= mergeadd
3496 dsadded |= mergeadd
3404 dsmodified -= mergeadd
3497 dsmodified -= mergeadd
3405
3498
3406 # if f is a rename, update `names` to also revert the source
3499 # if f is a rename, update `names` to also revert the source
3407 for f in localchanges:
3500 for f in localchanges:
3408 src = repo.dirstate.copied(f)
3501 src = repo.dirstate.copied(f)
3409 # XXX should we check for rename down to target node?
3502 # XXX should we check for rename down to target node?
3410 if (
3503 if (
3411 src
3504 src
3412 and src not in names
3505 and src not in names
3413 and repo.dirstate.get_entry(src).removed
3506 and repo.dirstate.get_entry(src).removed
3414 ):
3507 ):
3415 dsremoved.add(src)
3508 dsremoved.add(src)
3416 names[src] = True
3509 names[src] = True
3417
3510
3418 # determine the exact nature of the deleted changesets
3511 # determine the exact nature of the deleted changesets
3419 deladded = set(_deleted)
3512 deladded = set(_deleted)
3420 for path in _deleted:
3513 for path in _deleted:
3421 if path in mf:
3514 if path in mf:
3422 deladded.remove(path)
3515 deladded.remove(path)
3423 deleted = _deleted - deladded
3516 deleted = _deleted - deladded
3424
3517
3425 # distinguish between file to forget and the other
3518 # distinguish between file to forget and the other
3426 added = set()
3519 added = set()
3427 for abs in dsadded:
3520 for abs in dsadded:
3428 if not repo.dirstate.get_entry(abs).added:
3521 if not repo.dirstate.get_entry(abs).added:
3429 added.add(abs)
3522 added.add(abs)
3430 dsadded -= added
3523 dsadded -= added
3431
3524
3432 for abs in deladded:
3525 for abs in deladded:
3433 if repo.dirstate.get_entry(abs).added:
3526 if repo.dirstate.get_entry(abs).added:
3434 dsadded.add(abs)
3527 dsadded.add(abs)
3435 deladded -= dsadded
3528 deladded -= dsadded
3436
3529
3437 # For files marked as removed, we check if an unknown file is present at
3530 # For files marked as removed, we check if an unknown file is present at
3438 # the same path. If a such file exists it may need to be backed up.
3531 # the same path. If a such file exists it may need to be backed up.
3439 # Making the distinction at this stage helps have simpler backup
3532 # Making the distinction at this stage helps have simpler backup
3440 # logic.
3533 # logic.
3441 removunk = set()
3534 removunk = set()
3442 for abs in removed:
3535 for abs in removed:
3443 target = repo.wjoin(abs)
3536 target = repo.wjoin(abs)
3444 if os.path.lexists(target):
3537 if os.path.lexists(target):
3445 removunk.add(abs)
3538 removunk.add(abs)
3446 removed -= removunk
3539 removed -= removunk
3447
3540
3448 dsremovunk = set()
3541 dsremovunk = set()
3449 for abs in dsremoved:
3542 for abs in dsremoved:
3450 target = repo.wjoin(abs)
3543 target = repo.wjoin(abs)
3451 if os.path.lexists(target):
3544 if os.path.lexists(target):
3452 dsremovunk.add(abs)
3545 dsremovunk.add(abs)
3453 dsremoved -= dsremovunk
3546 dsremoved -= dsremovunk
3454
3547
3455 # action to be actually performed by revert
3548 # action to be actually performed by revert
3456 # (<list of file>, message>) tuple
3549 # (<list of file>, message>) tuple
3457 actions = {
3550 actions = {
3458 b'revert': ([], _(b'reverting %s\n')),
3551 b'revert': ([], _(b'reverting %s\n')),
3459 b'add': ([], _(b'adding %s\n')),
3552 b'add': ([], _(b'adding %s\n')),
3460 b'remove': ([], _(b'removing %s\n')),
3553 b'remove': ([], _(b'removing %s\n')),
3461 b'drop': ([], _(b'removing %s\n')),
3554 b'drop': ([], _(b'removing %s\n')),
3462 b'forget': ([], _(b'forgetting %s\n')),
3555 b'forget': ([], _(b'forgetting %s\n')),
3463 b'undelete': ([], _(b'undeleting %s\n')),
3556 b'undelete': ([], _(b'undeleting %s\n')),
3464 b'noop': (None, _(b'no changes needed to %s\n')),
3557 b'noop': (None, _(b'no changes needed to %s\n')),
3465 b'unknown': (None, _(b'file not managed: %s\n')),
3558 b'unknown': (None, _(b'file not managed: %s\n')),
3466 }
3559 }
3467
3560
3468 # "constant" that convey the backup strategy.
3561 # "constant" that convey the backup strategy.
3469 # All set to `discard` if `no-backup` is set do avoid checking
3562 # All set to `discard` if `no-backup` is set do avoid checking
3470 # no_backup lower in the code.
3563 # no_backup lower in the code.
3471 # These values are ordered for comparison purposes
3564 # These values are ordered for comparison purposes
3472 backupinteractive = 3 # do backup if interactively modified
3565 backupinteractive = 3 # do backup if interactively modified
3473 backup = 2 # unconditionally do backup
3566 backup = 2 # unconditionally do backup
3474 check = 1 # check if the existing file differs from target
3567 check = 1 # check if the existing file differs from target
3475 discard = 0 # never do backup
3568 discard = 0 # never do backup
3476 if opts.get(b'no_backup'):
3569 if opts.get(b'no_backup'):
3477 backupinteractive = backup = check = discard
3570 backupinteractive = backup = check = discard
3478 if interactive:
3571 if interactive:
3479 dsmodifiedbackup = backupinteractive
3572 dsmodifiedbackup = backupinteractive
3480 else:
3573 else:
3481 dsmodifiedbackup = backup
3574 dsmodifiedbackup = backup
3482 tobackup = set()
3575 tobackup = set()
3483
3576
3484 backupanddel = actions[b'remove']
3577 backupanddel = actions[b'remove']
3485 if not opts.get(b'no_backup'):
3578 if not opts.get(b'no_backup'):
3486 backupanddel = actions[b'drop']
3579 backupanddel = actions[b'drop']
3487
3580
3488 disptable = (
3581 disptable = (
3489 # dispatch table:
3582 # dispatch table:
3490 # file state
3583 # file state
3491 # action
3584 # action
3492 # make backup
3585 # make backup
3493 ## Sets that results that will change file on disk
3586 ## Sets that results that will change file on disk
3494 # Modified compared to target, no local change
3587 # Modified compared to target, no local change
3495 (modified, actions[b'revert'], discard),
3588 (modified, actions[b'revert'], discard),
3496 # Modified compared to target, but local file is deleted
3589 # Modified compared to target, but local file is deleted
3497 (deleted, actions[b'revert'], discard),
3590 (deleted, actions[b'revert'], discard),
3498 # Modified compared to target, local change
3591 # Modified compared to target, local change
3499 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3592 (dsmodified, actions[b'revert'], dsmodifiedbackup),
3500 # Added since target
3593 # Added since target
3501 (added, actions[b'remove'], discard),
3594 (added, actions[b'remove'], discard),
3502 # Added in working directory
3595 # Added in working directory
3503 (dsadded, actions[b'forget'], discard),
3596 (dsadded, actions[b'forget'], discard),
3504 # Added since target, have local modification
3597 # Added since target, have local modification
3505 (modadded, backupanddel, backup),
3598 (modadded, backupanddel, backup),
3506 # Added since target but file is missing in working directory
3599 # Added since target but file is missing in working directory
3507 (deladded, actions[b'drop'], discard),
3600 (deladded, actions[b'drop'], discard),
3508 # Removed since target, before working copy parent
3601 # Removed since target, before working copy parent
3509 (removed, actions[b'add'], discard),
3602 (removed, actions[b'add'], discard),
3510 # Same as `removed` but an unknown file exists at the same path
3603 # Same as `removed` but an unknown file exists at the same path
3511 (removunk, actions[b'add'], check),
3604 (removunk, actions[b'add'], check),
3512 # Removed since targe, marked as such in working copy parent
3605 # Removed since targe, marked as such in working copy parent
3513 (dsremoved, actions[b'undelete'], discard),
3606 (dsremoved, actions[b'undelete'], discard),
3514 # Same as `dsremoved` but an unknown file exists at the same path
3607 # Same as `dsremoved` but an unknown file exists at the same path
3515 (dsremovunk, actions[b'undelete'], check),
3608 (dsremovunk, actions[b'undelete'], check),
3516 ## the following sets does not result in any file changes
3609 ## the following sets does not result in any file changes
3517 # File with no modification
3610 # File with no modification
3518 (clean, actions[b'noop'], discard),
3611 (clean, actions[b'noop'], discard),
3519 # Existing file, not tracked anywhere
3612 # Existing file, not tracked anywhere
3520 (unknown, actions[b'unknown'], discard),
3613 (unknown, actions[b'unknown'], discard),
3521 )
3614 )
3522
3615
3523 for abs, exact in sorted(names.items()):
3616 for abs, exact in sorted(names.items()):
3524 # target file to be touch on disk (relative to cwd)
3617 # target file to be touch on disk (relative to cwd)
3525 target = repo.wjoin(abs)
3618 target = repo.wjoin(abs)
3526 # search the entry in the dispatch table.
3619 # search the entry in the dispatch table.
3527 # if the file is in any of these sets, it was touched in the working
3620 # if the file is in any of these sets, it was touched in the working
3528 # directory parent and we are sure it needs to be reverted.
3621 # directory parent and we are sure it needs to be reverted.
3529 for table, (xlist, msg), dobackup in disptable:
3622 for table, (xlist, msg), dobackup in disptable:
3530 if abs not in table:
3623 if abs not in table:
3531 continue
3624 continue
3532 if xlist is not None:
3625 if xlist is not None:
3533 xlist.append(abs)
3626 xlist.append(abs)
3534 if dobackup:
3627 if dobackup:
3535 # If in interactive mode, don't automatically create
3628 # If in interactive mode, don't automatically create
3536 # .orig files (issue4793)
3629 # .orig files (issue4793)
3537 if dobackup == backupinteractive:
3630 if dobackup == backupinteractive:
3538 tobackup.add(abs)
3631 tobackup.add(abs)
3539 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3632 elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
3540 absbakname = scmutil.backuppath(ui, repo, abs)
3633 absbakname = scmutil.backuppath(ui, repo, abs)
3541 bakname = os.path.relpath(
3634 bakname = os.path.relpath(
3542 absbakname, start=repo.root
3635 absbakname, start=repo.root
3543 )
3636 )
3544 ui.note(
3637 ui.note(
3545 _(b'saving current version of %s as %s\n')
3638 _(b'saving current version of %s as %s\n')
3546 % (uipathfn(abs), uipathfn(bakname))
3639 % (uipathfn(abs), uipathfn(bakname))
3547 )
3640 )
3548 if not opts.get(b'dry_run'):
3641 if not opts.get(b'dry_run'):
3549 if interactive:
3642 if interactive:
3550 util.copyfile(target, absbakname)
3643 util.copyfile(target, absbakname)
3551 else:
3644 else:
3552 util.rename(target, absbakname)
3645 util.rename(target, absbakname)
3553 if opts.get(b'dry_run'):
3646 if opts.get(b'dry_run'):
3554 if ui.verbose or not exact:
3647 if ui.verbose or not exact:
3555 ui.status(msg % uipathfn(abs))
3648 ui.status(msg % uipathfn(abs))
3556 elif exact:
3649 elif exact:
3557 ui.warn(msg % uipathfn(abs))
3650 ui.warn(msg % uipathfn(abs))
3558 break
3651 break
3559
3652
3560 if not opts.get(b'dry_run'):
3653 if not opts.get(b'dry_run'):
3561 needdata = (b'revert', b'add', b'undelete')
3654 needdata = (b'revert', b'add', b'undelete')
3562 oplist = [actions[name][0] for name in needdata]
3655 oplist = [actions[name][0] for name in needdata]
3563 prefetch = scmutil.prefetchfiles
3656 prefetch = scmutil.prefetchfiles
3564 matchfiles = scmutil.matchfiles(
3657 matchfiles = scmutil.matchfiles(
3565 repo, [f for sublist in oplist for f in sublist]
3658 repo, [f for sublist in oplist for f in sublist]
3566 )
3659 )
3567 prefetch(
3660 prefetch(
3568 repo,
3661 repo,
3569 [(ctx.rev(), matchfiles)],
3662 [(ctx.rev(), matchfiles)],
3570 )
3663 )
3571 match = scmutil.match(repo[None], pats)
3664 match = scmutil.match(repo[None], pats)
3572 _performrevert(
3665 _performrevert(
3573 repo,
3666 repo,
3574 ctx,
3667 ctx,
3575 names,
3668 names,
3576 uipathfn,
3669 uipathfn,
3577 actions,
3670 actions,
3578 match,
3671 match,
3579 interactive,
3672 interactive,
3580 tobackup,
3673 tobackup,
3581 )
3674 )
3582
3675
3583 if targetsubs:
3676 if targetsubs:
3584 # Revert the subrepos on the revert list
3677 # Revert the subrepos on the revert list
3585 for sub in targetsubs:
3678 for sub in targetsubs:
3586 try:
3679 try:
3587 wctx.sub(sub).revert(
3680 wctx.sub(sub).revert(
3588 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3681 ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
3589 )
3682 )
3590 except KeyError:
3683 except KeyError:
3591 raise error.Abort(
3684 raise error.Abort(
3592 b"subrepository '%s' does not exist in %s!"
3685 b"subrepository '%s' does not exist in %s!"
3593 % (sub, short(ctx.node()))
3686 % (sub, short(ctx.node()))
3594 )
3687 )
3595
3688
3596
3689
3597 def _performrevert(
3690 def _performrevert(
3598 repo,
3691 repo,
3599 ctx,
3692 ctx,
3600 names,
3693 names,
3601 uipathfn,
3694 uipathfn,
3602 actions,
3695 actions,
3603 match,
3696 match,
3604 interactive=False,
3697 interactive=False,
3605 tobackup=None,
3698 tobackup=None,
3606 ):
3699 ):
3607 """function that actually perform all the actions computed for revert
3700 """function that actually perform all the actions computed for revert
3608
3701
3609 This is an independent function to let extension to plug in and react to
3702 This is an independent function to let extension to plug in and react to
3610 the imminent revert.
3703 the imminent revert.
3611
3704
3612 Make sure you have the working directory locked when calling this function.
3705 Make sure you have the working directory locked when calling this function.
3613 """
3706 """
3614 parent, p2 = repo.dirstate.parents()
3707 parent, p2 = repo.dirstate.parents()
3615 node = ctx.node()
3708 node = ctx.node()
3616 excluded_files = []
3709 excluded_files = []
3617
3710
3618 def checkout(f):
3711 def checkout(f):
3619 fc = ctx[f]
3712 fc = ctx[f]
3620 repo.wwrite(f, fc.data(), fc.flags())
3713 repo.wwrite(f, fc.data(), fc.flags())
3621
3714
3622 def doremove(f):
3715 def doremove(f):
3623 try:
3716 try:
3624 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
3717 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
3625 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3718 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3626 except OSError:
3719 except OSError:
3627 pass
3720 pass
3628 repo.dirstate.set_untracked(f)
3721 repo.dirstate.set_untracked(f)
3629
3722
3630 def prntstatusmsg(action, f):
3723 def prntstatusmsg(action, f):
3631 exact = names[f]
3724 exact = names[f]
3632 if repo.ui.verbose or not exact:
3725 if repo.ui.verbose or not exact:
3633 repo.ui.status(actions[action][1] % uipathfn(f))
3726 repo.ui.status(actions[action][1] % uipathfn(f))
3634
3727
3635 audit_path = pathutil.pathauditor(repo.root, cached=True)
3728 audit_path = pathutil.pathauditor(repo.root, cached=True)
3636 for f in actions[b'forget'][0]:
3729 for f in actions[b'forget'][0]:
3637 if interactive:
3730 if interactive:
3638 choice = repo.ui.promptchoice(
3731 choice = repo.ui.promptchoice(
3639 _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3732 _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3640 )
3733 )
3641 if choice == 0:
3734 if choice == 0:
3642 prntstatusmsg(b'forget', f)
3735 prntstatusmsg(b'forget', f)
3643 repo.dirstate.set_untracked(f)
3736 repo.dirstate.set_untracked(f)
3644 else:
3737 else:
3645 excluded_files.append(f)
3738 excluded_files.append(f)
3646 else:
3739 else:
3647 prntstatusmsg(b'forget', f)
3740 prntstatusmsg(b'forget', f)
3648 repo.dirstate.set_untracked(f)
3741 repo.dirstate.set_untracked(f)
3649 for f in actions[b'remove'][0]:
3742 for f in actions[b'remove'][0]:
3650 audit_path(f)
3743 audit_path(f)
3651 if interactive:
3744 if interactive:
3652 choice = repo.ui.promptchoice(
3745 choice = repo.ui.promptchoice(
3653 _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3746 _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3654 )
3747 )
3655 if choice == 0:
3748 if choice == 0:
3656 prntstatusmsg(b'remove', f)
3749 prntstatusmsg(b'remove', f)
3657 doremove(f)
3750 doremove(f)
3658 else:
3751 else:
3659 excluded_files.append(f)
3752 excluded_files.append(f)
3660 else:
3753 else:
3661 prntstatusmsg(b'remove', f)
3754 prntstatusmsg(b'remove', f)
3662 doremove(f)
3755 doremove(f)
3663 for f in actions[b'drop'][0]:
3756 for f in actions[b'drop'][0]:
3664 audit_path(f)
3757 audit_path(f)
3665 prntstatusmsg(b'drop', f)
3758 prntstatusmsg(b'drop', f)
3666 repo.dirstate.set_untracked(f)
3759 repo.dirstate.set_untracked(f)
3667
3760
3668 # We are reverting to our parent. If possible, we had like `hg status`
3761 # We are reverting to our parent. If possible, we had like `hg status`
3669 # to report the file as clean. We have to be less agressive for
3762 # to report the file as clean. We have to be less agressive for
3670 # merges to avoid losing information about copy introduced by the merge.
3763 # merges to avoid losing information about copy introduced by the merge.
3671 # This might comes with bugs ?
3764 # This might comes with bugs ?
3672 reset_copy = p2 == repo.nullid
3765 reset_copy = p2 == repo.nullid
3673
3766
3674 def normal(filename):
3767 def normal(filename):
3675 return repo.dirstate.set_tracked(filename, reset_copy=reset_copy)
3768 return repo.dirstate.set_tracked(filename, reset_copy=reset_copy)
3676
3769
3677 newlyaddedandmodifiedfiles = set()
3770 newlyaddedandmodifiedfiles = set()
3678 if interactive:
3771 if interactive:
3679 # Prompt the user for changes to revert
3772 # Prompt the user for changes to revert
3680 torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
3773 torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
3681 m = scmutil.matchfiles(repo, torevert)
3774 m = scmutil.matchfiles(repo, torevert)
3682 diffopts = patch.difffeatureopts(
3775 diffopts = patch.difffeatureopts(
3683 repo.ui,
3776 repo.ui,
3684 whitespace=True,
3777 whitespace=True,
3685 section=b'commands',
3778 section=b'commands',
3686 configprefix=b'revert.interactive.',
3779 configprefix=b'revert.interactive.',
3687 )
3780 )
3688 diffopts.nodates = True
3781 diffopts.nodates = True
3689 diffopts.git = True
3782 diffopts.git = True
3690 operation = b'apply'
3783 operation = b'apply'
3691 if node == parent:
3784 if node == parent:
3692 if repo.ui.configbool(
3785 if repo.ui.configbool(
3693 b'experimental', b'revert.interactive.select-to-keep'
3786 b'experimental', b'revert.interactive.select-to-keep'
3694 ):
3787 ):
3695 operation = b'keep'
3788 operation = b'keep'
3696 else:
3789 else:
3697 operation = b'discard'
3790 operation = b'discard'
3698
3791
3699 if operation == b'apply':
3792 if operation == b'apply':
3700 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3793 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3701 else:
3794 else:
3702 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3795 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3703 original_headers = patch.parsepatch(diff)
3796 original_headers = patch.parsepatch(diff)
3704
3797
3705 try:
3798 try:
3706
3799
3707 chunks, opts = recordfilter(
3800 chunks, opts = recordfilter(
3708 repo.ui, original_headers, match, operation=operation
3801 repo.ui, original_headers, match, operation=operation
3709 )
3802 )
3710 if operation == b'discard':
3803 if operation == b'discard':
3711 chunks = patch.reversehunks(chunks)
3804 chunks = patch.reversehunks(chunks)
3712
3805
3713 except error.PatchParseError as err:
3806 except error.PatchParseError as err:
3714 raise error.InputError(_(b'error parsing patch: %s') % err)
3807 raise error.InputError(_(b'error parsing patch: %s') % err)
3715 except error.PatchApplicationError as err:
3808 except error.PatchApplicationError as err:
3716 raise error.StateError(_(b'error applying patch: %s') % err)
3809 raise error.StateError(_(b'error applying patch: %s') % err)
3717
3810
3718 # FIXME: when doing an interactive revert of a copy, there's no way of
3811 # FIXME: when doing an interactive revert of a copy, there's no way of
3719 # performing a partial revert of the added file, the only option is
3812 # performing a partial revert of the added file, the only option is
3720 # "remove added file <name> (Yn)?", so we don't need to worry about the
3813 # "remove added file <name> (Yn)?", so we don't need to worry about the
3721 # alsorestore value. Ideally we'd be able to partially revert
3814 # alsorestore value. Ideally we'd be able to partially revert
3722 # copied/renamed files.
3815 # copied/renamed files.
3723 newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(chunks)
3816 newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(chunks)
3724 if tobackup is None:
3817 if tobackup is None:
3725 tobackup = set()
3818 tobackup = set()
3726 # Apply changes
3819 # Apply changes
3727 fp = stringio()
3820 fp = stringio()
3728 # chunks are serialized per file, but files aren't sorted
3821 # chunks are serialized per file, but files aren't sorted
3729 for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
3822 for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
3730 prntstatusmsg(b'revert', f)
3823 prntstatusmsg(b'revert', f)
3731 files = set()
3824 files = set()
3732 for c in chunks:
3825 for c in chunks:
3733 if ishunk(c):
3826 if ishunk(c):
3734 abs = c.header.filename()
3827 abs = c.header.filename()
3735 # Create a backup file only if this hunk should be backed up
3828 # Create a backup file only if this hunk should be backed up
3736 if c.header.filename() in tobackup:
3829 if c.header.filename() in tobackup:
3737 target = repo.wjoin(abs)
3830 target = repo.wjoin(abs)
3738 bakname = scmutil.backuppath(repo.ui, repo, abs)
3831 bakname = scmutil.backuppath(repo.ui, repo, abs)
3739 util.copyfile(target, bakname)
3832 util.copyfile(target, bakname)
3740 tobackup.remove(abs)
3833 tobackup.remove(abs)
3741 if abs not in files:
3834 if abs not in files:
3742 files.add(abs)
3835 files.add(abs)
3743 if operation == b'keep':
3836 if operation == b'keep':
3744 checkout(abs)
3837 checkout(abs)
3745 c.write(fp)
3838 c.write(fp)
3746 dopatch = fp.tell()
3839 dopatch = fp.tell()
3747 fp.seek(0)
3840 fp.seek(0)
3748 if dopatch:
3841 if dopatch:
3749 try:
3842 try:
3750 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3843 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3751 except error.PatchParseError as err:
3844 except error.PatchParseError as err:
3752 raise error.InputError(pycompat.bytestr(err))
3845 raise error.InputError(pycompat.bytestr(err))
3753 except error.PatchApplicationError as err:
3846 except error.PatchApplicationError as err:
3754 raise error.StateError(pycompat.bytestr(err))
3847 raise error.StateError(pycompat.bytestr(err))
3755 del fp
3848 del fp
3756 else:
3849 else:
3757 for f in actions[b'revert'][0]:
3850 for f in actions[b'revert'][0]:
3758 prntstatusmsg(b'revert', f)
3851 prntstatusmsg(b'revert', f)
3759 checkout(f)
3852 checkout(f)
3760 if normal:
3853 if normal:
3761 normal(f)
3854 normal(f)
3762
3855
3763 for f in actions[b'add'][0]:
3856 for f in actions[b'add'][0]:
3764 # Don't checkout modified files, they are already created by the diff
3857 # Don't checkout modified files, they are already created by the diff
3765 if f in newlyaddedandmodifiedfiles:
3858 if f in newlyaddedandmodifiedfiles:
3766 continue
3859 continue
3767
3860
3768 if interactive:
3861 if interactive:
3769 choice = repo.ui.promptchoice(
3862 choice = repo.ui.promptchoice(
3770 _(b"add new file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3863 _(b"add new file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
3771 )
3864 )
3772 if choice != 0:
3865 if choice != 0:
3773 continue
3866 continue
3774 prntstatusmsg(b'add', f)
3867 prntstatusmsg(b'add', f)
3775 checkout(f)
3868 checkout(f)
3776 repo.dirstate.set_tracked(f)
3869 repo.dirstate.set_tracked(f)
3777
3870
3778 for f in actions[b'undelete'][0]:
3871 for f in actions[b'undelete'][0]:
3779 if interactive:
3872 if interactive:
3780 choice = repo.ui.promptchoice(
3873 choice = repo.ui.promptchoice(
3781 _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
3874 _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
3782 )
3875 )
3783 if choice == 0:
3876 if choice == 0:
3784 prntstatusmsg(b'undelete', f)
3877 prntstatusmsg(b'undelete', f)
3785 checkout(f)
3878 checkout(f)
3786 normal(f)
3879 normal(f)
3787 else:
3880 else:
3788 excluded_files.append(f)
3881 excluded_files.append(f)
3789 else:
3882 else:
3790 prntstatusmsg(b'undelete', f)
3883 prntstatusmsg(b'undelete', f)
3791 checkout(f)
3884 checkout(f)
3792 normal(f)
3885 normal(f)
3793
3886
3794 copied = copies.pathcopies(repo[parent], ctx)
3887 copied = copies.pathcopies(repo[parent], ctx)
3795
3888
3796 for f in (
3889 for f in (
3797 actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
3890 actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
3798 ):
3891 ):
3799 if f in copied:
3892 if f in copied:
3800 repo.dirstate.copy(copied[f], f)
3893 repo.dirstate.copy(copied[f], f)
3801
3894
3802
3895
3803 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3896 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3804 # commands.outgoing. "missing" is "missing" of the result of
3897 # commands.outgoing. "missing" is "missing" of the result of
3805 # "findcommonoutgoing()"
3898 # "findcommonoutgoing()"
3806 outgoinghooks = util.hooks()
3899 outgoinghooks = util.hooks()
3807
3900
3808 # a list of (ui, repo) functions called by commands.summary
3901 # a list of (ui, repo) functions called by commands.summary
3809 summaryhooks = util.hooks()
3902 summaryhooks = util.hooks()
3810
3903
3811 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3904 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3812 #
3905 #
3813 # functions should return tuple of booleans below, if 'changes' is None:
3906 # functions should return tuple of booleans below, if 'changes' is None:
3814 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3907 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3815 #
3908 #
3816 # otherwise, 'changes' is a tuple of tuples below:
3909 # otherwise, 'changes' is a tuple of tuples below:
3817 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3910 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3818 # - (desturl, destbranch, destpeer, outgoing)
3911 # - (desturl, destbranch, destpeer, outgoing)
3819 summaryremotehooks = util.hooks()
3912 summaryremotehooks = util.hooks()
3820
3913
3821
3914
3822 def checkunfinished(repo, commit=False, skipmerge=False):
3915 def checkunfinished(repo, commit=False, skipmerge=False):
3823 """Look for an unfinished multistep operation, like graft, and abort
3916 """Look for an unfinished multistep operation, like graft, and abort
3824 if found. It's probably good to check this right before
3917 if found. It's probably good to check this right before
3825 bailifchanged().
3918 bailifchanged().
3826 """
3919 """
3827 # Check for non-clearable states first, so things like rebase will take
3920 # Check for non-clearable states first, so things like rebase will take
3828 # precedence over update.
3921 # precedence over update.
3829 for state in statemod._unfinishedstates:
3922 for state in statemod._unfinishedstates:
3830 if (
3923 if (
3831 state._clearable
3924 state._clearable
3832 or (commit and state._allowcommit)
3925 or (commit and state._allowcommit)
3833 or state._reportonly
3926 or state._reportonly
3834 ):
3927 ):
3835 continue
3928 continue
3836 if state.isunfinished(repo):
3929 if state.isunfinished(repo):
3837 raise error.StateError(state.msg(), hint=state.hint())
3930 raise error.StateError(state.msg(), hint=state.hint())
3838
3931
3839 for s in statemod._unfinishedstates:
3932 for s in statemod._unfinishedstates:
3840 if (
3933 if (
3841 not s._clearable
3934 not s._clearable
3842 or (commit and s._allowcommit)
3935 or (commit and s._allowcommit)
3843 or (s._opname == b'merge' and skipmerge)
3936 or (s._opname == b'merge' and skipmerge)
3844 or s._reportonly
3937 or s._reportonly
3845 ):
3938 ):
3846 continue
3939 continue
3847 if s.isunfinished(repo):
3940 if s.isunfinished(repo):
3848 raise error.StateError(s.msg(), hint=s.hint())
3941 raise error.StateError(s.msg(), hint=s.hint())
3849
3942
3850
3943
3851 def clearunfinished(repo):
3944 def clearunfinished(repo):
3852 """Check for unfinished operations (as above), and clear the ones
3945 """Check for unfinished operations (as above), and clear the ones
3853 that are clearable.
3946 that are clearable.
3854 """
3947 """
3855 for state in statemod._unfinishedstates:
3948 for state in statemod._unfinishedstates:
3856 if state._reportonly:
3949 if state._reportonly:
3857 continue
3950 continue
3858 if not state._clearable and state.isunfinished(repo):
3951 if not state._clearable and state.isunfinished(repo):
3859 raise error.StateError(state.msg(), hint=state.hint())
3952 raise error.StateError(state.msg(), hint=state.hint())
3860
3953
3861 for s in statemod._unfinishedstates:
3954 for s in statemod._unfinishedstates:
3862 if s._opname == b'merge' or s._reportonly:
3955 if s._opname == b'merge' or s._reportonly:
3863 continue
3956 continue
3864 if s._clearable and s.isunfinished(repo):
3957 if s._clearable and s.isunfinished(repo):
3865 util.unlink(repo.vfs.join(s._fname))
3958 util.unlink(repo.vfs.join(s._fname))
3866
3959
3867
3960
3868 def getunfinishedstate(repo):
3961 def getunfinishedstate(repo):
3869 """Checks for unfinished operations and returns statecheck object
3962 """Checks for unfinished operations and returns statecheck object
3870 for it"""
3963 for it"""
3871 for state in statemod._unfinishedstates:
3964 for state in statemod._unfinishedstates:
3872 if state.isunfinished(repo):
3965 if state.isunfinished(repo):
3873 return state
3966 return state
3874 return None
3967 return None
3875
3968
3876
3969
3877 def howtocontinue(repo):
3970 def howtocontinue(repo):
3878 """Check for an unfinished operation and return the command to finish
3971 """Check for an unfinished operation and return the command to finish
3879 it.
3972 it.
3880
3973
3881 statemod._unfinishedstates list is checked for an unfinished operation
3974 statemod._unfinishedstates list is checked for an unfinished operation
3882 and the corresponding message to finish it is generated if a method to
3975 and the corresponding message to finish it is generated if a method to
3883 continue is supported by the operation.
3976 continue is supported by the operation.
3884
3977
3885 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3978 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3886 a boolean.
3979 a boolean.
3887 """
3980 """
3888 contmsg = _(b"continue: %s")
3981 contmsg = _(b"continue: %s")
3889 for state in statemod._unfinishedstates:
3982 for state in statemod._unfinishedstates:
3890 if not state._continueflag:
3983 if not state._continueflag:
3891 continue
3984 continue
3892 if state.isunfinished(repo):
3985 if state.isunfinished(repo):
3893 return contmsg % state.continuemsg(), True
3986 return contmsg % state.continuemsg(), True
3894 if repo[None].dirty(missing=True, merge=False, branch=False):
3987 if repo[None].dirty(missing=True, merge=False, branch=False):
3895 return contmsg % _(b"hg commit"), False
3988 return contmsg % _(b"hg commit"), False
3896 return None, None
3989 return None, None
3897
3990
3898
3991
def checkafterresolved(repo):
    """Inform the user about the next action after completing hg resolve.

    If there is an unfinished operation that supports the continue flag,
    the hint from howtocontinue() is emitted through repo.ui.warn;
    otherwise it goes through repo.ui.note.
    """
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    emit = repo.ui.warn if warning else repo.ui.note
    emit(b"%s\n" % msg)
3913
4006
3914
4007
def wrongtooltocontinue(repo, task):
    """Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.  When howtocontinue()
    only suggests a plain 'hg commit' (warning flag is False) no hint is
    attached to the error.
    """
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.StateError(_(b'no %s in progress') % task, hint=hint)
3929
4022
3930
4023
def abortgraft(ui, repo, graftstate):
    """abort the interrupted graft and rollbacks to the state before interrupted
    graft

    Raises StateError when no graft is in progress and Abort when the
    graftstate file predates the 'newnodes' field needed for a safe abort.
    Returns 0 on success.
    """
    if not graftstate.exists():
        raise error.StateError(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # and old graft state which does not have all the data required to abort
        # the graft
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which graft operation was started
    if len(newnodes) > 0:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo[b'.']
    # whether to strip or not
    cleanup = False

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # checking that none of the newnodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            # public changesets must never be stripped; warn and fall back
            # to a plain working-directory update below
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            cleanup = False

        # checking that no new nodes are created on top of grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            # stripping would also remove unrelated descendants, so refuse
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            cleanup = False

        if cleanup:
            # both locks are needed: wlock for the working-copy update,
            # lock for the strip of the grafted changesets
            with repo.wlock(), repo.lock():
                mergemod.clean_update(startctx)
                # stripping the new nodes created
                strippoints = [
                    c.node() for c in repo.set(b"roots(%ld)", newnodes)
                ]
                repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        mergemod.clean_update(startctx)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
3993
4086
3994
4087
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """Read the graft state file and return a dict of the data stored in it.

    Falls back to parsing the raw 'graftstate' file as a plain list of
    nodes (one per line) when the structured state file is corrupted,
    which is how old-format graftstate files present themselves.
    """
    try:
        return graftstate.read()
    except error.CorruptedState:
        # old-style graftstate: just node hashes, no structured fields
        raw = repo.vfs.read(b'graftstate')
        return {b'nodes': raw.splitlines()}
4003
4096
4004
4097
def hgabortgraft(ui, repo):
    """Abort logic for aborting graft using 'hg abort'."""
    # The working copy is mutated during the abort, so hold the wlock for
    # the whole operation.
    with repo.wlock():
        state = statemod.cmdstate(repo, b'graftstate')
        return abortgraft(ui, repo, state)
@@ -1,2313 +1,2315 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Olivia Mackall <olivia@selenic.com>
3 # Copyright Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import binascii
9 import binascii
10 import errno
10 import errno
11 import glob
11 import glob
12 import os
12 import os
13 import posixpath
13 import posixpath
14 import re
14 import re
15 import subprocess
15 import subprocess
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirrev,
24 wdirrev,
25 )
25 )
26 from .pycompat import getattr
26 from .pycompat import getattr
27 from .thirdparty import attr
27 from .thirdparty import attr
28 from . import (
28 from . import (
29 copies as copiesmod,
29 copies as copiesmod,
30 encoding,
30 encoding,
31 error,
31 error,
32 match as matchmod,
32 match as matchmod,
33 obsolete,
33 obsolete,
34 obsutil,
34 obsutil,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 policy,
37 policy,
38 pycompat,
38 pycompat,
39 requirements as requirementsmod,
39 requirements as requirementsmod,
40 revsetlang,
40 revsetlang,
41 similar,
41 similar,
42 smartset,
42 smartset,
43 url,
43 url,
44 util,
44 util,
45 vfs,
45 vfs,
46 )
46 )
47
47
48 from .utils import (
48 from .utils import (
49 hashutil,
49 hashutil,
50 procutil,
50 procutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 if pycompat.iswindows:
54 if pycompat.iswindows:
55 from . import scmwindows as scmplatform
55 from . import scmwindows as scmplatform
56 else:
56 else:
57 from . import scmposix as scmplatform
57 from . import scmposix as scmplatform
58
58
59 parsers = policy.importmod('parsers')
59 parsers = policy.importmod('parsers')
60 rustrevlog = policy.importrust('revlog')
60 rustrevlog = policy.importrust('revlog')
61
61
62 termsize = scmplatform.termsize
62 termsize = scmplatform.termsize
63
63
64
64
@attr.s(slots=True, repr=False)
class status:
    """Struct with a list of files per status.

    The 'deleted', 'unknown' and 'ignored' properties are only
    relevant to the working copy.
    """

    # One list of file names per status category; attr.Factory(list)
    # gives each instance its own fresh (mutable) lists.
    modified = attr.ib(default=attr.Factory(list))
    added = attr.ib(default=attr.Factory(list))
    removed = attr.ib(default=attr.Factory(list))
    deleted = attr.ib(default=attr.Factory(list))
    unknown = attr.ib(default=attr.Factory(list))
    ignored = attr.ib(default=attr.Factory(list))
    clean = attr.ib(default=attr.Factory(list))

    def __iter__(self):
        # Iteration order matters: callers unpack the seven lists
        # positionally (modified, added, removed, deleted, unknown,
        # ignored, clean).
        yield self.modified
        yield self.added
        yield self.removed
        yield self.deleted
        yield self.unknown
        yield self.ignored
        yield self.clean

    def __repr__(self):
        # Hand-written repr (attrs' own repr is disabled above) that
        # pretty-prints each list via stringutil.pprint.
        return (
            r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
            r'unknown=%s, ignored=%s, clean=%s>'
        ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
95
95
96
96
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Build a (subpath -> ctx) mapping, preferring ctx1 for shared
    # subpaths.  The subpaths from ctx2 matter when the .hgsub file has
    # been modified (in ctx2) but not yet committed (in ctx1).
    owner = {}
    for subpath in ctx2.substate:
        owner[subpath] = ctx2
    for subpath in ctx1.substate:
        owner[subpath] = ctx1

    only_in_ctx2 = {p for p in ctx2.substate if p not in ctx1.substate}
    for subpath in only_in_ctx2:
        del owner[subpath]

    for subpath in sorted(owner):
        yield subpath, owner[subpath].sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2, so
    # that 'sub.{status|diff}(rev2)' compares against an empty baseline
    # instead of comparing the ctx2 subrepo against itself.
    for subpath in only_in_ctx2:
        yield subpath, ctx2.nullsub(subpath, ctx1)
121
121
122
122
def nochangesfound(ui, repo, excluded=None):
    """Report no changes for push/pull; excluded is None or a list of
    nodes excluded from the push/pull.
    """
    # Count excluded nodes that are secret (and not extinct): they are
    # worth mentioning as the reason nothing was exchanged.
    nsecret = 0
    for n in excluded or ():
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            nsecret += 1

    if nsecret:
        ui.status(
            _(b"no changes found (ignored %d secret changesets)\n") % nsecret
        )
    else:
        ui.status(_(b"no changes found\n"))
141
141
142
142
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    # Which of the two codes is returned is decided at the bottom by the
    # 'ui.detailed-exit-code' config knob; -1 means "not set by a handler".
    coarse_exit_code = -1
    detailed_exit_code = -1
    try:
        try:
            return func()
        except:  # re-raises
            # inner handler only records the traceback (when ui is
            # configured for it) before re-raising to the real handlers
            ui.traceback()
            raise
        # Global exception handling, alphabetically
        # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        detailed_exit_code = 20
        if inst.errno == errno.ETIMEDOUT:
            reason = _(b'timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker)
            )
        else:
            reason = _(b'lock held by %r') % inst.locker
        ui.error(
            _(b"abort: %s: %s\n")
            % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
        )
        if not inst.locker:
            ui.error(_(b"(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        detailed_exit_code = 20
        ui.error(
            _(b"abort: could not lock %s: %s\n")
            % (
                inst.desc or stringutil.forcebytestr(inst.filename),
                encoding.strtolocal(inst.strerror),
            )
        )
    except error.RepoError as inst:
        if isinstance(inst, error.RepoLookupError):
            detailed_exit_code = 10
        ui.error(_(b"abort: %s\n") % inst)
        if inst.hint:
            ui.error(_(b"(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        # server responses can be None, unicode, bytes or arbitrary
        # objects; render each shape differently
        ui.error(_(b"abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if msg is None:
            ui.error(b"\n")
        elif not isinstance(msg, bytes):
            ui.error(b" %r\n" % (msg,))
        elif not msg:
            ui.error(_(b" empty string\n"))
        else:
            ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_(b"abort: file censored %s\n") % inst)
    except error.WdirUnsupported:
        ui.error(_(b"abort: working directory revision cannot be specified\n"))
    except error.Error as inst:
        # generic Mercurial errors carry their own exit codes and
        # pre-formatted message
        if inst.detailed_exit_code is not None:
            detailed_exit_code = inst.detailed_exit_code
        if inst.coarse_exit_code is not None:
            coarse_exit_code = inst.coarse_exit_code
        ui.error(inst.format())
    except error.WorkerError as inst:
        # Don't print a message -- the worker already should have
        return inst.status_code
    except ImportError as inst:
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
        # last word of the message is the missing module name
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in b"mpatch bdiff".split():
            ui.error(_(b"(did you forget to compile extensions?)\n"))
        elif m in b"zlib".split():
            ui.error(_(b"(is your Python install correct?)\n"))
    except util.urlerr.httperror as inst:
        detailed_exit_code = 100
        ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
    except util.urlerr.urlerror as inst:
        detailed_exit_code = 100
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, str):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
    except (IOError, OSError) as inst:
        # broken pipes (e.g. output piped to 'head') are expected; stay quiet
        if (
            util.safehasattr(inst, b"args")
            and inst.args
            and inst.args[0] == errno.EPIPE
        ):
            pass
        elif getattr(inst, "strerror", None):  # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(
                    _(b"abort: %s: '%s'\n")
                    % (
                        encoding.strtolocal(inst.strerror),
                        stringutil.forcebytestr(inst.filename),
                    )
                )
            else:
                ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:  # suspicious IOError
            raise
    except MemoryError:
        ui.error(_(b"abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        detailed_exit_code = 254
        coarse_exit_code = inst.code

    if ui.configbool(b'ui', b'detailed-exit-code'):
        return detailed_exit_code
    else:
        return coarse_exit_code
267
267
268
268
def checknewlabel(repo, lbl, kind):
    """Validate 'lbl' as a new label name, raising InputError otherwise.

    Note: the "kind" parameter is deliberately not used in ui output,
    because that would make the strings difficult to translate.
    """
    if lbl in (b'tip', b'.', b'null'):
        raise error.InputError(_(b"the name '%s' is reserved") % lbl)
    for forbidden in (b':', b'\0', b'\n', b'\r'):
        if forbidden in lbl:
            raise error.InputError(
                _(b"%r cannot be used in a name") % pycompat.bytestr(forbidden)
            )
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        # Python would accept b'1_0' as an integer with "_" as visual
        # separators (PEP 515), but we treat such labels as plain names.
        if b'_' not in lbl:
            raise error.InputError(_(b"cannot use an integer as a name"))
    if lbl != lbl.strip():
        raise error.InputError(
            _(b"leading or trailing whitespace in name %r") % lbl
        )
293
293
294
294
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # Newline characters would corrupt the line-oriented metadata formats.
    if any(c in f for c in (b'\r', b'\n')):
        raise error.InputError(
            _(b"'\\n' and '\\r' disallowed in filenames: %r")
            % pycompat.bytestr(f)
        )
302
302
303
303
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    problem = util.checkwinfilename(f)
    if not problem:
        return
    full = b"%s: %s" % (problem, procutil.shellquote(f))
    if abort:
        raise error.InputError(full)
    ui.warn(_(b"warning: %s\n") % full)
315
315
316
316
def checkportabilityalert(ui):
    """check if the user's config requests nothing, a warning, or abort for
    non-portable filenames"""
    raw = ui.config(b'ui', b'portablefilenames')
    lowered = raw.lower()
    asbool = stringutil.parsebool(raw)
    # On Windows non-portable names are always fatal.
    abort = pycompat.iswindows or lowered == b'abort'
    warn = asbool or lowered == b'warn'
    recognized = warn or abort or lowered == b'ignore'
    if asbool is None and not recognized:
        raise error.ConfigError(
            _(b"ui.portablefilenames value is invalid ('%s')") % raw
        )
    return abort, warn
330
330
331
331
class casecollisionauditor:
    """Detect file names that case-fold-collide with tracked files.

    Instances are callable: each call audits one file name and either
    warns or aborts (depending on the 'abort' flag) on a collision.
    """

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        joined = b'\0'.join(dirstate)
        self._loweredfiles = set(encoding.lower(joined).split(b'\0'))
        self._dirstate = dirstate
        # Remember names already vetted so that auditing the same file
        # twice does not report a spurious collision.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        folded = encoding.lower(f)
        collides = folded in self._loweredfiles and f not in self._dirstate
        if collides:
            msg = _(b'possible case-folding collision for %s') % f
            if self._abort:
                raise error.StateError(msg)
            self._ui.warn(_(b"warning: %s\n") % msg)
        self._loweredfiles.add(folded)
        self._newfiles.add(f)
355
355
356
356
def filteredhash(repo, maxrev, needobsolete=False):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view (and, optionally,
    all obsolete revs) up to maxrev and returns that SHA-1 digest.
    """
    cl = repo.changelog
    if needobsolete:
        obsrevs = obsolete.getrevs(repo, b'obsolete')
        if not (cl.filteredrevs or obsrevs):
            return None
        key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
    else:
        if not cl.filteredrevs:
            return None
        obsrevs = frozenset()
        key = maxrev

    # results are memoized on the changelog, keyed by (maxrev, sets)
    result = cl._filteredrevs_hashcache.get(key)
    if result:
        return result
    relevant = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
    if relevant:
        digest = hashutil.sha1()
        for rev in relevant:
            digest.update(b'%d;' % rev)
        result = digest.digest()
        cl._filteredrevs_hashcache[key] = result
    return result
391
391
392
392
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    """yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs"""

    def on_walk_error(err):
        # Only errors on the starting path itself are fatal; failures
        # deeper in the tree are silently skipped by os.walk.
        if err.filename == path:
            raise err

    samestat = getattr(os.path, 'samestat', None)
    if not (followsym and samestat is not None):
        # cannot follow symlinks safely without samestat cycle detection
        followsym = False
    else:

        def record_dir(stats, dirname):
            # Return True (and remember the stat) when dirname has not
            # been visited yet; breaks symlink cycles.
            st = os.stat(dirname)
            for known in stats:
                if samestat(st, known):
                    return False
            stats.append(st)
            return True

    if followsym and seen_dirs is None:
        seen_dirs = []
        record_dir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=on_walk_error):
        dirs.sort()
        if b'.hg' in dirs:
            yield root  # found a repository
            qroot = os.path.join(root, b'.hg', b'patches')
            if os.path.isdir(os.path.join(qroot, b'.hg')):
                yield qroot  # we have a patch queue repo here
            if recurse:
                # keep descending, but never into the .hg directory
                dirs.remove(b'.hg')
            else:
                del dirs[:]  # don't descend further
        elif followsym:
            keep = []
            for d in dirs:
                full = os.path.join(root, d)
                if record_dir(seen_dirs, full):
                    if os.path.islink(full):
                        for hgname in walkrepos(full, True, seen_dirs):
                            yield hgname
                    else:
                        keep.append(d)
            dirs[:] = keep
440
440
441
441
def binnode(ctx):
    """Return the binary node id for basectx *ctx*.

    The working-directory context has no real node, so it is mapped to the
    repository's ``wdirid`` sentinel.
    """
    n = ctx.node()
    if n is not None:
        return n
    return ctx.repo().nodeconstants.wdirid
448
448
449
449
def intrev(ctx):
    """Return an integer revision for basectx *ctx* that can be used in
    comparison or arithmetic operations.

    The working-directory context (rev() is None) maps to ``wdirrev``.
    """
    r = ctx.rev()
    return wdirrev if r is None else r
457
457
458
458
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    # delegate to formatrevnode with the context's integer rev and binary node
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
464
464
465
465
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # full hashes in debug mode, short hashes otherwise
    hexfunc = hex if ui.debugflag else short
    return b'%d:%s' % (rev, hexfunc(node))
473
473
474
474
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly abbreviated) hex nodeid prefix to a binary node.

    Returns None when nothing matches. Re-raises
    error.AmbiguousPrefixLookupError when the prefix remains ambiguous even
    after consulting experimental.revisions.disambiguatewithin.
    """
    # a leading 'x' explicitly marks the rest as a hex node id (the marker
    # emitted by shortesthexnodeidprefix's disambiguate()); strip it
    if prefix.startswith(b'x'):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous/
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        # the prefix is ambiguous repo-wide; try again restricted to the
        # user-configured disambiguation revset
        revset = repo.ui.config(
            b'experimental', b'revisions.disambiguatewithin'
        )
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {
                (b'experimental', b'revisions.disambiguatewithin'): None
            }
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # only a unique match within the revset resolves ambiguity
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node
505
505
506
506
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        value = int(prefix)
    except ValueError:
        # not numeric at all, so it cannot collide with a revnum
        return False
    # a leading zero (other than b'0' itself) is never parsed as a revnum;
    # b'0' *is* a valid revnum and still needs disambiguation
    if prefix != b'0' and prefix[0:1] == b'0':
        return False
    # numbers beyond the tip revision cannot be revnums either
    return value < len(repo)
520
520
521
521
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
            # mark revnum-looking prefixes with a leading 'x' instead of
            # lengthening them (see resolvehexnodeidprefix)
            if mayberevnum(repo, prefix):
                return b'x' + prefix
            else:
                return prefix

        # otherwise grow the prefix until it can no longer be mistaken
        # for a revision number
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get(b'disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache[b'disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            # the node is inside the disambiguation revset: only need to be
            # unique within that (usually much smaller) set of revisions
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get(b'disambiguationnodetree')
            if not nodetree:
                if util.safehasattr(parsers, 'nodetree'):
                    # The CExt is the only implementation to provide a nodetree
                    # class so far.
                    index = cl.index
                    if util.safehasattr(index, 'get_cindex'):
                        # the rust wrapped need to give access to its internal index
                        index = index.get_cindex()
                    nodetree = parsers.nodetree(index, len(revs))
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache[b'disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # slow path without the C nodetree: linearly compare prefixes
            # against every revision in the revset
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
594
594
595
595
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if
    the symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
607
607
608
608
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = (
            b"symbol (%s of type %s) was not a string, did you mean "
            b"repo[symbol]?" % (symbol, type(symbol))
        )
        raise error.ProgrammingError(msg)
    try:
        # fast path for the most common symbolic names
        if symbol in (b'.', b'tip', b'null'):
            return repo[symbol]

        # next, try to interpret the symbol as a revision number
        try:
            r = int(symbol)
            if b'%d' % r != symbol:
                # not the canonical decimal spelling (e.g. b'010');
                # fall through and treat it as a hash prefix instead
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                # negative numbers count back from tip
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # a full-length hex node id?
        if len(symbol) == 2 * repo.nodeconstants.nodelen:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (binascii.Error, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # finally, try a short hex node id prefix
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        # the symbol referred to the working directory pseudo-revision
        return repo[None]
    except (
        error.FilteredIndexError,
        error.FilteredLookupError,
        error.FilteredRepoLookupError,
    ):
        # translate filtered-revision errors into a user-friendly message
        raise _filterederror(repo, symbol)
674
674
675
675
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith(b'visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _(b"hidden revision '%s'") % changeid

        hint = _(b'use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    # for non-"visible" filter levels (e.g. "served") the --hidden hint
    # would not help, so use a generic message instead
    msg = _(b"filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
700
700
701
701
def revsingle(repo, revspec, default=b'.', localalias=None):
    """Resolve *revspec* to a single change context, or *default* when the
    spec is empty.

    When the revset matches several revisions the last one wins; an empty
    match raises InputError.
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.InputError(_(b'empty revision set'))
    return repo[matched.last()]
710
710
711
711
def _pairspec(revspec):
    """Report whether *revspec* parses to a top-level range expression."""
    tree = revsetlang.parse(revspec)
    if not tree:
        # preserve the falsy parse result itself (matches `tree and ...`)
        return tree
    return tree[0] in (b'range', b'rangepre', b'rangepost', b'rangeall')
720
720
721
721
def revpair(repo, revs):
    """Resolve *revs* to a pair of change contexts (first, second)."""
    if not revs:
        # nothing specified: working copy parent vs. working copy
        return repo[b'.'], repo[None]

    resolved = revrange(repo, revs)

    if not resolved:
        raise error.InputError(_(b'empty revision range'))

    first = resolved.first()
    second = resolved.last()

    if (
        first == second
        and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)
    ):
        # several specs collapsed to one rev only because some were empty
        raise error.InputError(_(b'empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
746
746
747
747
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``smartset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # bare integers become literal revision-number revsets
    allspecs = [
        revsetlang.formatspec(b'%d', spec) if isinstance(spec, int) else spec
        for spec in specs
    ]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
775
775
776
776
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield geometrically growing window sizes, capped at *sizelimit*.

    The generator is infinite: once the cap is reached it keeps yielding
    *sizelimit*-sized windows forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
782
782
783
783
def walkchangerevs(repo, revs, makefilematcher, prepare):
    """Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order."""

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            # pull up to windowsize revisions in the caller's order
            nrevs = []
            for i in range(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                nrevs.append(rev)
            # prepare in ascending (forward) order ...
            for rev in sorted(nrevs):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            # ... then yield in the caller's requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
824
824
825
825
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        # a real merge: both parents matter
        return ps
    if repo.ui.debugflag:
        # debug output always shows both slots, padding with null
        return [ps[0], repo[nullrev]]
    # a single parent is only interesting when it is not simply the
    # immediately preceding revision
    if ps[0].rev() < intrev(ctx) - 1:
        return ps
    return []
841
841
842
842
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config(b'ui', b'relative-paths')
        if config == b'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _(b"ui.relative-paths is not a boolean ('%s')") % config
                )

    if relative:
        cwd = repo.getcwd()
        if cwd != b'':
            # this branch would work even if cwd == b'' (ie cwd = repo
            # root), but its generality makes the returned function slower
            pathto = repo.pathto
            return lambda f: pathto(f, cwd)
    # repo-relative output; honor ui.slash for separator style
    if repo.ui.configbool(b'ui', b'slash'):
        return lambda f: f
    return util.localpath
881
881
882
882
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''

    def prefixed(f):
        return uipathfn(posixpath.join(subpath, f))

    return prefixed
886
886
887
887
def anypats(pats, opts):
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    if pats:
        return True
    return bool(opts.get(b'include') or opts.get(b'exclude'))
895
895
896
896
def expandpats(pats):
    """Expand bare globs when running on windows.
    On posix we assume the shell has already done it."""
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            # bare pattern: let the glob module expand it
            try:
                matched = glob.glob(pat)
            except re.error:
                matched = [pat]
            if matched:
                expanded.extend(matched)
                continue
        # explicit kind, or a glob matching nothing: keep it verbatim
        expanded.append(kindpat)
    return expanded
915
915
916
916
def matchandpats(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Return a matcher and the patterns that were used.

    The matcher will warn about bad matches, unless an alternate badfn
    callback is provided.
    """
    if opts is None:
        opts = {}
    if not globbed and default == b'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)

    if badfn is None:
        # default: report bad matches as warnings on the repo ui
        def badfn(f, msg):
            ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))

    m = ctx.match(
        pats,
        opts.get(b'include'),
        opts.get(b'exclude'),
        default,
        listsubrepos=opts.get(b'subrepos'),
        badfn=badfn,
    )

    if m.always():
        # an always-matcher means the patterns were effectively empty
        pats = []
    return m, pats
948
948
949
949
def match(
    ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
):
    """Build a matcher that warns about bad matches.

    Identical to ``matchandpats`` except that the expanded pattern list
    is discarded and only the matcher is returned.
    """
    matcher, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return matcher
955
955
956
956
def matchall(repo):
    """Return a matcher that accepts every path with no filtering cost."""
    # The always-matcher is repository independent; `repo` is kept only for
    # signature consistency with the other match* helpers.
    return matchmod.always()
960
960
961
961
def matchfiles(repo, files, badfn=None):
    """Return an exact matcher for the given file list.

    ``repo`` is unused here; it is accepted for interface symmetry with the
    other match* helpers.
    """
    return matchmod.exact(files, badfn=badfn)
965
965
966
966
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    # a plain path (no pattern kind) just gets canonicalized
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # a real pattern must resolve to exactly one file in `rev`
    ctx = repo[rev]
    matcher = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    candidates = [path for path in ctx if matcher(path)]
    if len(candidates) != 1:
        raise error.ParseError(msg)
    return candidates[0]
980
980
981
981
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    backup_root = ui.config(b'ui', b'origbackuppath')
    if backup_root:
        # the configured path is interpreted relative to the working copy
        return vfs.vfs(repo.wvfs.join(backup_root))
    return None
990
990
991
991
def backuppath(ui, repo, filepath):
    """customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    """
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # no dedicated backup area configured: back up next to the file
        return repo.wjoin(filepath + b".orig")

    backup_dir = origvfs.dirname(filepath)
    if not origvfs.isdir(backup_dir) or origvfs.islink(backup_dir):
        ui.note(_(b'creating directory: %s\n') % origvfs.join(backup_dir))

    # Remove any files that conflict with the backup file's path
    for prefix in reversed(list(pathutil.finddirs(filepath))):
        if origvfs.isfileorlink(prefix):
            ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(prefix))
            origvfs.unlink(prefix)
            break

    origvfs.makedirs(backup_dir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(
            _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
        )
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
1026
1026
1027
1027
1028 class _containsnode:
1028 class _containsnode:
1029 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1029 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1030
1030
1031 def __init__(self, repo, revcontainer):
1031 def __init__(self, repo, revcontainer):
1032 self._torev = repo.changelog.rev
1032 self._torev = repo.changelog.rev
1033 self._revcontains = revcontainer.__contains__
1033 self._revcontains = revcontainer.__contains__
1034
1034
1035 def __contains__(self, node):
1035 def __contains__(self, node):
1036 return self._revcontains(self._torev(node))
1036 return self._revcontains(self._torev(node))
1037
1037
1038
1038
def cleanupnodes(
    repo,
    replacements,
    operation,
    moves=None,
    metadata=None,
    fixphase=False,
    targetphase=None,
    backup=True,
):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    # targetphase only makes sense when phases are being fixed up
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, b'items'):
        # bare iterable of nodes: each is replaced by nothing
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                if oldnode in moves:
                    continue
                if len(newnodes) > 1:
                    # usually a split, take the one with biggest rev number
                    newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
                elif len(newnodes) == 0:
                    # move bookmark backwards
                    allreplaced = []
                    for rep in replacements:
                        allreplaced.extend(rep)
                    roots = list(
                        unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
                    )
                    if roots:
                        newnode = roots[0].node()
                    else:
                        newnode = repo.nullid
                else:
                    newnode = newnodes[0]
                moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        # map each new node to the old nodes it replaces
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}

        def phase(ctx):
            # planned phase if one was computed, otherwise current phase
            return newphases.get(ctx.node(), ctx.phase())

        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                # inherit the highest phase among the replaced nodes
                oldphase = max(
                    unfi[oldnode].phase() for oldnode in precursors[newnode]
                )
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction(b'cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks  # avoid import cycle

            repo.ui.debug(
                b'moving bookmarks %r from %s to %s\n'
                % (
                    pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                    hex(oldnode),
                    hex(newnode),
                )
            )
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs(
                b'parents(roots(%ln & (::%n))) - parents(%n)',
                allnewnodes,
                newnode,
                oldnode,
            )
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(
                    repo, rels, operation=operation, metadata=metadata
                )
        elif phases.supportarchived(repo) and mayusearchived:
            # this assume we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair  # avoid import cycle

                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(
                    repo, allreplaced, allreplaced, node, operation
                )
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair  # avoid import cycle

            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(
                    repo.ui, repo, tostrip, operation, backup=backup
                )
1220
1220
1221
1221
def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None):
    """Schedule addition of new files and removal of missing ones, with
    optional similarity-based rename detection.

    ``open_tr``, when not None, is a callable invoked right before the
    dirstate is mutated -- and only when there is actually something to
    record -- so the caller can delay opening a transaction until it is
    known to be needed.

    Returns 1 if any explicitly-requested file was rejected or a subrepo
    addremove failed, otherwise 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get(b'dry_run')
    try:
        similarity = float(opts.get(b'similarity') or 0)
    except ValueError:
        raise error.InputError(_(b'similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.InputError(_(b'similarity must be between 0 and 100'))
    # normalize from percentage to a 0.0-1.0 ratio
    similarity /= 100.0

    ret = 0

    # recurse into subrepos when requested or directly matched
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    rejected = []

    def badfn(f, msg):
        # only report the error for files the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch
    )

    # announce what will be added (unknown/forgotten) and removed (deleted)
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _(b'adding %s\n') % uipathfn(abs)
                label = b'ui.addremove.added'
            else:
                status = _(b'removing %s\n') % uipathfn(abs)
                label = b'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    if not dry_run and (unknown or forgotten or deleted or renames):
        # there is something to record: give the caller a chance to open
        # a transaction before the dirstate is touched
        if open_tr is not None:
            open_tr()
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
1289
1291
1290
1292
def marktouched(repo, files, similarity=0.0):
    """Assert that files have somehow been operated upon. files are relative to
    the repo root."""
    # NB: `rejected` is referenced by the badfn lambda before it is assigned;
    # this works because the matcher only invokes badfn later, during walking.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _(b'adding %s\n') % abs
            else:
                status = _(b'removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(
        repo, m, added + unknown, removed + deleted, similarity, uipathfn
    )

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # return 1 if any of the requested files could not be processed
    for f in rejected:
        if f in m.files():
            return 1
    return 0
1324
1326
1325
1327
def _interestingfiles(repo, matcher):
    """Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean."""
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(
        matcher,
        subrepos=sorted(ctx.substate),
        unknown=True,
        ignored=False,
        full=False,
    )
    # `st` is falsy when the file is absent from disk -- the branches below
    # combine on-disk presence with the dirstate entry's tracked/removed state
    for abs, st in walkresults.items():
        entry = dirstate.get_entry(abs)
        if (not entry.any_tracked) and audit_path.check(abs):
            # on disk, in no dirstate state: candidate for addition
            unknown.append(abs)
        elif (not entry.removed) and not st:
            # tracked but gone from disk: candidate for removal
            deleted.append(abs)
        elif entry.removed and st:
            # marked removed yet present on disk again
            forgotten.append(abs)
        # for finding renames
        elif entry.removed and not st:
            removed.append(abs)
        elif entry.added:
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
1360
1362
1361
1363
1362 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1364 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1363 '''Find renames from removed files to added ones.'''
1365 '''Find renames from removed files to added ones.'''
1364 renames = {}
1366 renames = {}
1365 if similarity > 0:
1367 if similarity > 0:
1366 for old, new, score in similar.findrenames(
1368 for old, new, score in similar.findrenames(
1367 repo, added, removed, similarity
1369 repo, added, removed, similarity
1368 ):
1370 ):
1369 if (
1371 if (
1370 repo.ui.verbose
1372 repo.ui.verbose
1371 or not matcher.exact(old)
1373 or not matcher.exact(old)
1372 or not matcher.exact(new)
1374 or not matcher.exact(new)
1373 ):
1375 ):
1374 repo.ui.status(
1376 repo.ui.status(
1375 _(
1377 _(
1376 b'recording removal of %s as rename to %s '
1378 b'recording removal of %s as rename to %s '
1377 b'(%d%% similar)\n'
1379 b'(%d%% similar)\n'
1378 )
1380 )
1379 % (uipathfn(old), uipathfn(new), score * 100)
1381 % (uipathfn(old), uipathfn(new), score * 100)
1380 )
1382 )
1381 renames[new] = old
1383 renames[new] = old
1382 return renames
1384 return renames
1383
1385
1384
1386
def _markchanges(repo, unknown, deleted, renames):
    """Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied."""
    work_ctx = repo[None]
    with repo.wlock():
        work_ctx.forget(deleted)
        work_ctx.add(unknown)
        for destination, source in renames.items():
            work_ctx.copy(source, destination)
1394
1396
1395
1397
def getrenamedfn(repo, endrev=None):
    """Return a ``getrenamed(fn, rev)`` function that reports the copy
    source of ``fn`` in changeset ``rev``, or None if it was not copied."""
    if copiesmod.usechangesetcentricalgo(repo):

        def getrenamed(fn, rev):
            # changeset-centric algorithm: copy info is read directly from
            # the changeset's p1/p2 copy records
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None

        return getrenamed

    # filelog-centric algorithm: rename info is cached per file, keyed by
    # the linkrev of each filelog revision
    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        """looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev."""
        if fn not in rcache:
            # populate the cache in one pass over the filelog
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
1440
1442
1441
1443
def getcopiesfn(repo, endrev=None):
    """Return a function mapping a ctx to its sorted (dest, source) copies."""
    if copiesmod.usechangesetcentricalgo(repo):

        def copiesfn(ctx):
            # copy metadata lives in the changeset itself
            allcopies = dict(ctx.p1copies())
            p2copies = ctx.p2copies()
            if p2copies:
                # There should be no overlap
                allcopies.update(p2copies)
            return sorted(allcopies.items())

    else:
        getrenamed = getrenamedfn(repo, endrev)

        def copiesfn(ctx):
            # filelog-based lookup, one touched file at a time
            pairs = []
            for fn in ctx.files():
                source = getrenamed(fn, ctx.rev())
                if source:
                    pairs.append((fn, source))
            return pairs

    return copiesfn
1466
1468
1467
1469
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.

    When ``dryrun`` is true, no dirstate mutation is performed; the warning
    below may still be emitted.  ``cwd`` is only used to render paths in
    that warning.
    """
    # Follow an existing copy record so that chained copies point at the
    # original source rather than an intermediate file.
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc:  # copying back a copy?
        entry = repo.dirstate.get_entry(dst)
        if (entry.added or not entry.tracked) and not dryrun:
            repo.dirstate.set_tracked(dst)
    else:
        if repo.dirstate.get_entry(origsrc).added and origsrc == src:
            # The source was only added in the working directory, so there
            # is no committed revision to record copy metadata against;
            # just make sure the destination is tracked.
            if not ui.quiet:
                ui.warn(
                    _(
                        b"%s has not been committed yet, so no copy "
                        b"data will be stored for %s.\n"
                    )
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
                )
            if not repo.dirstate.get_entry(dst).tracked and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            # Normal case: record dst as copied from the original source.
            wctx.copy(origsrc, dst)
1491
1493
1492
1494
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo[b'.']
    ds = repo.dirstate
    # Snapshot working-directory copy records before reparenting; they are
    # merged back in below.
    copies = dict(ds.copies())
    ds.setparents(newctx.node(), repo.nullid)
    # Status of the old context relative to the new parent.
    s = newctx.status(oldctx, match=match)

    for f in s.modified:
        ds.update_file_p1(f, p1_tracked=True)

    # These files exist in the old state but not in newctx, so they are not
    # tracked in the new first parent.
    for f in s.added:
        ds.update_file_p1(f, p1_tracked=False)

    for f in s.removed:
        ds.update_file_p1(f, p1_tracked=True)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # Chain copies through the old parent so each destination points at a
    # source meaningful relative to newctx.
    copies = {dst: oldcopies.get(src, src) for dst, src in oldcopies.items()}
    # Adjust the dirstate copies
    for dst, src in copies.items():
        if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
            # Drop copy information that no longer makes sense against the
            # new parent (source missing, destination already present, or
            # destination no longer "added").
            src = None
        ds.copy(src, dst)
    repo._quick_access_changeid_invalidate()
1525
1527
1526
1528
def filterrequirements(requirements):
    """Split requirements between the working copy and the store.

    wcreq: requirements which should be written in .hg/requires
    storereq: which should be written in .hg/store/requires

    Returns (wcreq, storereq).  When share-safe mode is not enabled,
    everything belongs to .hg/requires and storereq is None.
    """
    if requirementsmod.SHARESAFE_REQUIREMENT not in requirements:
        return requirements, None
    working_dir = set()
    store = set()
    for req in requirements:
        if req in requirementsmod.WORKING_DIR_REQUIREMENTS:
            working_dir.add(req)
        else:
            store.add(req)
    return working_dir, store
1544
1546
1545
1547
def istreemanifest(repo):
    """Return whether the repository stores its manifests as trees."""
    requirements = repo.requirements
    return requirementsmod.TREEMANIFEST_REQUIREMENT in requirements
1549
1551
1550
1552
def writereporequirements(repo, requirements=None):
    """writes requirements for the repo

    Requirements are written to .hg/requires and .hg/store/requires based
    on whether share-safe mode is enabled and which requirements are wdir
    requirements and which are store requirements.

    If ``requirements`` is given, it replaces ``repo.requirements`` before
    being written out.
    """
    if requirements:
        repo.requirements = requirements
    wcreq, storereq = filterrequirements(repo.requirements)
    if wcreq is not None:
        writerequires(repo.vfs, wcreq)
    if storereq is not None:
        writerequires(repo.svfs, storereq)
    elif repo.ui.configbool(b'format', b'usestore'):
        # storereq is None when share-safe is off: only remove store
        # requires if we are using store
        repo.svfs.tryunlink(b'requires')
1568
1570
1569
1571
def writerequires(opener, requirements):
    """Write the requirement names, sorted and one per line, to 'requires'.

    ``opener`` is a vfs-like callable; the file is replaced atomically.
    """
    payload = b''.join(b"%s\n" % req for req in sorted(requirements))
    with opener(b'requires', b'w', atomictemp=True) as fp:
        fp.write(payload)
1574
1576
1575
1577
class filecachesubentry:
    """Stat-based change detection for a single file path.

    ``changed()`` compares a cached stat snapshot against a fresh one.
    Paths whose stat data cannot be cached reliably always report as
    changed.
    """

    def __init__(self, path, stat):
        # ``stat`` is a flag: when true, take an initial stat snapshot now
        # so that later ``changed()`` calls have a baseline.
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        # Re-snapshot the stat data so subsequent changed() calls compare
        # against the current on-disk state.
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """Return True if the file changed since the last snapshot.

        Also updates the snapshot when a change is detected.
        """
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # Returns a util.cachestat for ``path``, or None (implicitly) when
        # the file does not exist.
        try:
            return util.cachestat(path)
        except FileNotFoundError:
            pass
1629
1631
1630
1632
class filecacheentry:
    """Aggregate change tracking over several filecachesubentry objects."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(entry.changed() for entry in self._entries)

    def refresh(self):
        for entry in self._entries:
            entry.refresh()
1647
1649
1648
1650
class filecache:
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def tracked_paths(self, obj):
        # Resolve the declared relative paths against the decorated object.
        return [self.join(obj, path) for path in self.paths]

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # Decorator protocol: capture the wrapped function and both the
        # str and bytes forms of its name (bytes keys the _filecache dict).
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        # The instance-dict value shadows this non-data descriptor, so we
        # only run when no cached attribute is set yet.
        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            # Re-run the wrapped function only when a tracked file changed.
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = self.tracked_paths(obj)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        """Replace the cached value, keeping _filecache consistent."""
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = self.tracked_paths(obj)
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value  # update cached copy
        obj.__dict__[self.sname] = value  # update copy returned by obj.x
1736
1738
1737
1739
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config(b"extdata", source)
    if not spec:
        raise error.Abort(_(b"unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith(b"shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(
                procutil.tonativestr(cmd),
                shell=True,
                bufsize=-1,
                close_fds=procutil.closefds,
                stdout=subprocess.PIPE,
                cwd=procutil.tonativestr(repo.root),
            )
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # A record is "revspec[ value]"; missing value defaults to b"".
            if b" " in l:
                k, v = l.strip().split(b" ", 1)
            else:
                k, v = l.strip(), b""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError, error.InputError):
                pass  # we ignore data for nodes that don't exist locally
    finally:
        # Always reap the subprocess and close the stream, even if parsing
        # above raised.
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
        if proc and proc.returncode != 0:
            # ``cmd`` is always bound here: proc is only set in the
            # shell: branch, which assigns cmd first.
            raise error.Abort(
                _(b"extdata command '%s' failed: %s")
                % (cmd, procutil.explainexit(proc.returncode))
            )

    return data
1804
1806
1805
1807
class progress:
    """Track the position of one progress-bar topic.

    Every position change is forwarded to ``updatebar``; when the
    ``progress.debug`` config knob is set, updates are also mirrored to
    the ui debug output.  Instances are usable as context managers:
    leaving the context marks the bar complete.
    """

    def __init__(self, ui, updatebar, topic, unit=b"", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool(b'progress', b'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item=b"", total=None):
        """Move the bar to ``pos``, optionally adjusting the total."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item=b"", total=None):
        """Advance the bar by ``step`` positions."""
        self.update(self.pos + step, item, total)

    def complete(self):
        """Signal that this topic is finished (pos/total become None)."""
        self.pos = None
        self.unit = b""
        self.total = None
        self._updatebar(self.topic, self.pos, b"", self.unit, self.total)

    def _printdebug(self, item):
        # Mirror the update on the debug channel, with a percentage when
        # the total is known.
        suffix = b' ' + self.unit if self.unit else b''
        label = b' ' + item if item else b''

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug(
                b'%s:%s %d/%d%s (%4.2f%%)\n'
                % (self.topic, label, self.pos, self.total, suffix, pct)
            )
        else:
            self.ui.debug(
                b'%s:%s %d%s\n' % (self.topic, label, self.pos, suffix)
            )
1855
1857
1856
1858
def gdinitconfig(ui):
    """Tell whether a new repo should be created with general delta.

    Both the experimental ``format.generaldelta`` knob and the regular
    ``format.usegeneraldelta`` one enable it.
    """
    # experimental config: format.generaldelta
    if ui.configbool(b'format', b'generaldelta'):
        return True
    return ui.configbool(b'format', b'usegeneraldelta')
1863
1865
1864
1866
def gddeltaconfig(ui):
    """Tell whether incoming deltas should be re-optimized.

    The legacy ``format.generaldelta`` knob doubles as a signal that
    delta-bases received from peers should never be trusted as-is; this
    helper exists to read it for that purpose.
    """
    # experimental config: format.generaldelta
    distrust_incoming = ui.configbool(b'format', b'generaldelta')
    return distrust_incoming
1874
1876
1875
1877
class simplekeyvaluefile:
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    firstlinekey = b'__firstline'

    def __init__(self, vfs, path, keys=None):
        # ``keys`` is accepted for backward compatibility but unused.
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or reuturned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        result = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_(b"empty simplekeyvalue file"))
            # keep the first line verbatim, minus its trailing '\n'
            result[self.firstlinekey] = lines[0][:-1]
            lines = lines[1:]

        try:
            # Skip lines that are blank or whitespace-only ('\n' alone is
            # truthy, hence the explicit strip()).
            parsed = dict(
                line[:-1].split(b'=', 1) for line in lines if line.strip()
            )
        except ValueError as e:
            raise error.CorruptedState(stringutil.forcebytestr(e))
        if self.firstlinekey in parsed:
            e = _(b"%r can't be used as a key")
            raise error.CorruptedState(e % self.firstlinekey)
        result.update(parsed)
        return result

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        out = []
        if firstline is not None:
            out.append(b'%s\n' % firstline)

        for key, value in data.items():
            if key == self.firstlinekey:
                raise error.ProgrammingError(
                    b"key name '%s' is reserved" % self.firstlinekey
                )
            if not key[0:1].isalpha():
                raise error.ProgrammingError(
                    b"keys must start with a letter in a key-value file"
                )
            if not key.isalnum():
                raise error.ProgrammingError(
                    b"invalid key name in a simple key-value file"
                )
            if b'\n' in value:
                raise error.ProgrammingError(
                    b"invalid value in a simple key-value file"
                )
            out.append(b"%s=%s\n" % (key, value))
        with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
            fp.write(b''.join(out))
1946
1948
1947
1949
# Transaction-name prefixes (matched with startswith by
# registersummarycallback's txmatch, below) for which a summary of
# obsoleted changesets is reported.
_reportobsoletedsource = [
    b'debugobsolete',
    b'pull',
    b'push',
    b'serve',
    b'unbundle',
]

# Transaction-name prefixes for which incoming new changesets are
# reported.
_reportnewcssource = [
    b'pull',
    b'unbundle',
]
1960
1962
1961
1963
def prefetchfiles(repo, revmatches):
    """Invoke the registered file prefetch functions.

    This lets extensions make the relevant files available locally before
    the command uses them.

    Args:
      revmatches: a list of (revision, match) tuples indicating which
        files to fetch at each revision.  A match element of None means
        all files.
    """

    def _wrap(matcher):
        if not matcher:
            return matchall(repo)
        assert isinstance(matcher, matchmod.basematcher)
        # The command itself will complain about files that don't exist,
        # so don't duplicate the message.
        return matchmod.badmatch(matcher, lambda fn, msg: None)

    wrapped = [(rev, _wrap(m)) for rev, m in revmatches]

    fileprefetchhooks(repo, wrapped)
1985
1987
1986
1988
1987 # a list of (repo, revs, match) prefetch functions
1989 # a list of (repo, revs, match) prefetch functions
1988 fileprefetchhooks = util.hooks()
1990 fileprefetchhooks = util.hooks()
1989
1991
1990 # A marker that tells the evolve extension to suppress its own reporting
1992 # A marker that tells the evolve extension to suppress its own reporting
1991 _reportstroubledchangesets = True
1993 _reportstroubledchangesets = True
1992
1994
1993
1995
1994 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1996 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1995 """register a callback to issue a summary after the transaction is closed
1997 """register a callback to issue a summary after the transaction is closed
1996
1998
1997 If as_validator is true, then the callbacks are registered as transaction
1999 If as_validator is true, then the callbacks are registered as transaction
1998 validators instead
2000 validators instead
1999 """
2001 """
2000
2002
2001 def txmatch(sources):
2003 def txmatch(sources):
2002 return any(txnname.startswith(source) for source in sources)
2004 return any(txnname.startswith(source) for source in sources)
2003
2005
2004 categories = []
2006 categories = []
2005
2007
2006 def reportsummary(func):
2008 def reportsummary(func):
2007 """decorator for report callbacks."""
2009 """decorator for report callbacks."""
2008 # The repoview life cycle is shorter than the one of the actual
2010 # The repoview life cycle is shorter than the one of the actual
2009 # underlying repository. So the filtered object can die before the
2011 # underlying repository. So the filtered object can die before the
2010 # weakref is used leading to troubles. We keep a reference to the
2012 # weakref is used leading to troubles. We keep a reference to the
2011 # unfiltered object and restore the filtering when retrieving the
2013 # unfiltered object and restore the filtering when retrieving the
2012 # repository through the weakref.
2014 # repository through the weakref.
2013 filtername = repo.filtername
2015 filtername = repo.filtername
2014 reporef = weakref.ref(repo.unfiltered())
2016 reporef = weakref.ref(repo.unfiltered())
2015
2017
2016 def wrapped(tr):
2018 def wrapped(tr):
2017 repo = reporef()
2019 repo = reporef()
2018 if filtername:
2020 if filtername:
2019 assert repo is not None # help pytype
2021 assert repo is not None # help pytype
2020 repo = repo.filtered(filtername)
2022 repo = repo.filtered(filtername)
2021 func(repo, tr)
2023 func(repo, tr)
2022
2024
2023 newcat = b'%02i-txnreport' % len(categories)
2025 newcat = b'%02i-txnreport' % len(categories)
2024 if as_validator:
2026 if as_validator:
2025 otr.addvalidator(newcat, wrapped)
2027 otr.addvalidator(newcat, wrapped)
2026 else:
2028 else:
2027 otr.addpostclose(newcat, wrapped)
2029 otr.addpostclose(newcat, wrapped)
2028 categories.append(newcat)
2030 categories.append(newcat)
2029 return wrapped
2031 return wrapped
2030
2032
2031 @reportsummary
2033 @reportsummary
2032 def reportchangegroup(repo, tr):
2034 def reportchangegroup(repo, tr):
2033 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2035 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2034 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2036 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2035 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2037 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2036 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2038 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2037 if cgchangesets or cgrevisions or cgfiles:
2039 if cgchangesets or cgrevisions or cgfiles:
2038 htext = b""
2040 htext = b""
2039 if cgheads:
2041 if cgheads:
2040 htext = _(b" (%+d heads)") % cgheads
2042 htext = _(b" (%+d heads)") % cgheads
2041 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2043 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2042 if as_validator:
2044 if as_validator:
2043 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2045 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2044 assert repo is not None # help pytype
2046 assert repo is not None # help pytype
2045 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2047 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2046
2048
2047 if txmatch(_reportobsoletedsource):
2049 if txmatch(_reportobsoletedsource):
2048
2050
2049 @reportsummary
2051 @reportsummary
2050 def reportobsoleted(repo, tr):
2052 def reportobsoleted(repo, tr):
2051 obsoleted = obsutil.getobsoleted(repo, tr)
2053 obsoleted = obsutil.getobsoleted(repo, tr)
2052 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2054 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2053 if newmarkers:
2055 if newmarkers:
2054 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2056 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2055 if obsoleted:
2057 if obsoleted:
2056 msg = _(b'obsoleted %i changesets\n')
2058 msg = _(b'obsoleted %i changesets\n')
2057 if as_validator:
2059 if as_validator:
2058 msg = _(b'obsoleting %i changesets\n')
2060 msg = _(b'obsoleting %i changesets\n')
2059 repo.ui.status(msg % len(obsoleted))
2061 repo.ui.status(msg % len(obsoleted))
2060
2062
2061 if obsolete.isenabled(
2063 if obsolete.isenabled(
2062 repo, obsolete.createmarkersopt
2064 repo, obsolete.createmarkersopt
2063 ) and repo.ui.configbool(
2065 ) and repo.ui.configbool(
2064 b'experimental', b'evolution.report-instabilities'
2066 b'experimental', b'evolution.report-instabilities'
2065 ):
2067 ):
2066 instabilitytypes = [
2068 instabilitytypes = [
2067 (b'orphan', b'orphan'),
2069 (b'orphan', b'orphan'),
2068 (b'phase-divergent', b'phasedivergent'),
2070 (b'phase-divergent', b'phasedivergent'),
2069 (b'content-divergent', b'contentdivergent'),
2071 (b'content-divergent', b'contentdivergent'),
2070 ]
2072 ]
2071
2073
2072 def getinstabilitycounts(repo):
2074 def getinstabilitycounts(repo):
2073 filtered = repo.changelog.filteredrevs
2075 filtered = repo.changelog.filteredrevs
2074 counts = {}
2076 counts = {}
2075 for instability, revset in instabilitytypes:
2077 for instability, revset in instabilitytypes:
2076 counts[instability] = len(
2078 counts[instability] = len(
2077 set(obsolete.getrevs(repo, revset)) - filtered
2079 set(obsolete.getrevs(repo, revset)) - filtered
2078 )
2080 )
2079 return counts
2081 return counts
2080
2082
2081 oldinstabilitycounts = getinstabilitycounts(repo)
2083 oldinstabilitycounts = getinstabilitycounts(repo)
2082
2084
2083 @reportsummary
2085 @reportsummary
2084 def reportnewinstabilities(repo, tr):
2086 def reportnewinstabilities(repo, tr):
2085 newinstabilitycounts = getinstabilitycounts(repo)
2087 newinstabilitycounts = getinstabilitycounts(repo)
2086 for instability, revset in instabilitytypes:
2088 for instability, revset in instabilitytypes:
2087 delta = (
2089 delta = (
2088 newinstabilitycounts[instability]
2090 newinstabilitycounts[instability]
2089 - oldinstabilitycounts[instability]
2091 - oldinstabilitycounts[instability]
2090 )
2092 )
2091 msg = getinstabilitymessage(delta, instability)
2093 msg = getinstabilitymessage(delta, instability)
2092 if msg:
2094 if msg:
2093 repo.ui.warn(msg)
2095 repo.ui.warn(msg)
2094
2096
2095 if txmatch(_reportnewcssource):
2097 if txmatch(_reportnewcssource):
2096
2098
2097 @reportsummary
2099 @reportsummary
2098 def reportnewcs(repo, tr):
2100 def reportnewcs(repo, tr):
2099 """Report the range of new revisions pulled/unbundled."""
2101 """Report the range of new revisions pulled/unbundled."""
2100 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2102 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2101 unfi = repo.unfiltered()
2103 unfi = repo.unfiltered()
2102 if origrepolen >= len(unfi):
2104 if origrepolen >= len(unfi):
2103 return
2105 return
2104
2106
2105 # Compute the bounds of new visible revisions' range.
2107 # Compute the bounds of new visible revisions' range.
2106 revs = smartset.spanset(repo, start=origrepolen)
2108 revs = smartset.spanset(repo, start=origrepolen)
2107 if revs:
2109 if revs:
2108 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2110 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2109
2111
2110 if minrev == maxrev:
2112 if minrev == maxrev:
2111 revrange = minrev
2113 revrange = minrev
2112 else:
2114 else:
2113 revrange = b'%s:%s' % (minrev, maxrev)
2115 revrange = b'%s:%s' % (minrev, maxrev)
2114 draft = len(repo.revs(b'%ld and draft()', revs))
2116 draft = len(repo.revs(b'%ld and draft()', revs))
2115 secret = len(repo.revs(b'%ld and secret()', revs))
2117 secret = len(repo.revs(b'%ld and secret()', revs))
2116 if not (draft or secret):
2118 if not (draft or secret):
2117 msg = _(b'new changesets %s\n') % revrange
2119 msg = _(b'new changesets %s\n') % revrange
2118 elif draft and secret:
2120 elif draft and secret:
2119 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2121 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2120 msg %= (revrange, draft, secret)
2122 msg %= (revrange, draft, secret)
2121 elif draft:
2123 elif draft:
2122 msg = _(b'new changesets %s (%d drafts)\n')
2124 msg = _(b'new changesets %s (%d drafts)\n')
2123 msg %= (revrange, draft)
2125 msg %= (revrange, draft)
2124 elif secret:
2126 elif secret:
2125 msg = _(b'new changesets %s (%d secrets)\n')
2127 msg = _(b'new changesets %s (%d secrets)\n')
2126 msg %= (revrange, secret)
2128 msg %= (revrange, secret)
2127 else:
2129 else:
2128 errormsg = b'entered unreachable condition'
2130 errormsg = b'entered unreachable condition'
2129 raise error.ProgrammingError(errormsg)
2131 raise error.ProgrammingError(errormsg)
2130 repo.ui.status(msg)
2132 repo.ui.status(msg)
2131
2133
2132 # search new changesets directly pulled as obsolete
2134 # search new changesets directly pulled as obsolete
2133 duplicates = tr.changes.get(b'revduplicates', ())
2135 duplicates = tr.changes.get(b'revduplicates', ())
2134 obsadded = unfi.revs(
2136 obsadded = unfi.revs(
2135 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2137 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2136 )
2138 )
2137 cl = repo.changelog
2139 cl = repo.changelog
2138 extinctadded = [r for r in obsadded if r not in cl]
2140 extinctadded = [r for r in obsadded if r not in cl]
2139 if extinctadded:
2141 if extinctadded:
2140 # They are not just obsolete, but obsolete and invisible
2142 # They are not just obsolete, but obsolete and invisible
2141 # we call them "extinct" internally but the terms have not been
2143 # we call them "extinct" internally but the terms have not been
2142 # exposed to users.
2144 # exposed to users.
2143 msg = b'(%d other changesets obsolete on arrival)\n'
2145 msg = b'(%d other changesets obsolete on arrival)\n'
2144 repo.ui.status(msg % len(extinctadded))
2146 repo.ui.status(msg % len(extinctadded))
2145
2147
2146 @reportsummary
2148 @reportsummary
2147 def reportphasechanges(repo, tr):
2149 def reportphasechanges(repo, tr):
2148 """Report statistics of phase changes for changesets pre-existing
2150 """Report statistics of phase changes for changesets pre-existing
2149 pull/unbundle.
2151 pull/unbundle.
2150 """
2152 """
2151 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2153 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2152 published = []
2154 published = []
2153 for revs, (old, new) in tr.changes.get(b'phases', []):
2155 for revs, (old, new) in tr.changes.get(b'phases', []):
2154 if new != phases.public:
2156 if new != phases.public:
2155 continue
2157 continue
2156 published.extend(rev for rev in revs if rev < origrepolen)
2158 published.extend(rev for rev in revs if rev < origrepolen)
2157 if not published:
2159 if not published:
2158 return
2160 return
2159 msg = _(b'%d local changesets published\n')
2161 msg = _(b'%d local changesets published\n')
2160 if as_validator:
2162 if as_validator:
2161 msg = _(b'%d local changesets will be published\n')
2163 msg = _(b'%d local changesets will be published\n')
2162 repo.ui.status(msg % len(published))
2164 repo.ui.status(msg % len(published))
2163
2165
2164
2166
2165 def getinstabilitymessage(delta, instability):
2167 def getinstabilitymessage(delta, instability):
2166 """function to return the message to show warning about new instabilities
2168 """function to return the message to show warning about new instabilities
2167
2169
2168 exists as a separate function so that extension can wrap to show more
2170 exists as a separate function so that extension can wrap to show more
2169 information like how to fix instabilities"""
2171 information like how to fix instabilities"""
2170 if delta > 0:
2172 if delta > 0:
2171 return _(b'%i new %s changesets\n') % (delta, instability)
2173 return _(b'%i new %s changesets\n') % (delta, instability)
2172
2174
2173
2175
2174 def nodesummaries(repo, nodes, maxnumnodes=4):
2176 def nodesummaries(repo, nodes, maxnumnodes=4):
2175 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2177 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2176 return b' '.join(short(h) for h in nodes)
2178 return b' '.join(short(h) for h in nodes)
2177 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2179 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2178 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2180 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2179
2181
2180
2182
2181 def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
2183 def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
2182 """check that no named branch has multiple heads"""
2184 """check that no named branch has multiple heads"""
2183 if desc in (b'strip', b'repair'):
2185 if desc in (b'strip', b'repair'):
2184 # skip the logic during strip
2186 # skip the logic during strip
2185 return
2187 return
2186 visible = repo.filtered(filtername)
2188 visible = repo.filtered(filtername)
2187 # possible improvement: we could restrict the check to affected branch
2189 # possible improvement: we could restrict the check to affected branch
2188 bm = visible.branchmap()
2190 bm = visible.branchmap()
2189 for name in bm:
2191 for name in bm:
2190 heads = bm.branchheads(name, closed=accountclosed)
2192 heads = bm.branchheads(name, closed=accountclosed)
2191 if len(heads) > 1:
2193 if len(heads) > 1:
2192 msg = _(b'rejecting multiple heads on branch "%s"')
2194 msg = _(b'rejecting multiple heads on branch "%s"')
2193 msg %= name
2195 msg %= name
2194 hint = _(b'%d heads: %s')
2196 hint = _(b'%d heads: %s')
2195 hint %= (len(heads), nodesummaries(repo, heads))
2197 hint %= (len(heads), nodesummaries(repo, heads))
2196 raise error.Abort(msg, hint=hint)
2198 raise error.Abort(msg, hint=hint)
2197
2199
2198
2200
2199 def wrapconvertsink(sink):
2201 def wrapconvertsink(sink):
2200 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2202 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2201 before it is used, whether or not the convert extension was formally loaded.
2203 before it is used, whether or not the convert extension was formally loaded.
2202 """
2204 """
2203 return sink
2205 return sink
2204
2206
2205
2207
2206 def unhidehashlikerevs(repo, specs, hiddentype):
2208 def unhidehashlikerevs(repo, specs, hiddentype):
2207 """parse the user specs and unhide changesets whose hash or revision number
2209 """parse the user specs and unhide changesets whose hash or revision number
2208 is passed.
2210 is passed.
2209
2211
2210 hiddentype can be: 1) 'warn': warn while unhiding changesets
2212 hiddentype can be: 1) 'warn': warn while unhiding changesets
2211 2) 'nowarn': don't warn while unhiding changesets
2213 2) 'nowarn': don't warn while unhiding changesets
2212
2214
2213 returns a repo object with the required changesets unhidden
2215 returns a repo object with the required changesets unhidden
2214 """
2216 """
2215 if not specs:
2217 if not specs:
2216 return repo
2218 return repo
2217
2219
2218 if not repo.filtername or not repo.ui.configbool(
2220 if not repo.filtername or not repo.ui.configbool(
2219 b'experimental', b'directaccess'
2221 b'experimental', b'directaccess'
2220 ):
2222 ):
2221 return repo
2223 return repo
2222
2224
2223 if repo.filtername not in (b'visible', b'visible-hidden'):
2225 if repo.filtername not in (b'visible', b'visible-hidden'):
2224 return repo
2226 return repo
2225
2227
2226 symbols = set()
2228 symbols = set()
2227 for spec in specs:
2229 for spec in specs:
2228 try:
2230 try:
2229 tree = revsetlang.parse(spec)
2231 tree = revsetlang.parse(spec)
2230 except error.ParseError: # will be reported by scmutil.revrange()
2232 except error.ParseError: # will be reported by scmutil.revrange()
2231 continue
2233 continue
2232
2234
2233 symbols.update(revsetlang.gethashlikesymbols(tree))
2235 symbols.update(revsetlang.gethashlikesymbols(tree))
2234
2236
2235 if not symbols:
2237 if not symbols:
2236 return repo
2238 return repo
2237
2239
2238 revs = _getrevsfromsymbols(repo, symbols)
2240 revs = _getrevsfromsymbols(repo, symbols)
2239
2241
2240 if not revs:
2242 if not revs:
2241 return repo
2243 return repo
2242
2244
2243 if hiddentype == b'warn':
2245 if hiddentype == b'warn':
2244 unfi = repo.unfiltered()
2246 unfi = repo.unfiltered()
2245 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2247 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2246 repo.ui.warn(
2248 repo.ui.warn(
2247 _(
2249 _(
2248 b"warning: accessing hidden changesets for write "
2250 b"warning: accessing hidden changesets for write "
2249 b"operation: %s\n"
2251 b"operation: %s\n"
2250 )
2252 )
2251 % revstr
2253 % revstr
2252 )
2254 )
2253
2255
2254 # we have to use new filtername to separate branch/tags cache until we can
2256 # we have to use new filtername to separate branch/tags cache until we can
2255 # disbale these cache when revisions are dynamically pinned.
2257 # disbale these cache when revisions are dynamically pinned.
2256 return repo.filtered(b'visible-hidden', revs)
2258 return repo.filtered(b'visible-hidden', revs)
2257
2259
2258
2260
2259 def _getrevsfromsymbols(repo, symbols):
2261 def _getrevsfromsymbols(repo, symbols):
2260 """parse the list of symbols and returns a set of revision numbers of hidden
2262 """parse the list of symbols and returns a set of revision numbers of hidden
2261 changesets present in symbols"""
2263 changesets present in symbols"""
2262 revs = set()
2264 revs = set()
2263 unfi = repo.unfiltered()
2265 unfi = repo.unfiltered()
2264 unficl = unfi.changelog
2266 unficl = unfi.changelog
2265 cl = repo.changelog
2267 cl = repo.changelog
2266 tiprev = len(unficl)
2268 tiprev = len(unficl)
2267 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2269 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2268 for s in symbols:
2270 for s in symbols:
2269 try:
2271 try:
2270 n = int(s)
2272 n = int(s)
2271 if n <= tiprev:
2273 if n <= tiprev:
2272 if not allowrevnums:
2274 if not allowrevnums:
2273 continue
2275 continue
2274 else:
2276 else:
2275 if n not in cl:
2277 if n not in cl:
2276 revs.add(n)
2278 revs.add(n)
2277 continue
2279 continue
2278 except ValueError:
2280 except ValueError:
2279 pass
2281 pass
2280
2282
2281 try:
2283 try:
2282 s = resolvehexnodeidprefix(unfi, s)
2284 s = resolvehexnodeidprefix(unfi, s)
2283 except (error.LookupError, error.WdirUnsupported):
2285 except (error.LookupError, error.WdirUnsupported):
2284 s = None
2286 s = None
2285
2287
2286 if s is not None:
2288 if s is not None:
2287 rev = unficl.rev(s)
2289 rev = unficl.rev(s)
2288 if rev not in cl:
2290 if rev not in cl:
2289 revs.add(rev)
2291 revs.add(rev)
2290
2292
2291 return revs
2293 return revs
2292
2294
2293
2295
2294 def bookmarkrevs(repo, mark):
2296 def bookmarkrevs(repo, mark):
2295 """Select revisions reachable by a given bookmark
2297 """Select revisions reachable by a given bookmark
2296
2298
2297 If the bookmarked revision isn't a head, an empty set will be returned.
2299 If the bookmarked revision isn't a head, an empty set will be returned.
2298 """
2300 """
2299 return repo.revs(format_bookmark_revspec(mark))
2301 return repo.revs(format_bookmark_revspec(mark))
2300
2302
2301
2303
2302 def format_bookmark_revspec(mark):
2304 def format_bookmark_revspec(mark):
2303 """Build a revset expression to select revisions reachable by a given
2305 """Build a revset expression to select revisions reachable by a given
2304 bookmark"""
2306 bookmark"""
2305 mark = b'literal:' + mark
2307 mark = b'literal:' + mark
2306 return revsetlang.formatspec(
2308 return revsetlang.formatspec(
2307 b"ancestors(bookmark(%s)) - "
2309 b"ancestors(bookmark(%s)) - "
2308 b"ancestors(head() and not bookmark(%s)) - "
2310 b"ancestors(head() and not bookmark(%s)) - "
2309 b"ancestors(bookmark() and not bookmark(%s))",
2311 b"ancestors(bookmark() and not bookmark(%s))",
2310 mark,
2312 mark,
2311 mark,
2313 mark,
2312 mark,
2314 mark,
2313 )
2315 )
@@ -1,537 +1,533 b''
1 #require repofncache
1 #require repofncache
2
2
3 An extension which will set fncache chunksize to 1 byte to make sure that logic
3 An extension which will set fncache chunksize to 1 byte to make sure that logic
4 does not break
4 does not break
5
5
6 $ cat > chunksize.py <<EOF
6 $ cat > chunksize.py <<EOF
7 > from mercurial import store
7 > from mercurial import store
8 > store.fncache_chunksize = 1
8 > store.fncache_chunksize = 1
9 > EOF
9 > EOF
10
10
11 $ cat >> $HGRCPATH <<EOF
11 $ cat >> $HGRCPATH <<EOF
12 > [extensions]
12 > [extensions]
13 > chunksize = $TESTTMP/chunksize.py
13 > chunksize = $TESTTMP/chunksize.py
14 > EOF
14 > EOF
15
15
16 Init repo1:
16 Init repo1:
17
17
18 $ hg init repo1
18 $ hg init repo1
19 $ cd repo1
19 $ cd repo1
20 $ echo "some text" > a
20 $ echo "some text" > a
21 $ hg add
21 $ hg add
22 adding a
22 adding a
23 $ hg ci -m first
23 $ hg ci -m first
24 $ cat .hg/store/fncache | sort
24 $ cat .hg/store/fncache | sort
25 data/a.i
25 data/a.i
26
26
27 Testing a.i/b:
27 Testing a.i/b:
28
28
29 $ mkdir a.i
29 $ mkdir a.i
30 $ echo "some other text" > a.i/b
30 $ echo "some other text" > a.i/b
31 $ hg add
31 $ hg add
32 adding a.i/b
32 adding a.i/b
33 $ hg ci -m second
33 $ hg ci -m second
34 $ cat .hg/store/fncache | sort
34 $ cat .hg/store/fncache | sort
35 data/a.i
35 data/a.i
36 data/a.i.hg/b.i
36 data/a.i.hg/b.i
37
37
38 Testing a.i.hg/c:
38 Testing a.i.hg/c:
39
39
40 $ mkdir a.i.hg
40 $ mkdir a.i.hg
41 $ echo "yet another text" > a.i.hg/c
41 $ echo "yet another text" > a.i.hg/c
42 $ hg add
42 $ hg add
43 adding a.i.hg/c
43 adding a.i.hg/c
44 $ hg ci -m third
44 $ hg ci -m third
45 $ cat .hg/store/fncache | sort
45 $ cat .hg/store/fncache | sort
46 data/a.i
46 data/a.i
47 data/a.i.hg.hg/c.i
47 data/a.i.hg.hg/c.i
48 data/a.i.hg/b.i
48 data/a.i.hg/b.i
49
49
50 Testing verify:
50 Testing verify:
51
51
52 $ hg verify -q
52 $ hg verify -q
53
53
54 $ rm .hg/store/fncache
54 $ rm .hg/store/fncache
55
55
56 $ hg verify
56 $ hg verify
57 checking changesets
57 checking changesets
58 checking manifests
58 checking manifests
59 crosschecking files in changesets and manifests
59 crosschecking files in changesets and manifests
60 checking files
60 checking files
61 warning: revlog 'data/a.i' not in fncache!
61 warning: revlog 'data/a.i' not in fncache!
62 warning: revlog 'data/a.i.hg/c.i' not in fncache!
62 warning: revlog 'data/a.i.hg/c.i' not in fncache!
63 warning: revlog 'data/a.i/b.i' not in fncache!
63 warning: revlog 'data/a.i/b.i' not in fncache!
64 checking dirstate
64 checking dirstate
65 checked 3 changesets with 3 changes to 3 files
65 checked 3 changesets with 3 changes to 3 files
66 3 warnings encountered!
66 3 warnings encountered!
67 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
67 hint: run "hg debugrebuildfncache" to recover from corrupt fncache
68
68
69 Follow the hint to make sure it works
69 Follow the hint to make sure it works
70
70
71 $ hg debugrebuildfncache
71 $ hg debugrebuildfncache
72 adding data/a.i
72 adding data/a.i
73 adding data/a.i.hg/c.i
73 adding data/a.i.hg/c.i
74 adding data/a.i/b.i
74 adding data/a.i/b.i
75 3 items added, 0 removed from fncache
75 3 items added, 0 removed from fncache
76
76
77 $ hg verify -q
77 $ hg verify -q
78
78
79 $ cd ..
79 $ cd ..
80
80
81 Non store repo:
81 Non store repo:
82
82
83 $ hg --config format.usestore=False init foo
83 $ hg --config format.usestore=False init foo
84 $ cd foo
84 $ cd foo
85 $ mkdir tst.d
85 $ mkdir tst.d
86 $ echo foo > tst.d/foo
86 $ echo foo > tst.d/foo
87 $ hg ci -Amfoo
87 $ hg ci -Amfoo
88 adding tst.d/foo
88 adding tst.d/foo
89 $ find .hg | sort
89 $ find .hg | sort
90 .hg
90 .hg
91 .hg/00changelog.i
91 .hg/00changelog.i
92 .hg/00manifest.i
92 .hg/00manifest.i
93 .hg/cache
93 .hg/cache
94 .hg/cache/branch2-served
94 .hg/cache/branch2-served
95 .hg/cache/rbc-names-v1
95 .hg/cache/rbc-names-v1
96 .hg/cache/rbc-revs-v1
96 .hg/cache/rbc-revs-v1
97 .hg/data
97 .hg/data
98 .hg/data/tst.d.hg
98 .hg/data/tst.d.hg
99 .hg/data/tst.d.hg/foo.i
99 .hg/data/tst.d.hg/foo.i
100 .hg/dirstate
100 .hg/dirstate
101 .hg/fsmonitor.state (fsmonitor !)
101 .hg/fsmonitor.state (fsmonitor !)
102 .hg/last-message.txt
102 .hg/last-message.txt
103 .hg/phaseroots
103 .hg/phaseroots
104 .hg/requires
104 .hg/requires
105 .hg/undo
105 .hg/undo
106 .hg/undo.backup.dirstate
107 .hg/undo.backupfiles
106 .hg/undo.backupfiles
108 .hg/undo.bookmarks
107 .hg/undo.bookmarks
109 .hg/undo.branch
108 .hg/undo.branch
110 .hg/undo.desc
109 .hg/undo.desc
111 .hg/undo.dirstate
112 .hg/undo.phaseroots
110 .hg/undo.phaseroots
113 .hg/wcache
111 .hg/wcache
114 .hg/wcache/checkisexec (execbit !)
112 .hg/wcache/checkisexec (execbit !)
115 .hg/wcache/checklink (symlink !)
113 .hg/wcache/checklink (symlink !)
116 .hg/wcache/checklink-target (symlink !)
114 .hg/wcache/checklink-target (symlink !)
117 .hg/wcache/manifestfulltextcache (reporevlogstore !)
115 .hg/wcache/manifestfulltextcache (reporevlogstore !)
118 $ cd ..
116 $ cd ..
119
117
120 Non fncache repo:
118 Non fncache repo:
121
119
122 $ hg --config format.usefncache=False init bar
120 $ hg --config format.usefncache=False init bar
123 $ cd bar
121 $ cd bar
124 $ mkdir tst.d
122 $ mkdir tst.d
125 $ echo foo > tst.d/Foo
123 $ echo foo > tst.d/Foo
126 $ hg ci -Amfoo
124 $ hg ci -Amfoo
127 adding tst.d/Foo
125 adding tst.d/Foo
128 $ find .hg | sort
126 $ find .hg | sort
129 .hg
127 .hg
130 .hg/00changelog.i
128 .hg/00changelog.i
131 .hg/cache
129 .hg/cache
132 .hg/cache/branch2-served
130 .hg/cache/branch2-served
133 .hg/cache/rbc-names-v1
131 .hg/cache/rbc-names-v1
134 .hg/cache/rbc-revs-v1
132 .hg/cache/rbc-revs-v1
135 .hg/dirstate
133 .hg/dirstate
136 .hg/fsmonitor.state (fsmonitor !)
134 .hg/fsmonitor.state (fsmonitor !)
137 .hg/last-message.txt
135 .hg/last-message.txt
138 .hg/requires
136 .hg/requires
139 .hg/store
137 .hg/store
140 .hg/store/00changelog.i
138 .hg/store/00changelog.i
141 .hg/store/00manifest.i
139 .hg/store/00manifest.i
142 .hg/store/data
140 .hg/store/data
143 .hg/store/data/tst.d.hg
141 .hg/store/data/tst.d.hg
144 .hg/store/data/tst.d.hg/_foo.i
142 .hg/store/data/tst.d.hg/_foo.i
145 .hg/store/phaseroots
143 .hg/store/phaseroots
146 .hg/store/requires
144 .hg/store/requires
147 .hg/store/undo
145 .hg/store/undo
148 .hg/store/undo.backupfiles
146 .hg/store/undo.backupfiles
149 .hg/store/undo.phaseroots
147 .hg/store/undo.phaseroots
150 .hg/undo.backup.dirstate
151 .hg/undo.bookmarks
148 .hg/undo.bookmarks
152 .hg/undo.branch
149 .hg/undo.branch
153 .hg/undo.desc
150 .hg/undo.desc
154 .hg/undo.dirstate
155 .hg/wcache
151 .hg/wcache
156 .hg/wcache/checkisexec (execbit !)
152 .hg/wcache/checkisexec (execbit !)
157 .hg/wcache/checklink (symlink !)
153 .hg/wcache/checklink (symlink !)
158 .hg/wcache/checklink-target (symlink !)
154 .hg/wcache/checklink-target (symlink !)
159 .hg/wcache/manifestfulltextcache (reporevlogstore !)
155 .hg/wcache/manifestfulltextcache (reporevlogstore !)
160 $ cd ..
156 $ cd ..
161
157
162 Encoding of reserved / long paths in the store
158 Encoding of reserved / long paths in the store
163
159
164 $ hg init r2
160 $ hg init r2
165 $ cd r2
161 $ cd r2
166 $ cat <<EOF > .hg/hgrc
162 $ cat <<EOF > .hg/hgrc
167 > [ui]
163 > [ui]
168 > portablefilenames = ignore
164 > portablefilenames = ignore
169 > EOF
165 > EOF
170
166
171 $ hg import -q --bypass - <<EOF
167 $ hg import -q --bypass - <<EOF
172 > # HG changeset patch
168 > # HG changeset patch
173 > # User test
169 > # User test
174 > # Date 0 0
170 > # Date 0 0
175 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
171 > # Node ID 1c7a2f7cb77be1a0def34e4c7cabc562ad98fbd7
176 > # Parent 0000000000000000000000000000000000000000
172 > # Parent 0000000000000000000000000000000000000000
177 > 1
173 > 1
178 >
174 >
179 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
175 > diff --git a/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
180 > new file mode 100644
176 > new file mode 100644
181 > --- /dev/null
177 > --- /dev/null
182 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
178 > +++ b/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxxxxx-xxxxxxxxx-xxxxxxxxx-123456789-12.3456789-12345-ABCDEFGHIJKLMNOPRSTUVWXYZ-abcdefghjiklmnopqrstuvwxyz
183 > @@ -0,0 +1,1 @@
179 > @@ -0,0 +1,1 @@
184 > +foo
180 > +foo
185 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
181 > diff --git a/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
186 > new file mode 100644
182 > new file mode 100644
187 > --- /dev/null
183 > --- /dev/null
188 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
184 > +++ b/AUX/SECOND/X.PRN/FOURTH/FI:FTH/SIXTH/SEVENTH/EIGHTH/NINETH/TENTH/ELEVENTH/LOREMIPSUM.TXT
189 > @@ -0,0 +1,1 @@
185 > @@ -0,0 +1,1 @@
190 > +foo
186 > +foo
191 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
187 > diff --git a/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
192 > new file mode 100644
188 > new file mode 100644
193 > --- /dev/null
189 > --- /dev/null
194 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
190 > +++ b/Project Planning/Resources/AnotherLongDirectoryName/Followedbyanother/AndAnother/AndThenAnExtremelyLongFileName.txt
195 > @@ -0,0 +1,1 @@
191 > @@ -0,0 +1,1 @@
196 > +foo
192 > +foo
197 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
193 > diff --git a/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
198 > new file mode 100644
194 > new file mode 100644
199 > --- /dev/null
195 > --- /dev/null
200 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
196 > +++ b/bla.aux/prn/PRN/lpt/com3/nul/coma/foo.NUL/normal.c
201 > @@ -0,0 +1,1 @@
197 > @@ -0,0 +1,1 @@
202 > +foo
198 > +foo
203 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
199 > diff --git a/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
204 > new file mode 100644
200 > new file mode 100644
205 > --- /dev/null
201 > --- /dev/null
206 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
202 > +++ b/enterprise/openesbaddons/contrib-imola/corba-bc/netbeansplugin/wsdlExtension/src/main/java/META-INF/services/org.netbeans.modules.xml.wsdl.bindingsupport.spi.ExtensibilityElementTemplateProvider
207 > @@ -0,0 +1,1 @@
203 > @@ -0,0 +1,1 @@
208 > +foo
204 > +foo
209 > EOF
205 > EOF
210
206
211 $ find .hg/store -name *.i | sort
207 $ find .hg/store -name *.i | sort
212 .hg/store/00changelog.i
208 .hg/store/00changelog.i
213 .hg/store/00manifest.i
209 .hg/store/00manifest.i
214 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
210 .hg/store/data/bla.aux/pr~6e/_p_r_n/lpt/co~6d3/nu~6c/coma/foo._n_u_l/normal.c.i
215 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
211 .hg/store/dh/12345678/12345678/12345678/12345678/12345678/12345678/12345678/12345/xxxxxx168e07b38e65eff86ab579afaaa8e30bfbe0f35f.i
216 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
212 .hg/store/dh/au~78/second/x.prn/fourth/fi~3afth/sixth/seventh/eighth/nineth/tenth/loremia20419e358ddff1bf8751e38288aff1d7c32ec05.i
217 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
213 .hg/store/dh/enterpri/openesba/contrib-/corba-bc/netbeans/wsdlexte/src/main/java/org.net7018f27961fdf338a598a40c4683429e7ffb9743.i
218 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
214 .hg/store/dh/project_/resource/anotherl/followed/andanoth/andthenanextremelylongfilename0d8e1f4187c650e2f1fdca9fd90f786bc0976b6b.i
219
215
220 $ cd ..
216 $ cd ..
221
217
222 Aborting lock does not prevent fncache writes
218 Aborting lock does not prevent fncache writes
223
219
224 $ cat > exceptionext.py <<EOF
220 $ cat > exceptionext.py <<EOF
225 > import os
221 > import os
226 > from mercurial import commands, error, extensions
222 > from mercurial import commands, error, extensions
227 >
223 >
228 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
224 > def lockexception(orig, vfs, lockname, wait, releasefn, *args, **kwargs):
229 > def releasewrap():
225 > def releasewrap():
230 > l.held = False # ensure __del__ is a noop
226 > l.held = False # ensure __del__ is a noop
231 > raise error.Abort(b"forced lock failure")
227 > raise error.Abort(b"forced lock failure")
232 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
228 > l = orig(vfs, lockname, wait, releasewrap, *args, **kwargs)
233 > return l
229 > return l
234 >
230 >
235 > def reposetup(ui, repo):
231 > def reposetup(ui, repo):
236 > extensions.wrapfunction(repo, '_lock', lockexception)
232 > extensions.wrapfunction(repo, '_lock', lockexception)
237 >
233 >
238 > cmdtable = {}
234 > cmdtable = {}
239 >
235 >
240 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
236 > # wrap "commit" command to prevent wlock from being '__del__()'-ed
241 > # at the end of dispatching (for intentional "forced lcok failure")
237 > # at the end of dispatching (for intentional "forced lcok failure")
242 > def commitwrap(orig, ui, repo, *pats, **opts):
238 > def commitwrap(orig, ui, repo, *pats, **opts):
243 > repo = repo.unfiltered() # to use replaced repo._lock certainly
239 > repo = repo.unfiltered() # to use replaced repo._lock certainly
244 > wlock = repo.wlock()
240 > wlock = repo.wlock()
245 > try:
241 > try:
246 > return orig(ui, repo, *pats, **opts)
242 > return orig(ui, repo, *pats, **opts)
247 > finally:
243 > finally:
248 > # multiple 'relase()' is needed for complete releasing wlock,
244 > # multiple 'relase()' is needed for complete releasing wlock,
249 > # because "forced" abort at last releasing store lock
245 > # because "forced" abort at last releasing store lock
250 > # prevents wlock from being released at same 'lockmod.release()'
246 > # prevents wlock from being released at same 'lockmod.release()'
251 > for i in range(wlock.held):
247 > for i in range(wlock.held):
252 > wlock.release()
248 > wlock.release()
253 >
249 >
254 > def extsetup(ui):
250 > def extsetup(ui):
255 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
251 > extensions.wrapcommand(commands.table, b"commit", commitwrap)
256 > EOF
252 > EOF
257 $ extpath=`pwd`/exceptionext.py
253 $ extpath=`pwd`/exceptionext.py
258 $ hg init fncachetxn
254 $ hg init fncachetxn
259 $ cd fncachetxn
255 $ cd fncachetxn
260 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
256 $ printf "[extensions]\nexceptionext=$extpath\n" >> .hg/hgrc
261 $ touch y
257 $ touch y
262 $ hg ci -qAm y
258 $ hg ci -qAm y
263 abort: forced lock failure
259 abort: forced lock failure
264 [255]
260 [255]
265 $ cat .hg/store/fncache
261 $ cat .hg/store/fncache
266 data/y.i
262 data/y.i
267
263
268 Aborting transaction prevents fncache change
264 Aborting transaction prevents fncache change
269
265
270 $ cat > ../exceptionext.py <<EOF
266 $ cat > ../exceptionext.py <<EOF
271 > import os
267 > import os
272 > from mercurial import commands, error, extensions, localrepo
268 > from mercurial import commands, error, extensions, localrepo
273 >
269 >
274 > def wrapper(orig, self, *args, **kwargs):
270 > def wrapper(orig, self, *args, **kwargs):
275 > tr = orig(self, *args, **kwargs)
271 > tr = orig(self, *args, **kwargs)
276 > def fail(tr):
272 > def fail(tr):
277 > raise error.Abort(b"forced transaction failure")
273 > raise error.Abort(b"forced transaction failure")
278 > # zzz prefix to ensure it sorted after store.write
274 > # zzz prefix to ensure it sorted after store.write
279 > tr.addfinalize(b'zzz-forcefails', fail)
275 > tr.addfinalize(b'zzz-forcefails', fail)
280 > return tr
276 > return tr
281 >
277 >
282 > def uisetup(ui):
278 > def uisetup(ui):
283 > extensions.wrapfunction(
279 > extensions.wrapfunction(
284 > localrepo.localrepository, b'transaction', wrapper)
280 > localrepo.localrepository, b'transaction', wrapper)
285 >
281 >
286 > cmdtable = {}
282 > cmdtable = {}
287 >
283 >
288 > EOF
284 > EOF
289
285
290 Clean cached version
286 Clean cached version
291 $ rm -f "${extpath}c"
287 $ rm -f "${extpath}c"
292 $ rm -Rf "`dirname $extpath`/__pycache__"
288 $ rm -Rf "`dirname $extpath`/__pycache__"
293
289
294 $ touch z
290 $ touch z
295 $ hg ci -qAm z
291 $ hg ci -qAm z
296 transaction abort!
292 transaction abort!
297 rollback completed
293 rollback completed
298 abort: forced transaction failure
294 abort: forced transaction failure
299 [255]
295 [255]
300 $ cat .hg/store/fncache
296 $ cat .hg/store/fncache
301 data/y.i
297 data/y.i
302
298
303 Aborted transactions can be recovered later
299 Aborted transactions can be recovered later
304
300
305 $ cat > ../exceptionext.py <<EOF
301 $ cat > ../exceptionext.py <<EOF
306 > import os
302 > import os
307 > import signal
303 > import signal
308 > from mercurial import (
304 > from mercurial import (
309 > commands,
305 > commands,
310 > error,
306 > error,
311 > extensions,
307 > extensions,
312 > localrepo,
308 > localrepo,
313 > transaction,
309 > transaction,
314 > )
310 > )
315 >
311 >
316 > def trwrapper(orig, self, *args, **kwargs):
312 > def trwrapper(orig, self, *args, **kwargs):
317 > tr = orig(self, *args, **kwargs)
313 > tr = orig(self, *args, **kwargs)
318 > def fail(tr):
314 > def fail(tr):
319 > os.kill(os.getpid(), signal.SIGKILL)
315 > os.kill(os.getpid(), signal.SIGKILL)
320 > # zzz prefix to ensure it sorted after store.write
316 > # zzz prefix to ensure it sorted after store.write
321 > tr.addfinalize(b'zzz-forcefails', fail)
317 > tr.addfinalize(b'zzz-forcefails', fail)
322 > return tr
318 > return tr
323 >
319 >
324 > def uisetup(ui):
320 > def uisetup(ui):
325 > extensions.wrapfunction(localrepo.localrepository, 'transaction',
321 > extensions.wrapfunction(localrepo.localrepository, 'transaction',
326 > trwrapper)
322 > trwrapper)
327 >
323 >
328 > cmdtable = {}
324 > cmdtable = {}
329 >
325 >
330 > EOF
326 > EOF
331
327
332 Clean cached versions
328 Clean cached versions
333 $ rm -f "${extpath}c"
329 $ rm -f "${extpath}c"
334 $ rm -Rf "`dirname $extpath`/__pycache__"
330 $ rm -Rf "`dirname $extpath`/__pycache__"
335
331
336 $ hg up -q 1
332 $ hg up -q 1
337 $ touch z
333 $ touch z
338 # Cannot rely on the return code value as chg use a different one.
334 # Cannot rely on the return code value as chg use a different one.
339 # So we use a `|| echo` trick
335 # So we use a `|| echo` trick
340 # XXX-CHG fixing chg behavior would be nice here.
336 # XXX-CHG fixing chg behavior would be nice here.
341 $ hg ci -qAm z || echo "He's Dead, Jim." 2>/dev/null
337 $ hg ci -qAm z || echo "He's Dead, Jim." 2>/dev/null
342 Killed (?)
338 Killed (?)
343 He's Dead, Jim.
339 He's Dead, Jim.
344 $ cat .hg/store/fncache | sort
340 $ cat .hg/store/fncache | sort
345 data/y.i
341 data/y.i
346 data/z.i
342 data/z.i
347 $ hg recover --verify
343 $ hg recover --verify
348 rolling back interrupted transaction
344 rolling back interrupted transaction
349 checking changesets
345 checking changesets
350 checking manifests
346 checking manifests
351 crosschecking files in changesets and manifests
347 crosschecking files in changesets and manifests
352 checking files
348 checking files
353 checking dirstate
349 checking dirstate
354 checked 1 changesets with 1 changes to 1 files
350 checked 1 changesets with 1 changes to 1 files
355 $ cat .hg/store/fncache
351 $ cat .hg/store/fncache
356 data/y.i
352 data/y.i
357
353
358 $ cd ..
354 $ cd ..
359
355
360 debugrebuildfncache does nothing unless repo has fncache requirement
356 debugrebuildfncache does nothing unless repo has fncache requirement
361
357
362 $ hg --config format.usefncache=false init nofncache
358 $ hg --config format.usefncache=false init nofncache
363 $ cd nofncache
359 $ cd nofncache
364 $ hg debugrebuildfncache
360 $ hg debugrebuildfncache
365 (not rebuilding fncache because repository does not support fncache)
361 (not rebuilding fncache because repository does not support fncache)
366
362
367 $ cd ..
363 $ cd ..
368
364
369 debugrebuildfncache works on empty repository
365 debugrebuildfncache works on empty repository
370
366
371 $ hg init empty
367 $ hg init empty
372 $ cd empty
368 $ cd empty
373 $ hg debugrebuildfncache
369 $ hg debugrebuildfncache
374 fncache already up to date
370 fncache already up to date
375 $ cd ..
371 $ cd ..
376
372
377 debugrebuildfncache on an up to date repository no-ops
373 debugrebuildfncache on an up to date repository no-ops
378
374
379 $ hg init repo
375 $ hg init repo
380 $ cd repo
376 $ cd repo
381 $ echo initial > foo
377 $ echo initial > foo
382 $ echo initial > .bar
378 $ echo initial > .bar
383 $ hg commit -A -m initial
379 $ hg commit -A -m initial
384 adding .bar
380 adding .bar
385 adding foo
381 adding foo
386
382
387 $ cat .hg/store/fncache | sort
383 $ cat .hg/store/fncache | sort
388 data/.bar.i
384 data/.bar.i
389 data/foo.i
385 data/foo.i
390
386
391 $ hg debugrebuildfncache
387 $ hg debugrebuildfncache
392 fncache already up to date
388 fncache already up to date
393
389
394 debugrebuildfncache restores deleted fncache file
390 debugrebuildfncache restores deleted fncache file
395
391
396 $ rm -f .hg/store/fncache
392 $ rm -f .hg/store/fncache
397 $ hg debugrebuildfncache
393 $ hg debugrebuildfncache
398 adding data/.bar.i
394 adding data/.bar.i
399 adding data/foo.i
395 adding data/foo.i
400 2 items added, 0 removed from fncache
396 2 items added, 0 removed from fncache
401
397
402 $ cat .hg/store/fncache | sort
398 $ cat .hg/store/fncache | sort
403 data/.bar.i
399 data/.bar.i
404 data/foo.i
400 data/foo.i
405
401
406 Rebuild after rebuild should no-op
402 Rebuild after rebuild should no-op
407
403
408 $ hg debugrebuildfncache
404 $ hg debugrebuildfncache
409 fncache already up to date
405 fncache already up to date
410
406
411 A single missing file should get restored, an extra file should be removed
407 A single missing file should get restored, an extra file should be removed
412
408
413 $ cat > .hg/store/fncache << EOF
409 $ cat > .hg/store/fncache << EOF
414 > data/foo.i
410 > data/foo.i
415 > data/bad-entry.i
411 > data/bad-entry.i
416 > EOF
412 > EOF
417
413
418 $ hg debugrebuildfncache
414 $ hg debugrebuildfncache
419 removing data/bad-entry.i
415 removing data/bad-entry.i
420 adding data/.bar.i
416 adding data/.bar.i
421 1 items added, 1 removed from fncache
417 1 items added, 1 removed from fncache
422
418
423 $ cat .hg/store/fncache | sort
419 $ cat .hg/store/fncache | sort
424 data/.bar.i
420 data/.bar.i
425 data/foo.i
421 data/foo.i
426
422
427 debugrebuildfncache recovers from truncated line in fncache
423 debugrebuildfncache recovers from truncated line in fncache
428
424
429 $ printf a > .hg/store/fncache
425 $ printf a > .hg/store/fncache
430 $ hg debugrebuildfncache
426 $ hg debugrebuildfncache
431 fncache does not ends with a newline
427 fncache does not ends with a newline
432 adding data/.bar.i
428 adding data/.bar.i
433 adding data/foo.i
429 adding data/foo.i
434 2 items added, 0 removed from fncache
430 2 items added, 0 removed from fncache
435
431
436 $ cat .hg/store/fncache | sort
432 $ cat .hg/store/fncache | sort
437 data/.bar.i
433 data/.bar.i
438 data/foo.i
434 data/foo.i
439
435
440 $ cd ..
436 $ cd ..
441
437
442 Try a simple variation without dotencode to ensure fncache is ignorant of encoding
438 Try a simple variation without dotencode to ensure fncache is ignorant of encoding
443
439
444 $ hg --config format.dotencode=false init nodotencode
440 $ hg --config format.dotencode=false init nodotencode
445 $ cd nodotencode
441 $ cd nodotencode
446 $ echo initial > foo
442 $ echo initial > foo
447 $ echo initial > .bar
443 $ echo initial > .bar
448 $ hg commit -A -m initial
444 $ hg commit -A -m initial
449 adding .bar
445 adding .bar
450 adding foo
446 adding foo
451
447
452 $ cat .hg/store/fncache | sort
448 $ cat .hg/store/fncache | sort
453 data/.bar.i
449 data/.bar.i
454 data/foo.i
450 data/foo.i
455
451
456 $ rm .hg/store/fncache
452 $ rm .hg/store/fncache
457 $ hg debugrebuildfncache
453 $ hg debugrebuildfncache
458 adding data/.bar.i
454 adding data/.bar.i
459 adding data/foo.i
455 adding data/foo.i
460 2 items added, 0 removed from fncache
456 2 items added, 0 removed from fncache
461
457
462 $ cat .hg/store/fncache | sort
458 $ cat .hg/store/fncache | sort
463 data/.bar.i
459 data/.bar.i
464 data/foo.i
460 data/foo.i
465
461
466 $ cd ..
462 $ cd ..
467
463
468 In repositories that have accumulated a large number of files over time, the
464 In repositories that have accumulated a large number of files over time, the
469 fncache file is going to be large. If we possibly can avoid loading it, so much the better.
465 fncache file is going to be large. If we possibly can avoid loading it, so much the better.
470 The cache should not loaded when committing changes to existing files, or when unbundling
466 The cache should not loaded when committing changes to existing files, or when unbundling
471 changesets that only contain changes to existing files:
467 changesets that only contain changes to existing files:
472
468
473 $ cat > fncacheloadwarn.py << EOF
469 $ cat > fncacheloadwarn.py << EOF
474 > from mercurial import extensions, localrepo
470 > from mercurial import extensions, localrepo
475 >
471 >
476 > def extsetup(ui):
472 > def extsetup(ui):
477 > def wrapstore(orig, requirements, *args):
473 > def wrapstore(orig, requirements, *args):
478 > store = orig(requirements, *args)
474 > store = orig(requirements, *args)
479 > if b'store' in requirements and b'fncache' in requirements:
475 > if b'store' in requirements and b'fncache' in requirements:
480 > instrumentfncachestore(store, ui)
476 > instrumentfncachestore(store, ui)
481 > return store
477 > return store
482 > extensions.wrapfunction(localrepo, 'makestore', wrapstore)
478 > extensions.wrapfunction(localrepo, 'makestore', wrapstore)
483 >
479 >
484 > def instrumentfncachestore(fncachestore, ui):
480 > def instrumentfncachestore(fncachestore, ui):
485 > class instrumentedfncache(type(fncachestore.fncache)):
481 > class instrumentedfncache(type(fncachestore.fncache)):
486 > def _load(self):
482 > def _load(self):
487 > ui.warn(b'fncache load triggered!\n')
483 > ui.warn(b'fncache load triggered!\n')
488 > super(instrumentedfncache, self)._load()
484 > super(instrumentedfncache, self)._load()
489 > fncachestore.fncache.__class__ = instrumentedfncache
485 > fncachestore.fncache.__class__ = instrumentedfncache
490 > EOF
486 > EOF
491
487
492 $ fncachextpath=`pwd`/fncacheloadwarn.py
488 $ fncachextpath=`pwd`/fncacheloadwarn.py
493 $ hg init nofncacheload
489 $ hg init nofncacheload
494 $ cd nofncacheload
490 $ cd nofncacheload
495 $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc
491 $ printf "[extensions]\nfncacheloadwarn=$fncachextpath\n" >> .hg/hgrc
496
492
497 A new file should trigger a load, as we'd want to update the fncache set in that case:
493 A new file should trigger a load, as we'd want to update the fncache set in that case:
498
494
499 $ touch foo
495 $ touch foo
500 $ hg ci -qAm foo
496 $ hg ci -qAm foo
501 fncache load triggered!
497 fncache load triggered!
502
498
503 But modifying that file should not:
499 But modifying that file should not:
504
500
505 $ echo bar >> foo
501 $ echo bar >> foo
506 $ hg ci -qm foo
502 $ hg ci -qm foo
507
503
508 If a transaction has been aborted, the zero-size truncated index file will
504 If a transaction has been aborted, the zero-size truncated index file will
509 not prevent the fncache from being loaded; rather than actually abort
505 not prevent the fncache from being loaded; rather than actually abort
510 a transaction, we simulate the situation by creating a zero-size index file:
506 a transaction, we simulate the situation by creating a zero-size index file:
511
507
512 $ touch .hg/store/data/bar.i
508 $ touch .hg/store/data/bar.i
513 $ touch bar
509 $ touch bar
514 $ hg ci -qAm bar
510 $ hg ci -qAm bar
515 fncache load triggered!
511 fncache load triggered!
516
512
517 Unbundling should follow the same rules; existing files should not cause a load:
513 Unbundling should follow the same rules; existing files should not cause a load:
518
514
519 (loading during the clone is expected)
515 (loading during the clone is expected)
520 $ hg clone -q . tobundle
516 $ hg clone -q . tobundle
521 fncache load triggered!
517 fncache load triggered!
522 fncache load triggered!
518 fncache load triggered!
523
519
524 $ echo 'new line' > tobundle/bar
520 $ echo 'new line' > tobundle/bar
525 $ hg -R tobundle ci -qm bar
521 $ hg -R tobundle ci -qm bar
526 $ hg -R tobundle bundle -q barupdated.hg
522 $ hg -R tobundle bundle -q barupdated.hg
527 $ hg unbundle -q barupdated.hg
523 $ hg unbundle -q barupdated.hg
528
524
529 but adding new files should:
525 but adding new files should:
530
526
531 $ touch tobundle/newfile
527 $ touch tobundle/newfile
532 $ hg -R tobundle ci -qAm newfile
528 $ hg -R tobundle ci -qAm newfile
533 $ hg -R tobundle bundle -q newfile.hg
529 $ hg -R tobundle bundle -q newfile.hg
534 $ hg unbundle -q newfile.hg
530 $ hg unbundle -q newfile.hg
535 fncache load triggered!
531 fncache load triggered!
536
532
537 $ cd ..
533 $ cd ..
@@ -1,184 +1,182 b''
1 #require unix-permissions
1 #require unix-permissions
2
2
3 test that new files created in .hg inherit the permissions from .hg/store
3 test that new files created in .hg inherit the permissions from .hg/store
4
4
5 $ mkdir dir
5 $ mkdir dir
6
6
7 just in case somebody has a strange $TMPDIR
7 just in case somebody has a strange $TMPDIR
8
8
9 $ chmod g-s dir
9 $ chmod g-s dir
10 $ cd dir
10 $ cd dir
11
11
12 $ cat >printmodes.py <<EOF
12 $ cat >printmodes.py <<EOF
13 > import os
13 > import os
14 > import sys
14 > import sys
15 >
15 >
16 > allnames = []
16 > allnames = []
17 > isdir = {}
17 > isdir = {}
18 > for root, dirs, files in os.walk(sys.argv[1]):
18 > for root, dirs, files in os.walk(sys.argv[1]):
19 > for d in dirs:
19 > for d in dirs:
20 > name = os.path.join(root, d)
20 > name = os.path.join(root, d)
21 > isdir[name] = 1
21 > isdir[name] = 1
22 > allnames.append(name)
22 > allnames.append(name)
23 > for f in files:
23 > for f in files:
24 > name = os.path.join(root, f)
24 > name = os.path.join(root, f)
25 > allnames.append(name)
25 > allnames.append(name)
26 > allnames.sort()
26 > allnames.sort()
27 > for name in allnames:
27 > for name in allnames:
28 > suffix = name in isdir and '/' or ''
28 > suffix = name in isdir and '/' or ''
29 > print('%05o %s%s' % (os.lstat(name).st_mode & 0o7777, name, suffix))
29 > print('%05o %s%s' % (os.lstat(name).st_mode & 0o7777, name, suffix))
30 > EOF
30 > EOF
31
31
32 $ cat >mode.py <<EOF
32 $ cat >mode.py <<EOF
33 > import os
33 > import os
34 > import sys
34 > import sys
35 > print('%05o' % os.lstat(sys.argv[1]).st_mode)
35 > print('%05o' % os.lstat(sys.argv[1]).st_mode)
36 > EOF
36 > EOF
37
37
38 $ umask 077
38 $ umask 077
39
39
40 $ hg init repo
40 $ hg init repo
41 $ cd repo
41 $ cd repo
42
42
43 $ chmod 0770 .hg/store .hg/cache .hg/wcache
43 $ chmod 0770 .hg/store .hg/cache .hg/wcache
44
44
45 before commit
45 before commit
46 store can be written by the group, other files cannot
46 store can be written by the group, other files cannot
47 store is setgid
47 store is setgid
48
48
49 $ "$PYTHON" ../printmodes.py .
49 $ "$PYTHON" ../printmodes.py .
50 00700 ./.hg/
50 00700 ./.hg/
51 00600 ./.hg/00changelog.i
51 00600 ./.hg/00changelog.i
52 00770 ./.hg/cache/
52 00770 ./.hg/cache/
53 00600 ./.hg/requires
53 00600 ./.hg/requires
54 00770 ./.hg/store/
54 00770 ./.hg/store/
55 00600 ./.hg/store/requires
55 00600 ./.hg/store/requires
56 00770 ./.hg/wcache/
56 00770 ./.hg/wcache/
57
57
58 $ mkdir dir
58 $ mkdir dir
59 $ touch foo dir/bar
59 $ touch foo dir/bar
60 $ hg ci -qAm 'add files'
60 $ hg ci -qAm 'add files'
61
61
62 after commit
62 after commit
63 working dir files can only be written by the owner
63 working dir files can only be written by the owner
64 files created in .hg can be written by the group
64 files created in .hg can be written by the group
65 (in particular, store/**, dirstate, branch cache file, undo files)
65 (in particular, store/**, dirstate, branch cache file, undo files)
66 new directories are setgid
66 new directories are setgid
67
67
68 $ "$PYTHON" ../printmodes.py .
68 $ "$PYTHON" ../printmodes.py .
69 00700 ./.hg/
69 00700 ./.hg/
70 00600 ./.hg/00changelog.i
70 00600 ./.hg/00changelog.i
71 00770 ./.hg/cache/
71 00770 ./.hg/cache/
72 00660 ./.hg/cache/branch2-served
72 00660 ./.hg/cache/branch2-served
73 00660 ./.hg/cache/rbc-names-v1
73 00660 ./.hg/cache/rbc-names-v1
74 00660 ./.hg/cache/rbc-revs-v1
74 00660 ./.hg/cache/rbc-revs-v1
75 00660 ./.hg/dirstate
75 00660 ./.hg/dirstate
76 00660 ./.hg/fsmonitor.state (fsmonitor !)
76 00660 ./.hg/fsmonitor.state (fsmonitor !)
77 00660 ./.hg/last-message.txt
77 00660 ./.hg/last-message.txt
78 00600 ./.hg/requires
78 00600 ./.hg/requires
79 00770 ./.hg/store/
79 00770 ./.hg/store/
80 00660 ./.hg/store/00changelog.i
80 00660 ./.hg/store/00changelog.i
81 00660 ./.hg/store/00manifest.i
81 00660 ./.hg/store/00manifest.i
82 00770 ./.hg/store/data/
82 00770 ./.hg/store/data/
83 00770 ./.hg/store/data/dir/
83 00770 ./.hg/store/data/dir/
84 00660 ./.hg/store/data/dir/bar.i (reporevlogstore !)
84 00660 ./.hg/store/data/dir/bar.i (reporevlogstore !)
85 00660 ./.hg/store/data/foo.i (reporevlogstore !)
85 00660 ./.hg/store/data/foo.i (reporevlogstore !)
86 00770 ./.hg/store/data/dir/bar/ (reposimplestore !)
86 00770 ./.hg/store/data/dir/bar/ (reposimplestore !)
87 00660 ./.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
87 00660 ./.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
88 00660 ./.hg/store/data/dir/bar/index (reposimplestore !)
88 00660 ./.hg/store/data/dir/bar/index (reposimplestore !)
89 00770 ./.hg/store/data/foo/ (reposimplestore !)
89 00770 ./.hg/store/data/foo/ (reposimplestore !)
90 00660 ./.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
90 00660 ./.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
91 00660 ./.hg/store/data/foo/index (reposimplestore !)
91 00660 ./.hg/store/data/foo/index (reposimplestore !)
92 00660 ./.hg/store/fncache (repofncache !)
92 00660 ./.hg/store/fncache (repofncache !)
93 00660 ./.hg/store/phaseroots
93 00660 ./.hg/store/phaseroots
94 00600 ./.hg/store/requires
94 00600 ./.hg/store/requires
95 00660 ./.hg/store/undo
95 00660 ./.hg/store/undo
96 00660 ./.hg/store/undo.backupfiles
96 00660 ./.hg/store/undo.backupfiles
97 00660 ./.hg/store/undo.phaseroots
97 00660 ./.hg/store/undo.phaseroots
98 00660 ./.hg/undo.backup.dirstate
99 00660 ./.hg/undo.bookmarks
98 00660 ./.hg/undo.bookmarks
100 00660 ./.hg/undo.branch
99 00660 ./.hg/undo.branch
101 00660 ./.hg/undo.desc
100 00660 ./.hg/undo.desc
102 00660 ./.hg/undo.dirstate
103 00770 ./.hg/wcache/
101 00770 ./.hg/wcache/
104 00711 ./.hg/wcache/checkisexec
102 00711 ./.hg/wcache/checkisexec
105 007.. ./.hg/wcache/checklink (re)
103 007.. ./.hg/wcache/checklink (re)
106 00600 ./.hg/wcache/checklink-target
104 00600 ./.hg/wcache/checklink-target
107 00660 ./.hg/wcache/manifestfulltextcache (reporevlogstore !)
105 00660 ./.hg/wcache/manifestfulltextcache (reporevlogstore !)
108 00700 ./dir/
106 00700 ./dir/
109 00600 ./dir/bar
107 00600 ./dir/bar
110 00600 ./foo
108 00600 ./foo
111
109
112 $ umask 007
110 $ umask 007
113 $ hg init ../push
111 $ hg init ../push
114
112
115 before push
113 before push
116 group can write everything
114 group can write everything
117
115
118 $ "$PYTHON" ../printmodes.py ../push
116 $ "$PYTHON" ../printmodes.py ../push
119 00770 ../push/.hg/
117 00770 ../push/.hg/
120 00660 ../push/.hg/00changelog.i
118 00660 ../push/.hg/00changelog.i
121 00770 ../push/.hg/cache/
119 00770 ../push/.hg/cache/
122 00660 ../push/.hg/requires
120 00660 ../push/.hg/requires
123 00770 ../push/.hg/store/
121 00770 ../push/.hg/store/
124 00660 ../push/.hg/store/requires
122 00660 ../push/.hg/store/requires
125 00770 ../push/.hg/wcache/
123 00770 ../push/.hg/wcache/
126
124
127 $ umask 077
125 $ umask 077
128 $ hg -q push ../push
126 $ hg -q push ../push
129
127
130 after push
128 after push
131 group can still write everything
129 group can still write everything
132
130
133 $ "$PYTHON" ../printmodes.py ../push
131 $ "$PYTHON" ../printmodes.py ../push
134 00770 ../push/.hg/
132 00770 ../push/.hg/
135 00660 ../push/.hg/00changelog.i
133 00660 ../push/.hg/00changelog.i
136 00770 ../push/.hg/cache/
134 00770 ../push/.hg/cache/
137 00660 ../push/.hg/cache/branch2-base
135 00660 ../push/.hg/cache/branch2-base
138 00660 ../push/.hg/cache/rbc-names-v1
136 00660 ../push/.hg/cache/rbc-names-v1
139 00660 ../push/.hg/cache/rbc-revs-v1
137 00660 ../push/.hg/cache/rbc-revs-v1
140 00660 ../push/.hg/requires
138 00660 ../push/.hg/requires
141 00770 ../push/.hg/store/
139 00770 ../push/.hg/store/
142 00660 ../push/.hg/store/00changelog.i
140 00660 ../push/.hg/store/00changelog.i
143 00660 ../push/.hg/store/00manifest.i
141 00660 ../push/.hg/store/00manifest.i
144 00770 ../push/.hg/store/data/
142 00770 ../push/.hg/store/data/
145 00770 ../push/.hg/store/data/dir/
143 00770 ../push/.hg/store/data/dir/
146 00660 ../push/.hg/store/data/dir/bar.i (reporevlogstore !)
144 00660 ../push/.hg/store/data/dir/bar.i (reporevlogstore !)
147 00660 ../push/.hg/store/data/foo.i (reporevlogstore !)
145 00660 ../push/.hg/store/data/foo.i (reporevlogstore !)
148 00770 ../push/.hg/store/data/dir/bar/ (reposimplestore !)
146 00770 ../push/.hg/store/data/dir/bar/ (reposimplestore !)
149 00660 ../push/.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
147 00660 ../push/.hg/store/data/dir/bar/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
150 00660 ../push/.hg/store/data/dir/bar/index (reposimplestore !)
148 00660 ../push/.hg/store/data/dir/bar/index (reposimplestore !)
151 00770 ../push/.hg/store/data/foo/ (reposimplestore !)
149 00770 ../push/.hg/store/data/foo/ (reposimplestore !)
152 00660 ../push/.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
150 00660 ../push/.hg/store/data/foo/b80de5d138758541c5f05265ad144ab9fa86d1db (reposimplestore !)
153 00660 ../push/.hg/store/data/foo/index (reposimplestore !)
151 00660 ../push/.hg/store/data/foo/index (reposimplestore !)
154 00660 ../push/.hg/store/fncache (repofncache !)
152 00660 ../push/.hg/store/fncache (repofncache !)
155 00660 ../push/.hg/store/requires
153 00660 ../push/.hg/store/requires
156 00660 ../push/.hg/store/undo
154 00660 ../push/.hg/store/undo
157 00660 ../push/.hg/store/undo.backupfiles
155 00660 ../push/.hg/store/undo.backupfiles
158 00660 ../push/.hg/store/undo.phaseroots
156 00660 ../push/.hg/store/undo.phaseroots
159 00660 ../push/.hg/undo.bookmarks
157 00660 ../push/.hg/undo.bookmarks
160 00660 ../push/.hg/undo.branch
158 00660 ../push/.hg/undo.branch
161 00660 ../push/.hg/undo.desc
159 00660 ../push/.hg/undo.desc
162 00770 ../push/.hg/wcache/
160 00770 ../push/.hg/wcache/
163
161
164
162
165 Test that we don't lose the setgid bit when we call chmod.
163 Test that we don't lose the setgid bit when we call chmod.
166 Not all systems support setgid directories (e.g. HFS+), so
164 Not all systems support setgid directories (e.g. HFS+), so
167 just check that directories have the same mode.
165 just check that directories have the same mode.
168
166
169 $ cd ..
167 $ cd ..
170 $ hg init setgid
168 $ hg init setgid
171 $ cd setgid
169 $ cd setgid
172 $ chmod g+rwx .hg/store
170 $ chmod g+rwx .hg/store
173 $ chmod g+s .hg/store 2> /dev/null || true
171 $ chmod g+s .hg/store 2> /dev/null || true
174 $ mkdir dir
172 $ mkdir dir
175 $ touch dir/file
173 $ touch dir/file
176 $ hg ci -qAm 'add dir/file'
174 $ hg ci -qAm 'add dir/file'
177 $ storemode=`"$PYTHON" ../mode.py .hg/store`
175 $ storemode=`"$PYTHON" ../mode.py .hg/store`
178 $ dirmode=`"$PYTHON" ../mode.py .hg/store/data/dir`
176 $ dirmode=`"$PYTHON" ../mode.py .hg/store/data/dir`
179 $ if [ "$storemode" != "$dirmode" ]; then
177 $ if [ "$storemode" != "$dirmode" ]; then
180 > echo "$storemode != $dirmode"
178 > echo "$storemode != $dirmode"
181 > fi
179 > fi
182 $ cd ..
180 $ cd ..
183
181
184 $ cd .. # g-s dir
182 $ cd .. # g-s dir
@@ -1,125 +1,125 b''
1 #testcases skip-detection fail-if-detected
1 #testcases skip-detection fail-if-detected
2
2
3 Test situations that "should" only be reproducible:
3 Test situations that "should" only be reproducible:
4 - on networked filesystems, or
4 - on networked filesystems, or
5 - user using `hg debuglocks` to eliminate the lock file, or
5 - user using `hg debuglocks` to eliminate the lock file, or
6 - something (that doesn't respect the lock file) writing to the .hg directory
6 - something (that doesn't respect the lock file) writing to the .hg directory
7 while we're running
7 while we're running
8
8
9
9
10 Initial setup
10 Initial setup
11 -------------
11 -------------
12
12
13 $ hg init base-repo
13 $ hg init base-repo
14 $ cd base-repo
14 $ cd base-repo
15
15
16 $ cat > "$TESTTMP_FORWARD_SLASH/waitlock_editor.sh" <<EOF
16 $ cat > "$TESTTMP_FORWARD_SLASH/waitlock_editor.sh" <<EOF
17 > [ -n "\${WAITLOCK_ANNOUNCE:-}" ] && touch "\${WAITLOCK_ANNOUNCE}"
17 > [ -n "\${WAITLOCK_ANNOUNCE:-}" ] && touch "\${WAITLOCK_ANNOUNCE}"
18 > f="\${WAITLOCK_FILE}"
18 > f="\${WAITLOCK_FILE}"
19 > start=\`date +%s\`
19 > start=\`date +%s\`
20 > timeout=5
20 > timeout=5
21 > "$RUNTESTDIR_FORWARD_SLASH/testlib/wait-on-file" "\$timeout" "\$f"
21 > "$RUNTESTDIR_FORWARD_SLASH/testlib/wait-on-file" "\$timeout" "\$f"
22 > if [ \$# -gt 1 ]; then
22 > if [ \$# -gt 1 ]; then
23 > cat "\$@"
23 > cat "\$@"
24 > fi
24 > fi
25 > EOF
25 > EOF
26
26
27 Things behave differently if we don't already have a 00changelog.i file when
27 Things behave differently if we don't already have a 00changelog.i file when
28 this all starts, so let's make one.
28 this all starts, so let's make one.
29
29
30 $ echo r0 > r0
30 $ echo r0 > r0
31 $ hg commit -qAm 'r0'
31 $ hg commit -qAm 'r0'
32
32
33 $ cd ..
33 $ cd ..
34 $ cp -R base-repo main-client
34 $ cp -R base-repo main-client
35 $ cp -R base-repo racing-client
35 $ cp -R base-repo racing-client
36
36
37 $ mkdir sync
37 $ mkdir sync
38 $ EDITOR_STARTED="$TESTTMP_FORWARD_SLASH/sync/.editor_started"
38 $ EDITOR_STARTED="$TESTTMP_FORWARD_SLASH/sync/.editor_started"
39 $ MISCHIEF_MANAGED="$TESTTMP_FORWARD_SLASH/sync/.mischief_managed"
39 $ MISCHIEF_MANAGED="$TESTTMP_FORWARD_SLASH/sync/.mischief_managed"
40 $ JOBS_FINISHED="$TESTTMP_FORWARD_SLASH/sync/.jobs_finished"
40 $ JOBS_FINISHED="$TESTTMP_FORWARD_SLASH/sync/.jobs_finished"
41
41
42 Actual test
42 Actual test
43 -----------
43 -----------
44
44
45 Start an hg commit that will take a while
45 Start an hg commit that will take a while
46
46
47 $ cd main-client
47 $ cd main-client
48
48
49 #if fail-if-detected
49 #if fail-if-detected
50 $ cat >> $HGRCPATH << EOF
50 $ cat >> $HGRCPATH << EOF
51 > [debug]
51 > [debug]
52 > revlog.verifyposition.changelog = fail
52 > revlog.verifyposition.changelog = fail
53 > EOF
53 > EOF
54 #endif
54 #endif
55
55
56 $ echo foo > foo
56 $ echo foo > foo
57 $ (
57 $ (
58 > unset HGEDITOR;
58 > unset HGEDITOR;
59 > WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
59 > WAITLOCK_ANNOUNCE="${EDITOR_STARTED}" \
60 > WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
60 > WAITLOCK_FILE="${MISCHIEF_MANAGED}" \
61 > hg commit -qAm 'r1 (foo)' --edit foo \
61 > hg commit -qAm 'r1 (foo)' --edit foo \
62 > --config ui.editor="sh $TESTTMP_FORWARD_SLASH/waitlock_editor.sh" \
62 > --config ui.editor="sh $TESTTMP_FORWARD_SLASH/waitlock_editor.sh" \
63 > > .foo_commit_out 2>&1 ;\
63 > > .foo_commit_out 2>&1 ;\
64 > touch "${JOBS_FINISHED}"
64 > touch "${JOBS_FINISHED}"
65 > ) &
65 > ) &
66
66
67 Wait for the "editor" to actually start
67 Wait for the "editor" to actually start
68 $ sh "$RUNTESTDIR_FORWARD_SLASH/testlib/wait-on-file" 5 "${EDITOR_STARTED}"
68 $ sh "$RUNTESTDIR_FORWARD_SLASH/testlib/wait-on-file" 5 "${EDITOR_STARTED}"
69
69
70
70
71 Do a concurrent edit
71 Do a concurrent edit
72 $ cd ../racing-client
72 $ cd ../racing-client
73 $ touch ../pre-race
73 $ touch ../pre-race
74 $ sleep 1
74 $ sleep 1
75 $ echo bar > bar
75 $ echo bar > bar
76 $ hg --repository ../racing-client commit -qAm 'r2 (bar)' bar
76 $ hg --repository ../racing-client commit -qAm 'r2 (bar)' bar
77 $ hg --repository ../racing-client debugrevlogindex -c
77 $ hg --repository ../racing-client debugrevlogindex -c
78 rev linkrev nodeid p1 p2
78 rev linkrev nodeid p1 p2
79 0 0 222799e2f90b 000000000000 000000000000
79 0 0 222799e2f90b 000000000000 000000000000
80 1 1 6f124f6007a0 222799e2f90b 000000000000
80 1 1 6f124f6007a0 222799e2f90b 000000000000
81
81
82 We simulate a network FS race by overwriting raced repo content with the new
82 We simulate a network FS race by overwriting raced repo content with the new
83 content of the files changed in the racing repository
83 content of the files changed in the racing repository
84
84
85 $ for x in `find . -type f -newer ../pre-race`; do
85 $ for x in `find . -type f -newer ../pre-race`; do
86 > cp $x ../main-client/$x
86 > cp $x ../main-client/$x
87 > done
87 > done
88 $ cd ../main-client
88 $ cd ../main-client
89
89
90 Awaken the editor from that first commit
90 Awaken the editor from that first commit
91 $ touch "${MISCHIEF_MANAGED}"
91 $ touch "${MISCHIEF_MANAGED}"
92 And wait for it to finish
92 And wait for it to finish
93 $ WAITLOCK_FILE="${JOBS_FINISHED}" sh "$TESTTMP_FORWARD_SLASH/waitlock_editor.sh"
93 $ WAITLOCK_FILE="${JOBS_FINISHED}" sh "$TESTTMP_FORWARD_SLASH/waitlock_editor.sh"
94
94
95 #if skip-detection
95 #if skip-detection
96 (Ensure there was no output)
96 (Ensure there was no output)
97 $ cat .foo_commit_out
97 $ cat .foo_commit_out
98 And observe a corrupted repository -- rev 2's linkrev is 1, which should never
98 And observe a corrupted repository -- rev 2's linkrev is 1, which should never
99 happen for the changelog (the linkrev should always refer to itself).
99 happen for the changelog (the linkrev should always refer to itself).
100 $ hg debugrevlogindex -c
100 $ hg debugrevlogindex -c
101 rev linkrev nodeid p1 p2
101 rev linkrev nodeid p1 p2
102 0 0 222799e2f90b 000000000000 000000000000
102 0 0 222799e2f90b 000000000000 000000000000
103 1 1 6f124f6007a0 222799e2f90b 000000000000
103 1 1 6f124f6007a0 222799e2f90b 000000000000
104 2 1 ac80e6205bb2 222799e2f90b 000000000000
104 2 1 ac80e6205bb2 222799e2f90b 000000000000
105 #endif
105 #endif
106
106
107 #if fail-if-detected
107 #if fail-if-detected
108 $ cat .foo_commit_out
108 $ cat .foo_commit_out
109 note: commit message saved in .hg/last-message.txt
110 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
109 transaction abort!
111 transaction abort!
110 rollback completed
112 rollback completed
111 note: commit message saved in .hg/last-message.txt
112 note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
113 abort: 00changelog.i: file cursor at position 249, expected 121
113 abort: 00changelog.i: file cursor at position 249, expected 121
114 And no corruption in the changelog.
114 And no corruption in the changelog.
115 $ hg debugrevlogindex -c
115 $ hg debugrevlogindex -c
116 rev linkrev nodeid p1 p2
116 rev linkrev nodeid p1 p2
117 0 0 222799e2f90b 000000000000 000000000000
117 0 0 222799e2f90b 000000000000 000000000000
118 1 1 6f124f6007a0 222799e2f90b 000000000000 (missing-correct-output !)
118 1 1 6f124f6007a0 222799e2f90b 000000000000 (missing-correct-output !)
119 And, because of transactions, there's none in the manifestlog either.
119 And, because of transactions, there's none in the manifestlog either.
120 $ hg debugrevlogindex -m
120 $ hg debugrevlogindex -m
121 rev linkrev nodeid p1 p2
121 rev linkrev nodeid p1 p2
122 0 0 7b7020262a56 000000000000 000000000000
122 0 0 7b7020262a56 000000000000 000000000000
123 1 1 ad3fe36d86d9 7b7020262a56 000000000000
123 1 1 ad3fe36d86d9 7b7020262a56 000000000000
124 #endif
124 #endif
125
125
General Comments 0
You need to be logged in to leave comments. Login now