##// END OF EJS Templates
uncommit: move _movedirstate() to scmutil for reuse...
Martin von Zweigbergk -
r42103:232d4b9d default
parent child Browse files
Show More
@@ -1,261 +1,223 b''
1 1 # uncommit - undo the actions of a commit
2 2 #
3 3 # Copyright 2011 Peter Arrenbrecht <peter.arrenbrecht@gmail.com>
4 4 # Logilab SA <contact@logilab.fr>
5 5 # Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 # Patrick Mezard <patrick@mezard.eu>
7 7 # Copyright 2016 Facebook, Inc.
8 8 #
9 9 # This software may be used and distributed according to the terms of the
10 10 # GNU General Public License version 2 or any later version.
11 11
12 12 """uncommit part or all of a local changeset (EXPERIMENTAL)
13 13
14 14 This command undoes the effect of a local commit, returning the affected
15 15 files to their uncommitted state. This means that files modified, added or
16 16 removed in the changeset will be left unchanged, and so will remain modified,
17 17 added and removed in the working directory.
18 18 """
19 19
20 20 from __future__ import absolute_import
21 21
22 22 from mercurial.i18n import _
23 23
24 24 from mercurial import (
25 25 cmdutil,
26 26 commands,
27 27 context,
28 28 copies as copiesmod,
29 29 error,
30 30 node,
31 31 obsutil,
32 32 pycompat,
33 33 registrar,
34 34 rewriteutil,
35 35 scmutil,
36 36 )
37 37
# Table of commands added by this extension; filled in by the @command
# decorator below.
cmdtable = {}
command = registrar.command(cmdtable)

# Table of configuration items declared by this extension.
configtable = {}
configitem = registrar.configitem(configtable)

# When set, allow uncommitting even when the working directory is dirty.
configitem('experimental', 'uncommitondirtywdir',
    default=False,
)
# When set, keep an (empty) commit after uncommitting all of its files.
configitem('experimental', 'uncommit.keep',
    default=False,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
56 56
def _commitfiltered(repo, ctx, match, keepcommit):
    """Recommit ctx with changed files not in match. Return the new
    node identifier, or None if nothing changed.

    When keepcommit is false the commit is dropped entirely and the
    parent's node is returned instead.
    """
    base = ctx.p1()
    # Files touched by ctx that the matcher selects are the ones being
    # uncommitted (excluded from the recreated commit).
    initialfiles = set(ctx.files())
    exclude = set(f for f in initialfiles if match(f))

    # No files matched commit, so nothing excluded
    if not exclude:
        return None

    # return the p1 so that we don't create an obsmarker later
    if not keepcommit:
        return ctx.p1().node()

    files = (initialfiles - exclude)
    # Filter copies: only keep copy records whose destination survives in
    # the recreated commit.
    copied = copiesmod.pathcopies(base, ctx)
    copied = dict((dst, src) for dst, src in copied.iteritems()
                  if dst in files)
    def filectxfn(repo, memctx, path, contentctx=ctx, redirect=()):
        # Returning None marks the file as removed in the new commit.
        if path not in contentctx:
            return None
        fctx = contentctx[path]
        mctx = context.memfilectx(repo, memctx, fctx.path(), fctx.data(),
                                  fctx.islink(),
                                  fctx.isexec(),
                                  copied=copied.get(path))
        return mctx

    if not files:
        repo.ui.status(_("note: keeping empty commit\n"))

    # Recreate the commit on the same parent with the reduced file set;
    # description, user, date and extras are carried over unchanged.
    new = context.memctx(repo,
                         parents=[base.node(), node.nullid],
                         text=ctx.description(),
                         files=files,
                         filectxfn=filectxfn,
                         user=ctx.user(),
                         date=ctx.date(),
                         extra=ctx.extra())
    return repo.commitctx(new)
101 101
def _movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    The working copy files themselves are not touched; only the dirstate
    entries (and recorded copies) are rewritten so the working copy is
    interpreted relative to newctx instead of the old '.'.
    """
    oldctx = repo['.']
    ds = repo.dirstate
    ds.setparents(newctx.node(), node.nullid)
    copies = dict(ds.copies())
    # Status of the old parent relative to the new one tells us which
    # dirstate entries need adjusting.
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == 'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == 'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != 'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == 'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != 'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # Chain copies through the old parent: dst<-src becomes dst<-origin
    # when src itself was copied from origin.
    copies = dict((dst, oldcopies.get(src, src))
                  for dst, src in oldcopies.iteritems())
    # Adjust the dirstate copies
    for dst, src in copies.iteritems():
        if (src not in newctx or dst in newctx or ds[dst] != 'a'):
            src = None
        ds.copy(src, dst)
139
@command('uncommit',
    [('', 'keep', None, _('allow an empty commit after uncommiting')),
     ('', 'allow-dirty-working-copy', False,
      _('allow uncommit with outstanding changes'))
    ] + commands.walkopts,
    _('[OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT)
def uncommit(ui, repo, *pats, **opts):
    """uncommit part or all of a local changeset

    This command undoes the effect of a local commit, returning the affected
    files to their uncommitted state. This means that files modified or
    deleted in the changeset will be left unchanged, and so will remain
    modified in the working directory.

    If no files are specified, the commit will be pruned, unless --keep is
    given.
    """
    opts = pycompat.byteskwargs(opts)

    with repo.wlock(), repo.lock():

        m, a, r, d = repo.status()[:4]
        # A dirty working copy only blocks uncommit when it overlaps the
        # requested paths (or when no paths were given at all).
        isdirtypath = any(set(m + a + r + d) & set(pats))
        allowdirtywcopy = (opts['allow_dirty_working_copy'] or
                           repo.ui.configbool('experimental',
                                              'uncommitondirtywdir'))
        if not allowdirtywcopy and (not pats or isdirtypath):
            cmdutil.bailifchanged(repo, hint=_('requires '
                                '--allow-dirty-working-copy to uncommit'))
        old = repo['.']
        rewriteutil.precheck(repo, [old.rev()], 'uncommit')
        if len(old.parents()) > 1:
            raise error.Abort(_("cannot uncommit merge changeset"))

        with repo.transaction('uncommit'):
            match = scmutil.match(old, pats, opts)
            # With explicit paths this is a partial uncommit, so the commit
            # itself is always kept; otherwise honor --keep or the
            # experimental.uncommit.keep config knob.
            keepcommit = pats
            if not keepcommit:
                if opts.get('keep') is not None:
                    keepcommit = opts.get('keep')
                else:
                    keepcommit = ui.configbool('experimental', 'uncommit.keep')
            newid = _commitfiltered(repo, old, match, keepcommit)
            if newid is None:
                ui.status(_("nothing to uncommit\n"))
                return 1

            mapping = {}
            if newid != old.p1().node():
                # Move local changes on filtered changeset
                mapping[old.node()] = (newid,)
            else:
                # Fully removed the old commit
                mapping[old.node()] = ()

            with repo.dirstate.parentchange():
                scmutil.movedirstate(repo, repo[newid], match)

            scmutil.cleanupnodes(repo, mapping, 'uncommit', fixphase=True)
199 161
def predecessormarkers(ctx):
    """yields the obsolete markers marking the given changeset as a successor"""
    repo = ctx.repo()
    for markerdata in repo.obsstore.predecessors.get(ctx.node(), ()):
        yield obsutil.marker(repo, markerdata)
204 166
@command('unamend', [], helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
         helpbasic=True)
def unamend(ui, repo, **opts):
    """undo the most recent amend operation on a current changeset

    This command will roll back to the previous version of a changeset,
    leaving working directory in state in which it was before running
    `hg amend` (e.g. files modified as part of an amend will be
    marked as modified `hg status`)
    """

    unfi = repo.unfiltered()
    with repo.wlock(), repo.lock(), repo.transaction('unamend'):

        # identify the commit from which to unamend
        curctx = repo['.']

        rewriteutil.precheck(repo, [curctx.rev()], 'unamend')

        # identify the commit to which to unamend; there must be exactly
        # one obsolescence predecessor for the operation to be well-defined
        markers = list(predecessormarkers(curctx))
        if len(markers) != 1:
            e = _("changeset must have one predecessor, found %i predecessors")
            raise error.Abort(e % len(markers))

        prednode = markers[0].prednode()
        predctx = unfi[prednode]

        # add an extra so that we get a new hash
        # note: allowing unamend to undo an unamend is an intentional feature
        extras = predctx.extra()
        extras['unamend_source'] = curctx.hex()

        def filectxfn(repo, ctx_, path):
            # Paths missing from the predecessor are treated as removed.
            try:
                return predctx.filectx(path)
            except KeyError:
                return None

        # Make a new commit same as predctx
        newctx = context.memctx(repo,
                                parents=(predctx.p1(), predctx.p2()),
                                text=predctx.description(),
                                files=predctx.files(),
                                filectxfn=filectxfn,
                                user=predctx.user(),
                                date=predctx.date(),
                                extra=extras)
        newprednode = repo.commitctx(newctx)
        newpredctx = repo[newprednode]
        dirstate = repo.dirstate

        with dirstate.parentchange():
            scmutil.movedirstate(repo, newpredctx)

        # Mark the amended commit as replaced by the recreated predecessor.
        mapping = {curctx.node(): (newprednode,)}
        scmutil.cleanupnodes(repo, mapping, 'unamend', fixphase=True)
@@ -1,1903 +1,1942 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import posixpath
15 15 import re
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 copies as copiesmod,
31 32 encoding,
32 33 error,
33 34 match as matchmod,
34 35 obsolete,
35 36 obsutil,
36 37 pathutil,
37 38 phases,
38 39 policy,
39 40 pycompat,
40 41 revsetlang,
41 42 similar,
42 43 smartset,
43 44 url,
44 45 util,
45 46 vfs,
46 47 )
47 48
48 49 from .utils import (
49 50 procutil,
50 51 stringutil,
51 52 )
52 53
53 54 if pycompat.iswindows:
54 55 from . import scmwindows as scmplatform
55 56 else:
56 57 from . import scmposix as scmplatform
57 58
58 59 parsers = policy.importmod(r'parsers')
59 60
60 61 termsize = scmplatform.termsize
61 62
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        # Pack the seven per-status file lists into the underlying tuple.
        fields = (modified, added, removed, deleted, unknown, ignored, clean)
        return tuple.__new__(cls, fields)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = (r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                    r'unknown=%s, ignored=%s, clean=%s>')
        values = tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
        return template % values
115 116
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    # Subrepos only present in ctx2 are removed from the main mapping and
    # handled separately at the end.
    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
140 141
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count excluded secret (and not extinct) changesets so the user
    # understands why "no changes" was reported.
    secretlist = []
    for n in (excluded or []):
        ctx = repo[n]
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if not secretlist:
        ui.status(_("no changes found\n"))
    else:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
157 158
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # Record the traceback (when enabled) before the specific
            # handlers below consume the exception.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker))
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        # The payload may be unicode, bytes or something else entirely;
        # normalize unicode to bytes, then pick a printable form.
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        # User action required is reported with exit code 1, not -1.
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, "code"): # HTTPError
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"): # URLError or SSLError
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # Broken pipe (e.g. output consumer went away): stay silent.
            pass
        elif getattr(inst, "strerror", None): # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(_("abort: %s: '%s'\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else: # suspicious IOError
            raise
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code

    return -1
268 269
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(forbidden))
    # Purely numeric names would be ambiguous with revision numbers.
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl.strip() != lbl:
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
285 286
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # Newlines and carriage returns would corrupt internal file lists.
    for banned in ('\r', '\n'):
        if banned in f:
            raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                              % pycompat.bytestr(f))
291 292
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    # Skip the (relatively expensive) Windows-name check entirely when the
    # configuration asks for neither warning nor abort.
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
303 304
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lowered = val.lower()
    boolval = stringutil.parsebool(val)
    # Windows always aborts on non-portable names; elsewhere it depends on
    # the configured value.
    abort = pycompat.iswindows or lowered == 'abort'
    warn = boolval or lowered == 'warn'
    if boolval is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
316 317
class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        # Lower-cased view of every tracked filename, used to detect
        # case-folding collisions.
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        # Exact name already vetted in this session; nothing to do.
        if f in self._newfiles:
            return
        lowered = encoding.lower(f)
        if lowered in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(lowered)
        self._newfiles.add(f)
340 341
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    digest = hashlib.sha1()
    for rev in revs:
        digest.update('%d;' % rev)
    return digest.digest()
364 365
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only errors on the root path itself are fatal; failures on
        # subdirectories are silently skipped by os.walk.
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # Remember the stat of each visited directory so symlink
            # cycles can be detected via samestat; returns True when the
            # directory has not been seen before.
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # Without samestat we cannot detect symlink cycles, so disable
        # symlink following entirely.
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Walk the link target recursively, sharing
                        # seen_dirs so already-visited dirs are skipped.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
408 409
def binnode(ctx):
    """Return binary node id for a given basectx"""
    n = ctx.node()
    # A None node denotes the (virtual) working directory revision.
    return wdirid if n is None else n
415 416
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    r = ctx.rev()
    # The working directory has no integer revision; substitute the
    # dedicated wdirrev sentinel so comparisons still work.
    return wdirrev if r is None else r
423 424
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
429 430
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Debug mode shows the full hash, otherwise the short form.
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
437 438
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a hex nodeid prefix to a binary node, or None if no match.

    An 'x' in front of the prefix (with the prefixhexnode experiment
    enabled) explicitly marks the remainder as a hex node prefix.
    Ambiguous prefixes re-raise unless the
    'experimental.revisions.disambiguatewithin' revset narrows the
    candidates down to exactly one.
    """
    if (prefix.startswith('x') and
        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous/
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # Only a unique match within the revset resolves the
                # ambiguity; otherwise re-raise the original error.
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
466 467
def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
    except ValueError:
        return False
    # if we are a pure int, then starting with zero will not be
    # confused as a rev; or, obviously, if the int is larger
    # than the value of the tip rev. We still need to disambiguate if
    # prefix == '0', since that *is* a valid revnum.
    if prefix != b'0' and prefix[0:1] == b'0':
        return False
    if i >= len(repo):
        return False
    return True
480 481
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength=max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            # With the prefixhexnode experiment, an 'x' marker is enough
            # to distinguish the prefix from a revision number.
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix

        # Otherwise grow the prefix until it can no longer be mistaken
        # for a revision number.
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # Slow path without a native nodetree: grow the prefix until it
            # matches exactly one node within the disambiguation revset.
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()
551 552
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
563 564
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        # Lookup precedence: special names, revision numbers, full hex
        # nodeids, registered names (bookmarks/tags/branches), then hex
        # nodeid prefixes.
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            # Reject strings like '012' whose int form round-trips
            # differently; they are not plain revision numbers.
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # Translate filtered-lookup failures into a user-facing message
        # explaining why the revision is hidden.
        raise _filterederror(repo, symbol)
624 625
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # Check if the changeset is obsolete
    unfilteredrepo = repo.unfiltered()
    ctx = revsymbol(unfilteredrepo, changeid)

    # If the changeset is obsolete, enrich the message with the reason
    # that made this changeset not visible
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid

    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
649 650
def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve a single revspec; fall back to repo[default] for empty specs.

    An integer 0 counts as a real revision, not as "empty"."""
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    # Multiple matches: the last one wins.
    return repo[matched.last()]
658 659
def _pairspec(revspec):
    """Report whether the top level of the parsed revspec is a range operator."""
    parsed = revsetlang.parse(revspec)
    if not parsed:
        return parsed
    return parsed[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
662 663
def revpair(repo, revs):
    """Resolve revspecs into a (first, second) pair of change contexts.

    With no specs at all, the pair is ('.', working directory)."""
    if not revs:
        return repo['.'], repo[None]

    resolved = revrange(repo, revs)
    if not resolved:
        raise error.Abort(_('empty revision range'))

    first = resolved.first()
    second = resolved.last()

    if first == second:
        # Several specs collapsing to one rev is fine only if none of them
        # was individually empty.
        if len(revs) >= 2 and not all(revrange(repo, [r]) for r in revs):
            raise error.Abort(_('empty revision on one side of range'))
        # if top-level is range expression, the result must always be a pair
        if len(revs) == 1 and not _pairspec(revs[0]):
            return repo[first], repo[None]

    return repo[first], repo[second]
684 685
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Turn bare revision numbers into formatted revset fragments.
    allspecs = [revsetlang.formatspec('%d', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
712 713
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # A real merge: show both parents.
        return parents
    if repo.ui.debugflag:
        # Debug output always shows both slots, padding with nullrev.
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        # Parent is simply the preceding revision: nothing worth showing.
        return []
    return parents
728 729
def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        configval = repo.ui.config('ui', 'relative-paths')
        if configval == 'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(configval)
            if relative is None:
                raise error.ConfigError(
                    _("ui.relative-paths is not a boolean ('%s')") % configval)

    if relative:
        # cwd-relative presentation: bind cwd and pathto once.
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    if repo.ui.configbool('ui', 'slash'):
        # repo-relative paths already use forward slashes.
        return lambda f: f
    return util.localpath
763 764
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    def fn(f):
        # Re-anchor the file under subpath before delegating.
        return uipathfn(posixpath.join(subpath, f))
    return fn
767 768
def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    if pats:
        return True
    return bool(opts.get('include') or opts.get('exclude'))
775 776
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # Explicitly-kinded pattern: pass through unchanged.
            expanded.append(kindpat)
            continue
        try:
            matches = glob.glob(pat)
        except re.error:
            matches = [pat]
        if matches:
            expanded.extend(matches)
        else:
            # Glob matched nothing: keep the original pattern.
            expanded.append(kindpat)
    return expanded
794 795
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        # On Windows, expand bare globs the shell did not expand for us.
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
    if badfn is None:
        # Default bad-file callback: warn with a user-presentable path.
        def badfn(f, msg):
            ctx.repo().ui.warn("%s: %s\n" % (uipathfn(f), msg))

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # Match-everything matcher: report no explicit patterns.
        pats = []
    return m, pats
818 819
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
823 824
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # The repo argument is unused; it is kept for signature symmetry with
    # the other match* helpers.
    return matchmod.always()
827 828
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # An exact matcher matches the named files and nothing else.
    return matchmod.exact(files, badfn=badfn)
831 832
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # A plain path: canonicalize it relative to the repo root.
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    matcher = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    matched = [f for f in ctx if matcher(f)]
    if len(matched) != 1:
        # followlines requires the pattern to resolve to exactly one file.
        raise error.ParseError(msg)
    return matched[0]
845 846
def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    configured = ui.config('ui', 'origbackuppath')
    if not configured:
        return None
    # The configured path is interpreted relative to the working directory.
    return vfs.vfs(repo.wvfs.join(configured))
854 855
def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # Default behavior: put the backup next to the file itself.
        return repo.wjoin(filepath + ".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # A plain file may sit where one of the backup's ancestor
        # directories belongs; remove the first such conflict found,
        # walking from the shallowest directory downwards.
        for d in reversed(list(util.finddirs(filepath))):
            if origvfs.isfileorlink(d):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(d))
                origvfs.unlink(d)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        # A directory occupies the backup file's own path: clear it out.
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepath))
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)
889 890
890 891 class _containsnode(object):
891 892 """proxy __contains__(node) to container.__contains__ which accepts revs"""
892 893
893 894 def __init__(self, repo, revcontainer):
894 895 self._torev = repo.changelog.rev
895 896 self._revcontains = revcontainer.__contains__
896 897
897 898 def __contains__(self, node):
898 899 return self._revcontains(self._torev(node))
899 900
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.

    If 'fixphase' is true, phases of the new nodes are recomputed from their
    predecessors and parents (or forced to 'targetphase' when given);
    'targetphase' may only be passed together with 'fixphase'.  'backup'
    controls whether a backup bundle is written when nodes get stripped or
    archived.
    """
    # targetphase without fixphase would silently be ignored; forbid it.
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        # bare iterable of nodes: each is replaced by nothing
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls
    # From here on, replacements is {(oldnode, ...): (newnode, ...)}.

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                # explicit caller-provided move wins
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                      allreplaced))
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    # All new nodes; used both for phase fixing and for divergent-bookmark
    # cleanup inside the transaction below.
    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        # {newnode: [oldnode, ...]} reverse mapping of replacements
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        # Process new nodes in rev order so a node's parents are handled
        # before the node itself.
        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            # phase the node will have once the pending moves are applied
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        for phase, nodes in toretract.items():
            phases.retractboundary(repo, tr, phase, nodes)
        for phase, nodes in toadvance.items():
            phases.advanceboundary(repo, tr, phase, nodes)

        mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived')
        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the sorting might belong to createmarkers.
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0][0])
            rels = []
            for ns, s in sorted(replacements.items(), key=sortfunc):
                rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
                rels.append(rel)
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        elif phases.supportinternal(repo) and mayusearchived:
            # this assume we do not have "unstable" nodes above the cleaned ones
            allreplaced = set()
            for ns in replacements.keys():
                allreplaced.update(ns)
            if backup:
                from . import repair # avoid import cycle
                node = min(allreplaced, key=repo.changelog.rev)
                repair.backupbundle(repo, allreplaced, allreplaced, node,
                                    operation)
            phases.retractboundary(repo, tr, phases.archived, allreplaced)
        else:
            from . import repair # avoid import cycle
            tostrip = list(n for ns in replacements for n in ns)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation,
                                    backup=backup)
1054 1055
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    """Add new files and forget missing files matched by ``matcher``.

    Walks the working directory (and subrepos when requested), prints what
    is being added/removed, records renames detected by content similarity,
    and - unless ``dry_run`` - applies the changes to the dirstate.

    Returns 1 if any explicitly named path was rejected by the matcher, or
    if any subrepo's addremove reported failure; otherwise 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        # --similarity is given as a percentage on the command line
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    # Recurse into subrepos first so their dirstates are updated too.
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % uipathfn(subpath))

    # Collect paths the matcher rejects; only explicitly listed files
    # produce a warning via m.bad().
    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % uipathfn(abs)
                label = 'ui.addremove.added'
            else:
                status = _('removing %s\n') % uipathfn(abs)
                label = 'ui.addremove.removed'
            repo.ui.status(status, label=label)

    # Pair up removed/deleted files with added/unknown ones by similarity.
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity, uipathfn)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # An explicitly requested file that was rejected is a hard failure.
    for f in rejected:
        if f in m.files():
            return 1
    return ret
1117 1118
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # 'rejected' is referenced by the badfn closure before it is assigned;
    # that is safe because the closure only runs once the matcher is used,
    # after the list exists.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    # Echo what will be added/removed, but only in verbose mode.
    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity, uipathfn)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # Report failure if any of the named files was rejected by the matcher.
    for f in rejected:
        if f in m.files():
            return 1
    return 0
1150 1151
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    auditor = pathutil.pathauditor(repo.root, cached=True)

    wctx = repo[None]
    ds = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = ds.walk(matcher, subrepos=sorted(wctx.substate),
                          unknown=True, ignored=False, full=False)
    for fname, st in walkresults.iteritems():
        state = ds[fname]
        # The order of these tests matters: each assumes the earlier
        # ones did not match.
        if state == '?' and auditor.check(fname):
            unknown.append(fname)
        elif state != 'r' and not st:
            deleted.append(fname)
        elif state == 'r' and st:
            forgotten.append(fname)
        # for finding renames
        elif state == 'r' and not st:
            removed.append(fname)
        elif state == 'a':
            added.append(fname)

    return added, unknown, deleted, removed, forgotten
1180 1181
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        # Rename detection disabled.
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        if (repo.ui.verbose or not matcher.exact(old)
            or not matcher.exact(new)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (uipathfn(old), uipathfn(new),
                            score * 100))
        renames[new] = old
    return renames
1195 1196
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for newname, oldname in renames.iteritems():
            wctx.copy(oldname, newname)
1205 1206
def getrenamedfn(repo, endrev=None):
    """Return a getrenamed(fn, rev) callable with a per-file rename cache."""
    renamecache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in renamecache:
            # First request for this file: scan its filelog once and
            # remember the rename source (or a falsy value) per linkrev.
            byrev = renamecache[fn] = {}
            fl = repo.file(fn)
            for filerev in fl:
                linkrev = fl.linkrev(filerev)
                renamed = fl.renamed(fl.node(filerev))
                byrev[linkrev] = renamed and renamed[0]
                if linkrev >= endrev:
                    break
        if rev in renamecache[fn]:
            return renamecache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
1236 1237
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # If src is itself a copy, follow it back to the original source.
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        # Cancel the copy: put dst back into normal tracking unless it is
        # already in a merged/normal state.
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # The source was only added, never committed: there is no
            # revision to attach copy metadata to.
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                # Still make sure the destination ends up tracked.
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
1255 1256
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    Reparents the dirstate onto ``newctx`` while keeping the working copy
    contents untouched, adjusting each file's dirstate status and copy
    records so the working copy reads as a set of changes on top of the
    new parent.  ``match`` optionally restricts which files are considered.
    """
    oldctx = repo['.']
    ds = repo.dirstate
    ds.setparents(newctx.node(), nullid)
    # Snapshot the working-dir copy records before we start mutating.
    copies = dict(ds.copies())
    # Differences between the new parent and the old parent, restricted to
    # 'match'; these tell us how each file's status must be rewritten.
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == 'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == 'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != 'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == 'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != 'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    # Chain copies through intermediate sources back to newctx.
    copies = dict((dst, oldcopies.get(src, src))
                  for dst, src in oldcopies.iteritems())
    # Adjust the dirstate copies
    for dst, src in copies.iteritems():
        # Only record a copy when the source exists in the new parent and
        # the destination is an added file; otherwise clear any record.
        if (src not in newctx or dst in newctx or ds[dst] != 'a'):
            src = None
        ds.copy(src, dst)
1294
def writerequires(opener, requirements):
    """Write the sorted `requirements`, one per line, to a 'requires' file."""
    with opener('requires', 'w', atomictemp=True) as fp:
        for name in sorted(requirements):
            fp.write("%s\n" % name)
1260 1299
class filecachesubentry(object):
    """Tracks the stat() state of a single path and detects changes."""

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        # Tri-state: True/False once known, None while still undetermined.
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # stat failed (e.g. missing file): stay undetermined
                self._cacheable = None

    def refresh(self):
        """Re-stat the path so later changed() calls compare against now."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether the path's stat data can be trusted for caching."""
        if self._cacheable is None:
            # unknown: optimistically assume it is cacheable
            return True
        return self._cacheable

    def changed(self):
        """True if the path changed since the last stat, or can't be cached."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # A successful stat may settle previously-unknown cacheability.
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again, now that cacheability may be known
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """Return util.cachestat(path), or None if the file does not exist."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1315 1354
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(path, stat) for path in paths]

    def changed(self):
        '''true if any entry has changed'''
        # any() short-circuits on the first changed entry, like the
        # equivalent explicit loop would.
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
1332 1371
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # Invoked when used as a decorator: remember the wrapped function,
        # its native-str name (sname) and the bytes key used in _filecache.
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        # An entry in obj.__dict__ would shadow this descriptor entirely,
        # so we should never get here while one exists.
        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # Cache on the instance: later reads bypass this descriptor.
        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x
1417 1456
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # "<revspec> <value>" or just "<revspec>" (empty value)
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # Always reap the child process / close the stream, even if the
        # parsing loop above raised.
        if proc:
            proc.communicate()
        if src:
            src.close()
    # 'cmd' is only bound on the shell: path, which is also the only path
    # where 'proc' is non-None, so this reference is safe.
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1474 1513
1475 1514 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1476 1515 if lock is None:
1477 1516 raise error.LockInheritanceContractViolation(
1478 1517 'lock can only be inherited while held')
1479 1518 if environ is None:
1480 1519 environ = {}
1481 1520 with lock.inherit() as locker:
1482 1521 environ[envvar] = locker
1483 1522 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1484 1523
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd,
                    *args, **kwargs)
1493 1532
class progress(object):
    """Context-manager helper that drives a progress bar.

    'updatebar' is a callable taking (topic, pos, item, unit, total); it
    is invoked on every update() and once more with pos=None from
    complete() so the bar can clear itself.
    """

    def __init__(self, ui, updatebar, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        # cache the config lookup; consulted on every update()
        self.debug = ui.configbool('progress', 'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # always clear the bar, even if an exception escapes the block
        self.complete()

    def update(self, pos, item="", total=None):
        """Move the bar to 'pos'; optionally refresh item and total."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item="", total=None):
        """Advance the bar by 'step' positions."""
        self.update(self.pos + step, item, total)

    def complete(self):
        """Reset state and tell the bar to clear itself (pos=None)."""
        self.pos = None
        self.unit = ""
        self.total = None
        self._updatebar(self.topic, self.pos, "", self.unit, self.total)

    def _printdebug(self, item):
        # initialize 'unit' unconditionally: previously it was only bound
        # inside the 'if self.unit:' branch, so a debug-mode update with
        # the default empty unit raised UnboundLocalError below
        unit = ''
        if self.unit:
            unit = ' ' + self.unit
        if item:
            item = ' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
                          % (self.topic, item, self.pos, self.total, unit, pct))
        else:
            self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1540 1579
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return any(ui.configbool('format', name)
               for name in ('generaldelta', 'usegeneraldelta'))
1547 1586
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
1553 1592
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter; values must not
    contain newline characters."""

    # key under which read() returns a non key=value first line
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        When 'firstlinenonkeyval' is true, the first line is not parsed
        as a key=value pair; it is returned verbatim (without its
        trailing newline) under the __firstline key."""
        lines = self.vfs.readlines(self.path)
        result = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_("empty simplekeyvalue file"))
            # we don't want to include '\n' in the __firstline
            result[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # skip lines containing only whitespace ('if line.strip()'):
            # they hold no key=value pair and would break the split below
            parsed = dict(line[:-1].split('=', 1)
                          for line in lines if line.strip())
            if self.firstlinekey in parsed:
                msg = _("%r can't be used as a key")
                raise error.CorruptedState(msg % self.firstlinekey)
            result.update(parsed)
        except ValueError as exc:
            raise error.CorruptedState(str(exc))
        return result

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file

        'data' is a dict. Keys must be alphanumerical and start with a
        letter; values must not contain newline characters.

        If 'firstline' is not None, it is written before everything
        else, verbatim, not in key=value form."""
        if firstline is not None:
            lines = ['%s\n' % firstline]
        else:
            lines = []

        for key, value in data.items():
            if key == self.firstlinekey:
                raise error.ProgrammingError(
                    "key name '%s' is reserved" % self.firstlinekey)
            if not key[0:1].isalpha():
                raise error.ProgrammingError(
                    "keys must start with a letter in a key-value file")
            if not key.isalnum():
                raise error.ProgrammingError(
                    "invalid key name in a simple key-value file")
            if '\n' in value:
                raise error.ProgrammingError(
                    "invalid value in a simple key-value file")
            lines.append("%s=%s\n" % (key, value))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1622 1661
# transaction-name prefixes (matched with startswith() by
# registersummarycallback's txmatch) for which obsoleted changesets
# should be summarized at transaction close
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction-name prefixes for which new changesets and phase changes
# should be summarized at transaction close
_reportnewcssource = [
    'pull',
    'unbundle',
]
1635 1674
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if not match:
        match = matchall(repo)
    else:
        # the command itself will complain about files that don't exist,
        # so silence the matcher's own bad-file messages to avoid
        # duplicating them
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1648 1687
# a list of (repo, revs, match) prefetch functions
# (populated by extensions; invoked from prefetchfiles() above)
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1654 1693
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Depending on 'txnname', registers reports for obsoleted changesets,
    new instabilities, new changesets and phase changes on the
    transaction 'otr'.
    """
    def txmatch(sources):
        # 'sources' are transaction-name prefixes (see the
        # _report*source lists above)
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            # NOTE: this 'repo' deliberately shadows the outer one; it is
            # re-fetched through the weakref at transaction-close time
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # numbered category names keep callbacks in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            # changesets obsoleted by this transaction
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        # (user-visible name, revset name) pairs for each instability kind
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count only visible (unfiltered-minus-filtered) unstable revs
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot the counts now; the callback below compares against
        # them after the transaction closes
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                # nothing was added by this transaction
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            # obsolete revs absent from the filtered changelog are invisible
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            # only count pre-existing changesets that became public
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
1783 1822
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extension can wrap to show more
    information like how to fix instabilities"""
    if delta <= 0:
        # no new instabilities of this kind; nothing to warn about
        return None
    return _('%i new %s changesets\n') % (delta, instability)
1791 1830
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a short textual summary of 'nodes', eliding the tail when
    there are more than 'maxnumnodes' of them and the ui is not verbose."""
    if repo.ui.verbose or len(nodes) <= maxnumnodes:
        return ' '.join(short(n) for n in nodes)
    shown = ' '.join(short(n) for n in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1797 1836
def enforcesinglehead(repo, tr, desc):
    """Abort if any named branch in the visible repo has multiple heads."""
    if desc in ('strip', 'repair'):
        # stripping legitimately creates transient extra heads; skip
        return
    visible = repo.filtered('visible')
    # possible improvement: restrict the check to the affected branches
    for branch, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % branch
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1812 1851
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # default implementation is the identity; extensions wrap this function
    # to substitute or decorate the sink
    return sink
1818 1857
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if (not repo.filtername
        or not repo.ui.configbool('experimental', 'directaccess')):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    hashlike = set()
    for spec in specs:
        try:
            parsed = revsetlang.parse(spec)
        except error.ParseError:
            # a bogus spec will be reported by scmutil.revrange() later
            continue

        hashlike.update(revsetlang.gethashlikesymbols(parsed))

    if not hashlike:
        return repo

    pinned = _getrevsfromsymbols(repo, hashlike)

    if not pinned:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join(pycompat.bytestr(unfi[r]) for r in pinned)
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # a distinct filter name keeps the branch/tags caches separate until we
    # can disable those caches while revisions are dynamically pinned
    return repo.filtered('visible-hidden', pinned)
1861 1900
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        # first, try to interpret the symbol as a plain revision number
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    # revnums disabled by config: never treat as a revnum,
                    # but still try it as a hash prefix below
                    continue
                else:
                    if n not in cl:
                        # hidden revnum: record it, done with this symbol
                        revs.add(n)
                        continue
            # NOTE: a visible revnum (n in cl) or an out-of-range number
            # deliberately falls through to the hash-prefix lookup below
        except ValueError:
            pass

        # second, try to interpret the symbol as a hex nodeid prefix
        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                # resolved to a node whose rev is hidden
                revs.add(rev)

    return revs
1895 1934
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark

    The revset keeps the ancestors of the bookmark while excluding
    ancestors of other heads and of other bookmarks, i.e. the changesets
    that "belong" to this bookmark only.
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)
General Comments 0
You need to be logged in to leave comments. Login now