clonebundles: move a bundle of clone bundle related code to a new module...
marmoute
r46369:74271829 default
@@ -0,0 +1,422 b''
1 # bundlecaches.py - utility to deal with pre-computed bundle for servers
2 #
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
5
6 from .i18n import _
7
8 from .thirdparty import attr
9
10 from . import (
11 error,
12 sslutil,
13 util,
14 )
15 from .utils import stringutil
16
17 urlreq = util.urlreq
18
19
20 @attr.s
21 class bundlespec(object):
22 compression = attr.ib()
23 wirecompression = attr.ib()
24 version = attr.ib()
25 wireversion = attr.ib()
26 params = attr.ib()
27 contentopts = attr.ib()
28
29
30 # Maps bundle version human names to changegroup versions.
31 _bundlespeccgversions = {
32 b'v1': b'01',
33 b'v2': b'02',
34 b'packed1': b's1',
35 b'bundle2': b'02', # legacy
36 }
37
38 # Maps bundle version with content opts to choose which part to bundle
39 _bundlespeccontentopts = {
40 b'v1': {
41 b'changegroup': True,
42 b'cg.version': b'01',
43 b'obsolescence': False,
44 b'phases': False,
45 b'tagsfnodescache': False,
46 b'revbranchcache': False,
47 },
48 b'v2': {
49 b'changegroup': True,
50 b'cg.version': b'02',
51 b'obsolescence': False,
52 b'phases': False,
53 b'tagsfnodescache': True,
54 b'revbranchcache': True,
55 },
56 b'packed1': {b'cg.version': b's1'},
57 }
58 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
59
60 _bundlespecvariants = {
61 b"streamv2": {
62 b"changegroup": False,
63 b"streamv2": True,
64 b"tagsfnodescache": False,
65 b"revbranchcache": False,
66 }
67 }
68
69 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
70 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
71
72
73 def parsebundlespec(repo, spec, strict=True):
74 """Parse a bundle string specification into parts.
75
76 Bundle specifications denote a well-defined bundle/exchange format.
77 The content of a given specification should not change over time in
78 order to ensure that bundles produced by a newer version of Mercurial are
79 readable from an older version.
80
81 The string currently has the form:
82
83 <compression>-<type>[;<parameter0>[;<parameter1>]]
84
85 Where <compression> is one of the supported compression formats
86 and <type> is (currently) a version string. A ";" can follow the type and
87 all text afterwards is interpreted as URI encoded, ";" delimited key=value
88 pairs.
89
90 If ``strict`` is True (the default) <compression> is required. Otherwise,
91 it is optional.
92
93 Returns a bundlespec object of (compression, version, parameters).
94 Compression will be ``None`` if not in strict mode and a compression isn't
95 defined.
96
97 An ``InvalidBundleSpecification`` is raised when the specification is
98 not syntactically well formed.
99
100 An ``UnsupportedBundleSpecification`` is raised when the compression or
101 bundle type/version is not recognized.
102
103 Note: this function will likely eventually return a more complex data
104 structure, including bundle2 part information.
105 """
106
107 def parseparams(s):
108 if b';' not in s:
109 return s, {}
110
111 params = {}
112 version, paramstr = s.split(b';', 1)
113
114 for p in paramstr.split(b';'):
115 if b'=' not in p:
116 raise error.InvalidBundleSpecification(
117 _(
118 b'invalid bundle specification: '
119 b'missing "=" in parameter: %s'
120 )
121 % p
122 )
123
124 key, value = p.split(b'=', 1)
125 key = urlreq.unquote(key)
126 value = urlreq.unquote(value)
127 params[key] = value
128
129 return version, params
130
131 if strict and b'-' not in spec:
132 raise error.InvalidBundleSpecification(
133 _(
134 b'invalid bundle specification; '
135 b'must be prefixed with compression: %s'
136 )
137 % spec
138 )
139
140 if b'-' in spec:
141 compression, version = spec.split(b'-', 1)
142
143 if compression not in util.compengines.supportedbundlenames:
144 raise error.UnsupportedBundleSpecification(
145 _(b'%s compression is not supported') % compression
146 )
147
148 version, params = parseparams(version)
149
150 if version not in _bundlespeccgversions:
151 raise error.UnsupportedBundleSpecification(
152 _(b'%s is not a recognized bundle version') % version
153 )
154 else:
155 # Value could be just the compression or just the version, in which
156 # case some defaults are assumed (but only when not in strict mode).
157 assert not strict
158
159 spec, params = parseparams(spec)
160
161 if spec in util.compengines.supportedbundlenames:
162 compression = spec
163 version = b'v1'
164 # Generaldelta repos require v2.
165 if b'generaldelta' in repo.requirements:
166 version = b'v2'
167 # Modern compression engines require v2.
168 if compression not in _bundlespecv1compengines:
169 version = b'v2'
170 elif spec in _bundlespeccgversions:
171 if spec == b'packed1':
172 compression = b'none'
173 else:
174 compression = b'bzip2'
175 version = spec
176 else:
177 raise error.UnsupportedBundleSpecification(
178 _(b'%s is not a recognized bundle specification') % spec
179 )
180
181 # Bundle version 1 only supports a known set of compression engines.
182 if version == b'v1' and compression not in _bundlespecv1compengines:
183 raise error.UnsupportedBundleSpecification(
184 _(b'compression engine %s is not supported on v1 bundles')
185 % compression
186 )
187
188 # The specification for packed1 can optionally declare the data formats
189 # required to apply it. If we see this metadata, compare against what the
190 # repo supports and error if the bundle isn't compatible.
191 if version == b'packed1' and b'requirements' in params:
192 requirements = set(params[b'requirements'].split(b','))
193 missingreqs = requirements - repo.supportedformats
194 if missingreqs:
195 raise error.UnsupportedBundleSpecification(
196 _(b'missing support for repository features: %s')
197 % b', '.join(sorted(missingreqs))
198 )
199
200 # Compute contentopts based on the version
201 contentopts = _bundlespeccontentopts.get(version, {}).copy()
202
203 # Process the variants
204 if b"stream" in params and params[b"stream"] == b"v2":
205 variant = _bundlespecvariants[b"streamv2"]
206 contentopts.update(variant)
207
208 engine = util.compengines.forbundlename(compression)
209 compression, wirecompression = engine.bundletype()
210 wireversion = _bundlespeccgversions[version]
211
212 return bundlespec(
213 compression, wirecompression, version, wireversion, params, contentopts
214 )
215
216
217 def parseclonebundlesmanifest(repo, s):
218 """Parses the raw text of a clone bundles manifest.
219
220 Returns a list of dicts. The dicts have a ``URL`` key corresponding
221 to the URL and other keys are the attributes for the entry.
222 """
223 m = []
224 for line in s.splitlines():
225 fields = line.split()
226 if not fields:
227 continue
228 attrs = {b'URL': fields[0]}
229 for rawattr in fields[1:]:
230 key, value = rawattr.split(b'=', 1)
231 key = util.urlreq.unquote(key)
232 value = util.urlreq.unquote(value)
233 attrs[key] = value
234
235 # Parse BUNDLESPEC into components. This makes client-side
236 # preferences easier to specify since you can prefer a single
237 # component of the BUNDLESPEC.
238 if key == b'BUNDLESPEC':
239 try:
240 bundlespec = parsebundlespec(repo, value)
241 attrs[b'COMPRESSION'] = bundlespec.compression
242 attrs[b'VERSION'] = bundlespec.version
243 except error.InvalidBundleSpecification:
244 pass
245 except error.UnsupportedBundleSpecification:
246 pass
247
248 m.append(attrs)
249
250 return m
251
252
253 def isstreamclonespec(bundlespec):
254 # Stream clone v1
255 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
256 return True
257
258 # Stream clone v2
259 if (
260 bundlespec.wirecompression == b'UN'
261 and bundlespec.wireversion == b'02'
262 and bundlespec.contentopts.get(b'streamv2')
263 ):
264 return True
265
266 return False
267
268
269 def filterclonebundleentries(repo, entries, streamclonerequested=False):
270 """Remove incompatible clone bundle manifest entries.
271
272 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
273 and returns a new list consisting of only the entries that this client
274 should be able to apply.
275
276 There is no guarantee we'll be able to apply all returned entries because
277 the metadata we use to filter on may be missing or wrong.
278 """
279 newentries = []
280 for entry in entries:
281 spec = entry.get(b'BUNDLESPEC')
282 if spec:
283 try:
284 bundlespec = parsebundlespec(repo, spec, strict=True)
285
286 # If a stream clone was requested, filter out non-streamclone
287 # entries.
288 if streamclonerequested and not isstreamclonespec(bundlespec):
289 repo.ui.debug(
290 b'filtering %s because not a stream clone\n'
291 % entry[b'URL']
292 )
293 continue
294
295 except error.InvalidBundleSpecification as e:
296 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
297 continue
298 except error.UnsupportedBundleSpecification as e:
299 repo.ui.debug(
300 b'filtering %s because unsupported bundle '
301 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
302 )
303 continue
304 # If we don't have a spec and requested a stream clone, we don't know
305 # what the entry is so don't attempt to apply it.
306 elif streamclonerequested:
307 repo.ui.debug(
308 b'filtering %s because cannot determine if a stream '
309 b'clone bundle\n' % entry[b'URL']
310 )
311 continue
312
313 if b'REQUIRESNI' in entry and not sslutil.hassni:
314 repo.ui.debug(
315 b'filtering %s because SNI not supported\n' % entry[b'URL']
316 )
317 continue
318
319 if b'REQUIREDRAM' in entry:
320 try:
321 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
322 except error.ParseError:
323 repo.ui.debug(
324 b'filtering %s due to a bad REQUIREDRAM attribute\n'
325 % entry[b'URL']
326 )
327 continue
328 actualram = repo.ui.estimatememory()
329 if actualram is not None and actualram * 0.66 < requiredram:
330 repo.ui.debug(
331 b'filtering %s as it needs more than 2/3 of system memory\n'
332 % entry[b'URL']
333 )
334 continue
335
336 newentries.append(entry)
337
338 return newentries
339
340
341 class clonebundleentry(object):
342 """Represents an item in a clone bundles manifest.
343
344 This rich class is needed to support sorting since sorted() in Python 3
345 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
346 won't work.
347 """
348
349 def __init__(self, value, prefers):
350 self.value = value
351 self.prefers = prefers
352
353 def _cmp(self, other):
354 for prefkey, prefvalue in self.prefers:
355 avalue = self.value.get(prefkey)
356 bvalue = other.value.get(prefkey)
357
358 # Special case for b missing attribute and a matches exactly.
359 if avalue is not None and bvalue is None and avalue == prefvalue:
360 return -1
361
362 # Special case for a missing attribute and b matches exactly.
363 if bvalue is not None and avalue is None and bvalue == prefvalue:
364 return 1
365
366 # We can't compare unless attribute present on both.
367 if avalue is None or bvalue is None:
368 continue
369
370 # Same values should fall back to next attribute.
371 if avalue == bvalue:
372 continue
373
374 # Exact matches come first.
375 if avalue == prefvalue:
376 return -1
377 if bvalue == prefvalue:
378 return 1
379
380 # Fall back to next attribute.
381 continue
382
383 # If we got here we couldn't sort by attributes and prefers. Fall
384 # back to index order.
385 return 0
386
387 def __lt__(self, other):
388 return self._cmp(other) < 0
389
390 def __gt__(self, other):
391 return self._cmp(other) > 0
392
393 def __eq__(self, other):
394 return self._cmp(other) == 0
395
396 def __le__(self, other):
397 return self._cmp(other) <= 0
398
399 def __ge__(self, other):
400 return self._cmp(other) >= 0
401
402 def __ne__(self, other):
403 return self._cmp(other) != 0
404
405
406 def sortclonebundleentries(ui, entries):
407 prefers = ui.configlist(b'ui', b'clonebundleprefers')
408 if not prefers:
409 return list(entries)
410
411 def _split(p):
412 if b'=' not in p:
413 hint = _(b"each comma separated item should be key=value pairs")
414 raise error.Abort(
415 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
416 )
417 return p.split(b'=', 1)
418
419 prefers = [_split(p) for p in prefers]
420
421 items = sorted(clonebundleentry(v, prefers) for v in entries)
422 return [i.value for i in items]
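A minimal usage sketch of the new module (not part of this changeset). The manifest text and URLs below are made up for illustration, and a ``repo`` object for an existing local repository is assumed; the function names are exactly the ones introduced above::

    from mercurial import bundlecaches

    # Two hypothetical manifest lines: a gzip-compressed v2 bundle and an
    # uncompressed packed1 bundle with a URL-encoded requirements hint.
    manifest = (
        b'https://cdn.example.com/full.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=1GB\n'
        b'https://cdn.example.com/packed.hg'
        b' BUNDLESPEC=none-packed1;requirements%3Drevlogv1\n'
    )

    entries = bundlecaches.parseclonebundlesmanifest(repo, manifest)
    # Drop entries this client cannot apply (unknown spec, missing SNI,
    # not enough RAM, ...).
    entries = bundlecaches.filterclonebundleentries(repo, entries)
    # Order the survivors by the user's ui.clonebundleprefers setting, if any.
    entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
    for entry in entries:
        repo.ui.write(b'%s (%s)\n' % (entry[b'URL'], entry.get(b'BUNDLESPEC', b'')))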
@@ -1,426 +1,426 b''
1 1 # lfs - hash-preserving large file support using Git-LFS protocol
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """lfs - large file support (EXPERIMENTAL)
9 9
10 10 This extension allows large files to be tracked outside of the normal
11 11 repository storage and stored on a centralized server, similar to the
12 12 ``largefiles`` extension. The ``git-lfs`` protocol is used when
13 13 communicating with the server, so existing git infrastructure can be
14 14 harnessed. Even though the files are stored outside of the repository,
15 15 they are still integrity checked in the same manner as normal files.
16 16
17 17 The files stored outside of the repository are downloaded on demand,
18 18 which reduces the time to clone, and possibly the local disk usage.
19 19 This changes fundamental workflows in a DVCS, so careful thought
20 20 should be given before deploying it. :hg:`convert` can be used to
21 21 convert LFS repositories to normal repositories that no longer
22 22 require this extension, and do so without changing the commit hashes.
23 23 This allows the extension to be disabled if the centralized workflow
24 24 becomes burdensome. However, the pre and post convert clones will
25 25 not be able to communicate with each other unless the extension is
26 26 enabled on both.
27 27
28 28 To start a new repository, or to add LFS files to an existing one, just
29 29 create an ``.hglfs`` file as described below in the root directory of
30 30 the repository. Typically, this file should be put under version
31 31 control, so that the settings will propagate to other repositories with
32 32 push and pull. During any commit, Mercurial will consult this file to
33 33 determine if an added or modified file should be stored externally. The
34 34 type of storage depends on the characteristics of the file at each
35 35 commit. A file that is near a size threshold may switch back and forth
36 36 between LFS and normal storage, as needed.
37 37
38 38 Alternately, both normal repositories and largefile controlled
39 39 repositories can be converted to LFS by using :hg:`convert` and the
40 40 ``lfs.track`` config option described below. The ``.hglfs`` file
41 41 should then be created and added, to control subsequent LFS selection.
42 42 The hashes are also unchanged in this case. The LFS and non-LFS
43 43 repositories can be distinguished because the LFS repository will
44 44 abort any command if this extension is disabled.
45 45
46 46 Committed LFS files are held locally, until the repository is pushed.
47 47 Prior to pushing the normal repository data, the LFS files that are
48 48 tracked by the outgoing commits are automatically uploaded to the
49 49 configured central server. No LFS files are transferred on
50 50 :hg:`pull` or :hg:`clone`. Instead, the files are downloaded on
51 51 demand as they need to be read, if a cached copy cannot be found
52 52 locally. Both committing and downloading an LFS file will link the
53 53 file to a usercache, to speed up future access. See the `usercache`
54 54 config setting described below.
55 55
56 56 The extension reads its configuration from a versioned ``.hglfs``
57 57 configuration file found in the root of the working directory. The
58 58 ``.hglfs`` file uses the same syntax as all other Mercurial
59 59 configuration files. It uses a single section, ``[track]``.
60 60
61 61 The ``[track]`` section specifies which files are stored as LFS (or
62 62 not). Each line is keyed by a file pattern, with a predicate value.
63 63 The first file pattern match is used, so put more specific patterns
64 64 first. The available predicates are ``all()``, ``none()``, and
65 65 ``size()``. See "hg help filesets.size" for the latter.
66 66
67 67 Example versioned ``.hglfs`` file::
68 68
69 69 [track]
70 70 # No Makefile or python file, anywhere, will be LFS
71 71 **Makefile = none()
72 72 **.py = none()
73 73
74 74 **.zip = all()
75 75 **.exe = size(">1MB")
76 76
77 77 # Catchall for everything not matched above
78 78 ** = size(">10MB")
79 79
80 80 Configs::
81 81
82 82 [lfs]
83 83 # Remote endpoint. Multiple protocols are supported:
84 84 # - http(s)://user:pass@example.com/path
85 85 # git-lfs endpoint
86 86 # - file:///tmp/path
87 87 # local filesystem, usually for testing
88 88 # if unset, lfs will assume the remote repository also handles blob storage
89 89 # for http(s) URLs. Otherwise, lfs will prompt to set this when it must
90 90 # use this value.
91 91 # (default: unset)
92 92 url = https://example.com/repo.git/info/lfs
93 93
94 94 # Which files to track in LFS. Path tests are "**.extname" for file
95 95 # extensions, and "path:under/some/directory" for path prefix. Both
96 96 # are relative to the repository root.
97 97 # File size can be tested with the "size()" fileset, and tests can be
98 98 # joined with fileset operators. (See "hg help filesets.operators".)
99 99 #
100 100 # Some examples:
101 101 # - all() # everything
102 102 # - none() # nothing
103 103 # - size(">20MB") # larger than 20MB
104 104 # - !**.txt # anything not a *.txt file
105 105 # - **.zip | **.tar.gz | **.7z # some types of compressed files
106 106 # - path:bin # files under "bin" in the project root
107 107 # - (**.php & size(">2MB")) | (**.js & size(">5MB")) | **.tar.gz
108 108 # | (path:bin & !path:/bin/README) | size(">1GB")
109 109 # (default: none())
110 110 #
111 111 # This is ignored if there is a tracked '.hglfs' file, and this setting
112 112 # will eventually be deprecated and removed.
113 113 track = size(">10M")
114 114
115 115 # how many times to retry before giving up on transferring an object
116 116 retry = 5
117 117
118 118 # the local directory to store lfs files for sharing across local clones.
119 119 # If not set, the cache is located in an OS specific cache location.
120 120 usercache = /path/to/global/cache
121 121 """
122 122
123 123 from __future__ import absolute_import
124 124
125 125 import sys
126 126
127 127 from mercurial.i18n import _
128 128
129 129 from mercurial import (
130 bundlecaches,
130 131 config,
131 132 context,
132 133 error,
133 exchange,
134 134 extensions,
135 135 exthelper,
136 136 filelog,
137 137 filesetlang,
138 138 localrepo,
139 139 minifileset,
140 140 node,
141 141 pycompat,
142 142 revlog,
143 143 scmutil,
144 144 templateutil,
145 145 util,
146 146 )
147 147
148 148 from mercurial.interfaces import repository
149 149
150 150 from . import (
151 151 blobstore,
152 152 wireprotolfsserver,
153 153 wrapper,
154 154 )
155 155
156 156 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
157 157 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
158 158 # be specifying the version(s) of Mercurial they are tested with, or
159 159 # leave the attribute unspecified.
160 160 testedwith = b'ships-with-hg-core'
161 161
162 162 eh = exthelper.exthelper()
163 163 eh.merge(wrapper.eh)
164 164 eh.merge(wireprotolfsserver.eh)
165 165
166 166 cmdtable = eh.cmdtable
167 167 configtable = eh.configtable
168 168 extsetup = eh.finalextsetup
169 169 uisetup = eh.finaluisetup
170 170 filesetpredicate = eh.filesetpredicate
171 171 reposetup = eh.finalreposetup
172 172 templatekeyword = eh.templatekeyword
173 173
174 174 eh.configitem(
175 175 b'experimental', b'lfs.serve', default=True,
176 176 )
177 177 eh.configitem(
178 178 b'experimental', b'lfs.user-agent', default=None,
179 179 )
180 180 eh.configitem(
181 181 b'experimental', b'lfs.disableusercache', default=False,
182 182 )
183 183 eh.configitem(
184 184 b'experimental', b'lfs.worker-enable', default=True,
185 185 )
186 186
187 187 eh.configitem(
188 188 b'lfs', b'url', default=None,
189 189 )
190 190 eh.configitem(
191 191 b'lfs', b'usercache', default=None,
192 192 )
193 193 # Deprecated
194 194 eh.configitem(
195 195 b'lfs', b'threshold', default=None,
196 196 )
197 197 eh.configitem(
198 198 b'lfs', b'track', default=b'none()',
199 199 )
200 200 eh.configitem(
201 201 b'lfs', b'retry', default=5,
202 202 )
203 203
204 204 lfsprocessor = (
205 205 wrapper.readfromstore,
206 206 wrapper.writetostore,
207 207 wrapper.bypasscheckhash,
208 208 )
209 209
210 210
211 211 def featuresetup(ui, supported):
212 212 # don't die on seeing a repo with the lfs requirement
213 213 supported |= {b'lfs'}
214 214
215 215
216 216 @eh.uisetup
217 217 def _uisetup(ui):
218 218 localrepo.featuresetupfuncs.add(featuresetup)
219 219
220 220
221 221 @eh.reposetup
222 222 def _reposetup(ui, repo):
223 223 # Nothing to do with a remote repo
224 224 if not repo.local():
225 225 return
226 226
227 227 repo.svfs.lfslocalblobstore = blobstore.local(repo)
228 228 repo.svfs.lfsremoteblobstore = blobstore.remote(repo)
229 229
230 230 class lfsrepo(repo.__class__):
231 231 @localrepo.unfilteredmethod
232 232 def commitctx(self, ctx, error=False, origctx=None):
233 233 repo.svfs.options[b'lfstrack'] = _trackedmatcher(self)
234 234 return super(lfsrepo, self).commitctx(ctx, error, origctx=origctx)
235 235
236 236 repo.__class__ = lfsrepo
237 237
238 238 if b'lfs' not in repo.requirements:
239 239
240 240 def checkrequireslfs(ui, repo, **kwargs):
241 241 if b'lfs' in repo.requirements:
242 242 return 0
243 243
244 244 last = kwargs.get('node_last')
245 245 _bin = node.bin
246 246 if last:
247 247 s = repo.set(b'%n:%n', _bin(kwargs['node']), _bin(last))
248 248 else:
249 249 s = repo.set(b'%n', _bin(kwargs['node']))
250 250 match = repo._storenarrowmatch
251 251 for ctx in s:
252 252 # TODO: is there a way to just walk the files in the commit?
253 253 if any(
254 254 ctx[f].islfs() for f in ctx.files() if f in ctx and match(f)
255 255 ):
256 256 repo.requirements.add(b'lfs')
257 257 repo.features.add(repository.REPO_FEATURE_LFS)
258 258 scmutil.writereporequirements(repo)
259 259 repo.prepushoutgoinghooks.add(b'lfs', wrapper.prepush)
260 260 break
261 261
262 262 ui.setconfig(b'hooks', b'commit.lfs', checkrequireslfs, b'lfs')
263 263 ui.setconfig(
264 264 b'hooks', b'pretxnchangegroup.lfs', checkrequireslfs, b'lfs'
265 265 )
266 266 else:
267 267 repo.prepushoutgoinghooks.add(b'lfs', wrapper.prepush)
268 268
269 269
270 270 def _trackedmatcher(repo):
271 271 """Return a function (path, size) -> bool indicating whether or not to
272 272 track a given file with lfs."""
273 273 if not repo.wvfs.exists(b'.hglfs'):
274 274 # No '.hglfs' in wdir. Fallback to config for now.
275 275 trackspec = repo.ui.config(b'lfs', b'track')
276 276
277 277 # deprecated config: lfs.threshold
278 278 threshold = repo.ui.configbytes(b'lfs', b'threshold')
279 279 if threshold:
280 280 filesetlang.parse(trackspec) # make sure syntax errors are confined
281 281 trackspec = b"(%s) | size('>%d')" % (trackspec, threshold)
282 282
283 283 return minifileset.compile(trackspec)
284 284
285 285 data = repo.wvfs.tryread(b'.hglfs')
286 286 if not data:
287 287 return lambda p, s: False
288 288
289 289 # Parse errors here will abort with a message that points to the .hglfs file
290 290 # and line number.
291 291 cfg = config.config()
292 292 cfg.parse(b'.hglfs', data)
293 293
294 294 try:
295 295 rules = [
296 296 (minifileset.compile(pattern), minifileset.compile(rule))
297 297 for pattern, rule in cfg.items(b'track')
298 298 ]
299 299 except error.ParseError as e:
300 300 # The original exception gives no indicator that the error is in the
301 301 # .hglfs file, so add that.
302 302
303 303 # TODO: See if the line number of the file can be made available.
304 304 raise error.Abort(_(b'parse error in .hglfs: %s') % e)
305 305
306 306 def _match(path, size):
307 307 for pat, rule in rules:
308 308 if pat(path, size):
309 309 return rule(path, size)
310 310
311 311 return False
312 312
313 313 return _match
314 314
315 315
316 316 # Called by remotefilelog
317 317 def wrapfilelog(filelog):
318 318 wrapfunction = extensions.wrapfunction
319 319
320 320 wrapfunction(filelog, 'addrevision', wrapper.filelogaddrevision)
321 321 wrapfunction(filelog, 'renamed', wrapper.filelogrenamed)
322 322 wrapfunction(filelog, 'size', wrapper.filelogsize)
323 323
324 324
325 325 @eh.wrapfunction(localrepo, b'resolverevlogstorevfsoptions')
326 326 def _resolverevlogstorevfsoptions(orig, ui, requirements, features):
327 327 opts = orig(ui, requirements, features)
328 328 for name, module in extensions.extensions(ui):
329 329 if module is sys.modules[__name__]:
330 330 if revlog.REVIDX_EXTSTORED in opts[b'flagprocessors']:
331 331 msg = (
332 332 _(b"cannot register multiple processors on flag '%#x'.")
333 333 % revlog.REVIDX_EXTSTORED
334 334 )
335 335 raise error.Abort(msg)
336 336
337 337 opts[b'flagprocessors'][revlog.REVIDX_EXTSTORED] = lfsprocessor
338 338 break
339 339
340 340 return opts
341 341
342 342
343 343 @eh.extsetup
344 344 def _extsetup(ui):
345 345 wrapfilelog(filelog.filelog)
346 346
347 347 context.basefilectx.islfs = wrapper.filectxislfs
348 348
349 349 scmutil.fileprefetchhooks.add(b'lfs', wrapper._prefetchfiles)
350 350
351 351 # Make bundle choose changegroup3 instead of changegroup2. This affects
352 352 # "hg bundle" command. Note: it does not cover all bundle formats like
353 353 # "packed1". Using "packed1" with lfs will likely cause trouble.
354 exchange._bundlespeccontentopts[b"v2"][b"cg.version"] = b"03"
354 bundlecaches._bundlespeccontentopts[b"v2"][b"cg.version"] = b"03"
355 355
356 356
357 357 @eh.filesetpredicate(b'lfs()')
358 358 def lfsfileset(mctx, x):
359 359 """File that uses LFS storage."""
360 360 # i18n: "lfs" is a keyword
361 361 filesetlang.getargs(x, 0, 0, _(b"lfs takes no arguments"))
362 362 ctx = mctx.ctx
363 363
364 364 def lfsfilep(f):
365 365 return wrapper.pointerfromctx(ctx, f, removed=True) is not None
366 366
367 367 return mctx.predicate(lfsfilep, predrepr=b'<lfs>')
368 368
369 369
370 370 @eh.templatekeyword(b'lfs_files', requires={b'ctx'})
371 371 def lfsfiles(context, mapping):
372 372 """List of strings. All files modified, added, or removed by this
373 373 changeset."""
374 374 ctx = context.resource(mapping, b'ctx')
375 375
376 376 pointers = wrapper.pointersfromctx(ctx, removed=True) # {path: pointer}
377 377 files = sorted(pointers.keys())
378 378
379 379 def pointer(v):
380 380 # In the file spec, version is first and the other keys are sorted.
381 381 sortkeyfunc = lambda x: (x[0] != b'version', x)
382 382 items = sorted(pycompat.iteritems(pointers[v]), key=sortkeyfunc)
383 383 return util.sortdict(items)
384 384
385 385 makemap = lambda v: {
386 386 b'file': v,
387 387 b'lfsoid': pointers[v].oid() if pointers[v] else None,
388 388 b'lfspointer': templateutil.hybriddict(pointer(v)),
389 389 }
390 390
391 391 # TODO: make the separator ', '?
392 392 f = templateutil._showcompatlist(context, mapping, b'lfs_file', files)
393 393 return templateutil.hybrid(f, files, makemap, pycompat.identity)
394 394
395 395
396 396 @eh.command(
397 397 b'debuglfsupload',
398 398 [(b'r', b'rev', [], _(b'upload large files introduced by REV'))],
399 399 )
400 400 def debuglfsupload(ui, repo, **opts):
401 401 """upload lfs blobs added by the working copy parent or given revisions"""
402 402 revs = opts.get('rev', [])
403 403 pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
404 404 wrapper.uploadblobs(repo, pointers)
405 405
406 406
407 407 @eh.wrapcommand(
408 408 b'verify',
409 409 opts=[(b'', b'no-lfs', None, _(b'skip missing lfs blob content'))],
410 410 )
411 411 def verify(orig, ui, repo, **opts):
412 412 skipflags = repo.ui.configint(b'verify', b'skipflags')
413 413 no_lfs = opts.pop('no_lfs')
414 414
415 415 if skipflags:
416 416 # --lfs overrides the config bit, if set.
417 417 if no_lfs is False:
418 418 skipflags &= ~repository.REVISION_FLAG_EXTSTORED
419 419 else:
420 420 skipflags = 0
421 421
422 422 if no_lfs is True:
423 423 skipflags |= repository.REVISION_FLAG_EXTSTORED
424 424
425 425 with ui.configoverride({(b'verify', b'skipflags'): skipflags}):
426 426 return orig(ui, repo, **opts)
@@ -1,7663 +1,7666 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import sys
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 19 nullrev,
20 20 short,
21 21 wdirhex,
22 22 wdirrev,
23 23 )
24 24 from .pycompat import open
25 25 from . import (
26 26 archival,
27 27 bookmarks,
28 28 bundle2,
29 bundlecaches,
29 30 changegroup,
30 31 cmdutil,
31 32 copies,
32 33 debugcommands as debugcommandsmod,
33 34 destutil,
34 35 dirstateguard,
35 36 discovery,
36 37 encoding,
37 38 error,
38 39 exchange,
39 40 extensions,
40 41 filemerge,
41 42 formatter,
42 43 graphmod,
43 44 grep as grepmod,
44 45 hbisect,
45 46 help,
46 47 hg,
47 48 logcmdutil,
48 49 merge as mergemod,
49 50 mergestate as mergestatemod,
50 51 narrowspec,
51 52 obsolete,
52 53 obsutil,
53 54 patch,
54 55 phases,
55 56 pycompat,
56 57 rcutil,
57 58 registrar,
58 59 requirements,
59 60 revsetlang,
60 61 rewriteutil,
61 62 scmutil,
62 63 server,
63 64 shelve as shelvemod,
64 65 state as statemod,
65 66 streamclone,
66 67 tags as tagsmod,
67 68 ui as uimod,
68 69 util,
69 70 verify as verifymod,
70 71 vfs as vfsmod,
71 72 wireprotoserver,
72 73 )
73 74 from .utils import (
74 75 dateutil,
75 76 stringutil,
76 77 )
77 78
78 79 table = {}
79 80 table.update(debugcommandsmod.command._table)
80 81
81 82 command = registrar.command(table)
82 83 INTENT_READONLY = registrar.INTENT_READONLY
83 84
84 85 # common command options
85 86
86 87 globalopts = [
87 88 (
88 89 b'R',
89 90 b'repository',
90 91 b'',
91 92 _(b'repository root directory or name of overlay bundle file'),
92 93 _(b'REPO'),
93 94 ),
94 95 (b'', b'cwd', b'', _(b'change working directory'), _(b'DIR')),
95 96 (
96 97 b'y',
97 98 b'noninteractive',
98 99 None,
99 100 _(
100 101 b'do not prompt, automatically pick the first choice for all prompts'
101 102 ),
102 103 ),
103 104 (b'q', b'quiet', None, _(b'suppress output')),
104 105 (b'v', b'verbose', None, _(b'enable additional output')),
105 106 (
106 107 b'',
107 108 b'color',
108 109 b'',
109 110 # i18n: 'always', 'auto', 'never', and 'debug' are keywords
110 111 # and should not be translated
111 112 _(b"when to colorize (boolean, always, auto, never, or debug)"),
112 113 _(b'TYPE'),
113 114 ),
114 115 (
115 116 b'',
116 117 b'config',
117 118 [],
118 119 _(b'set/override config option (use \'section.name=value\')'),
119 120 _(b'CONFIG'),
120 121 ),
121 122 (b'', b'debug', None, _(b'enable debugging output')),
122 123 (b'', b'debugger', None, _(b'start debugger')),
123 124 (
124 125 b'',
125 126 b'encoding',
126 127 encoding.encoding,
127 128 _(b'set the charset encoding'),
128 129 _(b'ENCODE'),
129 130 ),
130 131 (
131 132 b'',
132 133 b'encodingmode',
133 134 encoding.encodingmode,
134 135 _(b'set the charset encoding mode'),
135 136 _(b'MODE'),
136 137 ),
137 138 (b'', b'traceback', None, _(b'always print a traceback on exception')),
138 139 (b'', b'time', None, _(b'time how long the command takes')),
139 140 (b'', b'profile', None, _(b'print command execution profile')),
140 141 (b'', b'version', None, _(b'output version information and exit')),
141 142 (b'h', b'help', None, _(b'display help and exit')),
142 143 (b'', b'hidden', False, _(b'consider hidden changesets')),
143 144 (
144 145 b'',
145 146 b'pager',
146 147 b'auto',
147 148 _(b"when to paginate (boolean, always, auto, or never)"),
148 149 _(b'TYPE'),
149 150 ),
150 151 ]
151 152
152 153 dryrunopts = cmdutil.dryrunopts
153 154 remoteopts = cmdutil.remoteopts
154 155 walkopts = cmdutil.walkopts
155 156 commitopts = cmdutil.commitopts
156 157 commitopts2 = cmdutil.commitopts2
157 158 commitopts3 = cmdutil.commitopts3
158 159 formatteropts = cmdutil.formatteropts
159 160 templateopts = cmdutil.templateopts
160 161 logopts = cmdutil.logopts
161 162 diffopts = cmdutil.diffopts
162 163 diffwsopts = cmdutil.diffwsopts
163 164 diffopts2 = cmdutil.diffopts2
164 165 mergetoolopts = cmdutil.mergetoolopts
165 166 similarityopts = cmdutil.similarityopts
166 167 subrepoopts = cmdutil.subrepoopts
167 168 debugrevlogopts = cmdutil.debugrevlogopts
168 169
169 170 # Commands start here, listed alphabetically
170 171
171 172
172 173 @command(
173 174 b'abort',
174 175 dryrunopts,
175 176 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
176 177 helpbasic=True,
177 178 )
178 179 def abort(ui, repo, **opts):
179 180 """abort an unfinished operation (EXPERIMENTAL)
180 181
181 182 Aborts a multistep operation like graft, histedit, rebase, merge,
182 183 and unshelve if they are in an unfinished state.
183 184
184 185 use --dry-run/-n to dry run the command.
185 186 """
186 187 dryrun = opts.get('dry_run')
187 188 abortstate = cmdutil.getunfinishedstate(repo)
188 189 if not abortstate:
189 190 raise error.Abort(_(b'no operation in progress'))
190 191 if not abortstate.abortfunc:
191 192 raise error.Abort(
192 193 (
193 194 _(b"%s in progress but does not support 'hg abort'")
194 195 % (abortstate._opname)
195 196 ),
196 197 hint=abortstate.hint(),
197 198 )
198 199 if dryrun:
199 200 ui.status(
200 201 _(b'%s in progress, will be aborted\n') % (abortstate._opname)
201 202 )
202 203 return
203 204 return abortstate.abortfunc(ui, repo)
204 205
205 206
206 207 @command(
207 208 b'add',
208 209 walkopts + subrepoopts + dryrunopts,
209 210 _(b'[OPTION]... [FILE]...'),
210 211 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
211 212 helpbasic=True,
212 213 inferrepo=True,
213 214 )
214 215 def add(ui, repo, *pats, **opts):
215 216 """add the specified files on the next commit
216 217
217 218 Schedule files to be version controlled and added to the
218 219 repository.
219 220
220 221 The files will be added to the repository at the next commit. To
221 222 undo an add before that, see :hg:`forget`.
222 223
223 224 If no names are given, add all files to the repository (except
224 225 files matching ``.hgignore``).
225 226
226 227 .. container:: verbose
227 228
228 229 Examples:
229 230
230 231 - New (unknown) files are added
231 232 automatically by :hg:`add`::
232 233
233 234 $ ls
234 235 foo.c
235 236 $ hg status
236 237 ? foo.c
237 238 $ hg add
238 239 adding foo.c
239 240 $ hg status
240 241 A foo.c
241 242
242 243 - Specific files to be added can be specified::
243 244
244 245 $ ls
245 246 bar.c foo.c
246 247 $ hg status
247 248 ? bar.c
248 249 ? foo.c
249 250 $ hg add bar.c
250 251 $ hg status
251 252 A bar.c
252 253 ? foo.c
253 254
254 255 Returns 0 if all files are successfully added.
255 256 """
256 257
257 258 m = scmutil.match(repo[None], pats, pycompat.byteskwargs(opts))
258 259 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
259 260 rejected = cmdutil.add(ui, repo, m, b"", uipathfn, False, **opts)
260 261 return rejected and 1 or 0
261 262
262 263
263 264 @command(
264 265 b'addremove',
265 266 similarityopts + subrepoopts + walkopts + dryrunopts,
266 267 _(b'[OPTION]... [FILE]...'),
267 268 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
268 269 inferrepo=True,
269 270 )
270 271 def addremove(ui, repo, *pats, **opts):
271 272 """add all new files, delete all missing files
272 273
273 274 Add all new files and remove all missing files from the
274 275 repository.
275 276
276 277 Unless names are given, new files are ignored if they match any of
277 278 the patterns in ``.hgignore``. As with add, these changes take
278 279 effect at the next commit.
279 280
280 281 Use the -s/--similarity option to detect renamed files. This
281 282 option takes a percentage between 0 (disabled) and 100 (files must
282 283 be identical) as its parameter. With a parameter greater than 0,
283 284 this compares every removed file with every added file and records
284 285 those similar enough as renames. Detecting renamed files this way
285 286 can be expensive. After using this option, :hg:`status -C` can be
286 287 used to check which files were identified as moved or renamed. If
287 288 not specified, -s/--similarity defaults to 100 and only renames of
288 289 identical files are detected.
289 290
290 291 .. container:: verbose
291 292
292 293 Examples:
293 294
294 295 - A number of files (bar.c and foo.c) are new,
295 296 while foobar.c has been removed (without using :hg:`remove`)
296 297 from the repository::
297 298
298 299 $ ls
299 300 bar.c foo.c
300 301 $ hg status
301 302 ! foobar.c
302 303 ? bar.c
303 304 ? foo.c
304 305 $ hg addremove
305 306 adding bar.c
306 307 adding foo.c
307 308 removing foobar.c
308 309 $ hg status
309 310 A bar.c
310 311 A foo.c
311 312 R foobar.c
312 313
313 314 - A file foobar.c was moved to foo.c without using :hg:`rename`.
314 315 Afterwards, it was edited slightly::
315 316
316 317 $ ls
317 318 foo.c
318 319 $ hg status
319 320 ! foobar.c
320 321 ? foo.c
321 322 $ hg addremove --similarity 90
322 323 removing foobar.c
323 324 adding foo.c
324 325 recording removal of foobar.c as rename to foo.c (94% similar)
325 326 $ hg status -C
326 327 A foo.c
327 328 foobar.c
328 329 R foobar.c
329 330
330 331 Returns 0 if all files are successfully added.
331 332 """
332 333 opts = pycompat.byteskwargs(opts)
333 334 if not opts.get(b'similarity'):
334 335 opts[b'similarity'] = b'100'
335 336 matcher = scmutil.match(repo[None], pats, opts)
336 337 relative = scmutil.anypats(pats, opts)
337 338 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
338 339 return scmutil.addremove(repo, matcher, b"", uipathfn, opts)
339 340
340 341
341 342 @command(
342 343 b'annotate|blame',
343 344 [
344 345 (b'r', b'rev', b'', _(b'annotate the specified revision'), _(b'REV')),
345 346 (
346 347 b'',
347 348 b'follow',
348 349 None,
349 350 _(b'follow copies/renames and list the filename (DEPRECATED)'),
350 351 ),
351 352 (b'', b'no-follow', None, _(b"don't follow copies and renames")),
352 353 (b'a', b'text', None, _(b'treat all files as text')),
353 354 (b'u', b'user', None, _(b'list the author (long with -v)')),
354 355 (b'f', b'file', None, _(b'list the filename')),
355 356 (b'd', b'date', None, _(b'list the date (short with -q)')),
356 357 (b'n', b'number', None, _(b'list the revision number (default)')),
357 358 (b'c', b'changeset', None, _(b'list the changeset')),
358 359 (
359 360 b'l',
360 361 b'line-number',
361 362 None,
362 363 _(b'show line number at the first appearance'),
363 364 ),
364 365 (
365 366 b'',
366 367 b'skip',
367 368 [],
368 369 _(b'revset to not display (EXPERIMENTAL)'),
369 370 _(b'REV'),
370 371 ),
371 372 ]
372 373 + diffwsopts
373 374 + walkopts
374 375 + formatteropts,
375 376 _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
376 377 helpcategory=command.CATEGORY_FILE_CONTENTS,
377 378 helpbasic=True,
378 379 inferrepo=True,
379 380 )
380 381 def annotate(ui, repo, *pats, **opts):
381 382 """show changeset information by line for each file
382 383
383 384 List changes in files, showing the revision id responsible for
384 385 each line.
385 386
386 387 This command is useful for discovering when a change was made and
387 388 by whom.
388 389
389 390 If you include --file, --user, or --date, the revision number is
390 391 suppressed unless you also include --number.
391 392
392 393 Without the -a/--text option, annotate will avoid processing files
393 394 it detects as binary. With -a, annotate will annotate the file
394 395 anyway, although the results will probably be neither useful
395 396 nor desirable.
396 397
397 398 .. container:: verbose
398 399
399 400 Template:
400 401
401 402 The following keywords are supported in addition to the common template
402 403 keywords and functions. See also :hg:`help templates`.
403 404
404 405 :lines: List of lines with annotation data.
405 406 :path: String. Repository-absolute path of the specified file.
406 407
407 408 And each entry of ``{lines}`` provides the following sub-keywords in
408 409 addition to ``{date}``, ``{node}``, ``{rev}``, ``{user}``, etc.
409 410
410 411 :line: String. Line content.
411 412 :lineno: Integer. Line number at that revision.
412 413 :path: String. Repository-absolute path of the file at that revision.
413 414
414 415 See :hg:`help templates.operators` for the list expansion syntax.
415 416
416 417 Returns 0 on success.
417 418 """
418 419 opts = pycompat.byteskwargs(opts)
419 420 if not pats:
420 421 raise error.Abort(_(b'at least one filename or pattern is required'))
421 422
422 423 if opts.get(b'follow'):
423 424 # --follow is deprecated and now just an alias for -f/--file
424 425 # to mimic the behavior of Mercurial before version 1.5
425 426 opts[b'file'] = True
426 427
427 428 if (
428 429 not opts.get(b'user')
429 430 and not opts.get(b'changeset')
430 431 and not opts.get(b'date')
431 432 and not opts.get(b'file')
432 433 ):
433 434 opts[b'number'] = True
434 435
435 436 linenumber = opts.get(b'line_number') is not None
436 437 if (
437 438 linenumber
438 439 and (not opts.get(b'changeset'))
439 440 and (not opts.get(b'number'))
440 441 ):
441 442 raise error.Abort(_(b'at least one of -n/-c is required for -l'))
442 443
443 444 rev = opts.get(b'rev')
444 445 if rev:
445 446 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
446 447 ctx = scmutil.revsingle(repo, rev)
447 448
448 449 ui.pager(b'annotate')
449 450 rootfm = ui.formatter(b'annotate', opts)
450 451 if ui.debugflag:
451 452 shorthex = pycompat.identity
452 453 else:
453 454
454 455 def shorthex(h):
455 456 return h[:12]
456 457
457 458 if ui.quiet:
458 459 datefunc = dateutil.shortdate
459 460 else:
460 461 datefunc = dateutil.datestr
461 462 if ctx.rev() is None:
462 463 if opts.get(b'changeset'):
463 464 # omit "+" suffix which is appended to node hex
464 465 def formatrev(rev):
465 466 if rev == wdirrev:
466 467 return b'%d' % ctx.p1().rev()
467 468 else:
468 469 return b'%d' % rev
469 470
470 471 else:
471 472
472 473 def formatrev(rev):
473 474 if rev == wdirrev:
474 475 return b'%d+' % ctx.p1().rev()
475 476 else:
476 477 return b'%d ' % rev
477 478
478 479 def formathex(h):
479 480 if h == wdirhex:
480 481 return b'%s+' % shorthex(hex(ctx.p1().node()))
481 482 else:
482 483 return b'%s ' % shorthex(h)
483 484
484 485 else:
485 486 formatrev = b'%d'.__mod__
486 487 formathex = shorthex
487 488
488 489 opmap = [
489 490 (b'user', b' ', lambda x: x.fctx.user(), ui.shortuser),
490 491 (b'rev', b' ', lambda x: scmutil.intrev(x.fctx), formatrev),
491 492 (b'node', b' ', lambda x: hex(scmutil.binnode(x.fctx)), formathex),
492 493 (b'date', b' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
493 494 (b'path', b' ', lambda x: x.fctx.path(), pycompat.bytestr),
494 495 (b'lineno', b':', lambda x: x.lineno, pycompat.bytestr),
495 496 ]
496 497 opnamemap = {
497 498 b'rev': b'number',
498 499 b'node': b'changeset',
499 500 b'path': b'file',
500 501 b'lineno': b'line_number',
501 502 }
502 503
503 504 if rootfm.isplain():
504 505
505 506 def makefunc(get, fmt):
506 507 return lambda x: fmt(get(x))
507 508
508 509 else:
509 510
510 511 def makefunc(get, fmt):
511 512 return get
512 513
513 514 datahint = rootfm.datahint()
514 515 funcmap = [
515 516 (makefunc(get, fmt), sep)
516 517 for fn, sep, get, fmt in opmap
517 518 if opts.get(opnamemap.get(fn, fn)) or fn in datahint
518 519 ]
519 520 funcmap[0] = (funcmap[0][0], b'') # no separator in front of first column
520 521 fields = b' '.join(
521 522 fn
522 523 for fn, sep, get, fmt in opmap
523 524 if opts.get(opnamemap.get(fn, fn)) or fn in datahint
524 525 )
525 526
526 527 def bad(x, y):
527 528 raise error.Abort(b"%s: %s" % (x, y))
528 529
529 530 m = scmutil.match(ctx, pats, opts, badfn=bad)
530 531
531 532 follow = not opts.get(b'no_follow')
532 533 diffopts = patch.difffeatureopts(
533 534 ui, opts, section=b'annotate', whitespace=True
534 535 )
535 536 skiprevs = opts.get(b'skip')
536 537 if skiprevs:
537 538 skiprevs = scmutil.revrange(repo, skiprevs)
538 539
539 540 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
540 541 for abs in ctx.walk(m):
541 542 fctx = ctx[abs]
542 543 rootfm.startitem()
543 544 rootfm.data(path=abs)
544 545 if not opts.get(b'text') and fctx.isbinary():
545 546 rootfm.plain(_(b"%s: binary file\n") % uipathfn(abs))
546 547 continue
547 548
548 549 fm = rootfm.nested(b'lines', tmpl=b'{rev}: {line}')
549 550 lines = fctx.annotate(
550 551 follow=follow, skiprevs=skiprevs, diffopts=diffopts
551 552 )
552 553 if not lines:
553 554 fm.end()
554 555 continue
555 556 formats = []
556 557 pieces = []
557 558
558 559 for f, sep in funcmap:
559 560 l = [f(n) for n in lines]
560 561 if fm.isplain():
561 562 sizes = [encoding.colwidth(x) for x in l]
562 563 ml = max(sizes)
563 564 formats.append([sep + b' ' * (ml - w) + b'%s' for w in sizes])
564 565 else:
565 566 formats.append([b'%s'] * len(l))
566 567 pieces.append(l)
567 568
568 569 for f, p, n in zip(zip(*formats), zip(*pieces), lines):
569 570 fm.startitem()
570 571 fm.context(fctx=n.fctx)
571 572 fm.write(fields, b"".join(f), *p)
572 573 if n.skip:
573 574 fmt = b"* %s"
574 575 else:
575 576 fmt = b": %s"
576 577 fm.write(b'line', fmt, n.text)
577 578
578 579 if not lines[-1].text.endswith(b'\n'):
579 580 fm.plain(b'\n')
580 581 fm.end()
581 582
582 583 rootfm.end()
583 584
584 585
585 586 @command(
586 587 b'archive',
587 588 [
588 589 (b'', b'no-decode', None, _(b'do not pass files through decoders')),
589 590 (
590 591 b'p',
591 592 b'prefix',
592 593 b'',
593 594 _(b'directory prefix for files in archive'),
594 595 _(b'PREFIX'),
595 596 ),
596 597 (b'r', b'rev', b'', _(b'revision to distribute'), _(b'REV')),
597 598 (b't', b'type', b'', _(b'type of distribution to create'), _(b'TYPE')),
598 599 ]
599 600 + subrepoopts
600 601 + walkopts,
601 602 _(b'[OPTION]... DEST'),
602 603 helpcategory=command.CATEGORY_IMPORT_EXPORT,
603 604 )
604 605 def archive(ui, repo, dest, **opts):
605 606 '''create an unversioned archive of a repository revision
606 607
607 608 By default, the revision used is the parent of the working
608 609 directory; use -r/--rev to specify a different revision.
609 610
610 611 The archive type is automatically detected based on file
611 612 extension (to override, use -t/--type).
612 613
613 614 .. container:: verbose
614 615
615 616 Examples:
616 617
617 618 - create a zip file containing the 1.0 release::
618 619
619 620 hg archive -r 1.0 project-1.0.zip
620 621
621 622 - create a tarball excluding .hg files::
622 623
623 624 hg archive project.tar.gz -X ".hg*"
624 625
625 626 Valid types are:
626 627
627 628 :``files``: a directory full of files (default)
628 629 :``tar``: tar archive, uncompressed
629 630 :``tbz2``: tar archive, compressed using bzip2
630 631 :``tgz``: tar archive, compressed using gzip
631 632 :``txz``: tar archive, compressed using lzma (only in Python 3)
632 633 :``uzip``: zip archive, uncompressed
633 634 :``zip``: zip archive, compressed using deflate
634 635
635 636 The exact name of the destination archive or directory is given
636 637 using a format string; see :hg:`help export` for details.
637 638
638 639 Each member added to an archive file has a directory prefix
639 640 prepended. Use -p/--prefix to specify a format string for the
640 641 prefix. The default is the basename of the archive, with suffixes
641 642 removed.
642 643
643 644 Returns 0 on success.
644 645 '''
645 646
646 647 opts = pycompat.byteskwargs(opts)
647 648 rev = opts.get(b'rev')
648 649 if rev:
649 650 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
650 651 ctx = scmutil.revsingle(repo, rev)
651 652 if not ctx:
652 653 raise error.Abort(_(b'no working directory: please specify a revision'))
653 654 node = ctx.node()
654 655 dest = cmdutil.makefilename(ctx, dest)
655 656 if os.path.realpath(dest) == repo.root:
656 657 raise error.Abort(_(b'repository root cannot be destination'))
657 658
658 659 kind = opts.get(b'type') or archival.guesskind(dest) or b'files'
659 660 prefix = opts.get(b'prefix')
660 661
661 662 if dest == b'-':
662 663 if kind == b'files':
663 664 raise error.Abort(_(b'cannot archive plain files to stdout'))
664 665 dest = cmdutil.makefileobj(ctx, dest)
665 666 if not prefix:
666 667 prefix = os.path.basename(repo.root) + b'-%h'
667 668
668 669 prefix = cmdutil.makefilename(ctx, prefix)
669 670 match = scmutil.match(ctx, [], opts)
670 671 archival.archive(
671 672 repo,
672 673 dest,
673 674 node,
674 675 kind,
675 676 not opts.get(b'no_decode'),
676 677 match,
677 678 prefix,
678 679 subrepos=opts.get(b'subrepos'),
679 680 )
680 681
681 682
682 683 @command(
683 684 b'backout',
684 685 [
685 686 (
686 687 b'',
687 688 b'merge',
688 689 None,
689 690 _(b'merge with old dirstate parent after backout'),
690 691 ),
691 692 (
692 693 b'',
693 694 b'commit',
694 695 None,
695 696 _(b'commit if no conflicts were encountered (DEPRECATED)'),
696 697 ),
697 698 (b'', b'no-commit', None, _(b'do not commit')),
698 699 (
699 700 b'',
700 701 b'parent',
701 702 b'',
702 703 _(b'parent to choose when backing out merge (DEPRECATED)'),
703 704 _(b'REV'),
704 705 ),
705 706 (b'r', b'rev', b'', _(b'revision to backout'), _(b'REV')),
706 707 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
707 708 ]
708 709 + mergetoolopts
709 710 + walkopts
710 711 + commitopts
711 712 + commitopts2,
712 713 _(b'[OPTION]... [-r] REV'),
713 714 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
714 715 )
715 716 def backout(ui, repo, node=None, rev=None, **opts):
716 717 '''reverse effect of earlier changeset
717 718
718 719 Prepare a new changeset with the effect of REV undone in the
719 720 current working directory. If no conflicts were encountered,
720 721 it will be committed immediately.
721 722
722 723 If REV is the parent of the working directory, then this new changeset
723 724 is committed automatically (unless --no-commit is specified).
724 725
725 726 .. note::
726 727
727 728 :hg:`backout` cannot be used to fix either an unwanted or
728 729 incorrect merge.
729 730
730 731 .. container:: verbose
731 732
732 733 Examples:
733 734
734 735 - Reverse the effect of the parent of the working directory.
735 736 This backout will be committed immediately::
736 737
737 738 hg backout -r .
738 739
739 740 - Reverse the effect of previous bad revision 23::
740 741
741 742 hg backout -r 23
742 743
743 744 - Reverse the effect of previous bad revision 23 and
744 745 leave changes uncommitted::
745 746
746 747 hg backout -r 23 --no-commit
747 748 hg commit -m "Backout revision 23"
748 749
749 750 By default, the pending changeset will have one parent,
750 751 maintaining a linear history. With --merge, the pending
751 752 changeset will instead have two parents: the old parent of the
752 753 working directory and a new child of REV that simply undoes REV.
753 754
754 755 Before version 1.7, the behavior without --merge was equivalent
755 756 to specifying --merge followed by :hg:`update --clean .` to
756 757 cancel the merge and leave the child of REV as a head to be
757 758 merged separately.
758 759
759 760 See :hg:`help dates` for a list of formats valid for -d/--date.
760 761
761 762 See :hg:`help revert` for a way to restore files to the state
762 763 of another revision.
763 764
764 765 Returns 0 on success, 1 if nothing to backout or there are unresolved
765 766 files.
766 767 '''
767 768 with repo.wlock(), repo.lock():
768 769 return _dobackout(ui, repo, node, rev, **opts)
769 770
770 771
771 772 def _dobackout(ui, repo, node=None, rev=None, **opts):
772 773 cmdutil.check_incompatible_arguments(opts, 'no_commit', ['commit', 'merge'])
773 774 opts = pycompat.byteskwargs(opts)
774 775
775 776 if rev and node:
776 777 raise error.Abort(_(b"please specify just one revision"))
777 778
778 779 if not rev:
779 780 rev = node
780 781
781 782 if not rev:
782 783 raise error.Abort(_(b"please specify a revision to backout"))
783 784
784 785 date = opts.get(b'date')
785 786 if date:
786 787 opts[b'date'] = dateutil.parsedate(date)
787 788
788 789 cmdutil.checkunfinished(repo)
789 790 cmdutil.bailifchanged(repo)
790 791 ctx = scmutil.revsingle(repo, rev)
791 792 node = ctx.node()
792 793
793 794 op1, op2 = repo.dirstate.parents()
794 795 if not repo.changelog.isancestor(node, op1):
795 796 raise error.Abort(_(b'cannot backout change that is not an ancestor'))
796 797
797 798 p1, p2 = repo.changelog.parents(node)
798 799 if p1 == nullid:
799 800 raise error.Abort(_(b'cannot backout a change with no parents'))
800 801 if p2 != nullid:
801 802 if not opts.get(b'parent'):
802 803 raise error.Abort(_(b'cannot backout a merge changeset'))
803 804 p = repo.lookup(opts[b'parent'])
804 805 if p not in (p1, p2):
805 806 raise error.Abort(
806 807 _(b'%s is not a parent of %s') % (short(p), short(node))
807 808 )
808 809 parent = p
809 810 else:
810 811 if opts.get(b'parent'):
811 812 raise error.Abort(_(b'cannot use --parent on non-merge changeset'))
812 813 parent = p1
813 814
814 815 # the backout should appear on the same branch
815 816 branch = repo.dirstate.branch()
816 817 bheads = repo.branchheads(branch)
817 818 rctx = scmutil.revsingle(repo, hex(parent))
818 819 if not opts.get(b'merge') and op1 != node:
819 820 with dirstateguard.dirstateguard(repo, b'backout'):
820 821 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
821 822 with ui.configoverride(overrides, b'backout'):
822 823 stats = mergemod.back_out(ctx, parent=repo[parent])
823 824 repo.setparents(op1, op2)
824 825 hg._showstats(repo, stats)
825 826 if stats.unresolvedcount:
826 827 repo.ui.status(
827 828 _(b"use 'hg resolve' to retry unresolved file merges\n")
828 829 )
829 830 return 1
830 831 else:
831 832 hg.clean(repo, node, show_stats=False)
832 833 repo.dirstate.setbranch(branch)
833 834 cmdutil.revert(ui, repo, rctx)
834 835
835 836 if opts.get(b'no_commit'):
836 837 msg = _(b"changeset %s backed out, don't forget to commit.\n")
837 838 ui.status(msg % short(node))
838 839 return 0
839 840
840 841 def commitfunc(ui, repo, message, match, opts):
841 842 editform = b'backout'
842 843 e = cmdutil.getcommiteditor(
843 844 editform=editform, **pycompat.strkwargs(opts)
844 845 )
845 846 if not message:
846 847 # we don't translate commit messages
847 848 message = b"Backed out changeset %s" % short(node)
848 849 e = cmdutil.getcommiteditor(edit=True, editform=editform)
849 850 return repo.commit(
850 851 message, opts.get(b'user'), opts.get(b'date'), match, editor=e
851 852 )
852 853
853 854 newnode = cmdutil.commit(ui, repo, commitfunc, [], opts)
854 855 if not newnode:
855 856 ui.status(_(b"nothing changed\n"))
856 857 return 1
857 858 cmdutil.commitstatus(repo, newnode, branch, bheads)
858 859
859 860 def nice(node):
860 861 return b'%d:%s' % (repo.changelog.rev(node), short(node))
861 862
862 863 ui.status(
863 864 _(b'changeset %s backs out changeset %s\n')
864 865 % (nice(repo.changelog.tip()), nice(node))
865 866 )
866 867 if opts.get(b'merge') and op1 != node:
867 868 hg.clean(repo, op1, show_stats=False)
868 869 ui.status(
869 870 _(b'merging with changeset %s\n') % nice(repo.changelog.tip())
870 871 )
871 872 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
872 873 with ui.configoverride(overrides, b'backout'):
873 874 return hg.merge(repo[b'tip'])
874 875 return 0
875 876
876 877
877 878 @command(
878 879 b'bisect',
879 880 [
880 881 (b'r', b'reset', False, _(b'reset bisect state')),
881 882 (b'g', b'good', False, _(b'mark changeset good')),
882 883 (b'b', b'bad', False, _(b'mark changeset bad')),
883 884 (b's', b'skip', False, _(b'skip testing changeset')),
884 885 (b'e', b'extend', False, _(b'extend the bisect range')),
885 886 (
886 887 b'c',
887 888 b'command',
888 889 b'',
889 890 _(b'use command to check changeset state'),
890 891 _(b'CMD'),
891 892 ),
892 893 (b'U', b'noupdate', False, _(b'do not update to target')),
893 894 ],
894 895 _(b"[-gbsr] [-U] [-c CMD] [REV]"),
895 896 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
896 897 )
897 898 def bisect(
898 899 ui,
899 900 repo,
900 901 rev=None,
901 902 extra=None,
902 903 command=None,
903 904 reset=None,
904 905 good=None,
905 906 bad=None,
906 907 skip=None,
907 908 extend=None,
908 909 noupdate=None,
909 910 ):
910 911 """subdivision search of changesets
911 912
912 913 This command helps to find changesets which introduce problems. To
913 914 use, mark the earliest changeset you know exhibits the problem as
914 915 bad, then mark the latest changeset which is free from the problem
915 916 as good. Bisect will update your working directory to a revision
916 917 for testing (unless the -U/--noupdate option is specified). Once
917 918 you have performed tests, mark the working directory as good or
918 919 bad, and bisect will either update to another candidate changeset
919 920 or announce that it has found the bad revision.
920 921
921 922 As a shortcut, you can also use the revision argument to mark a
922 923 revision as good or bad without checking it out first.
923 924
924 925 If you supply a command, it will be used for automatic bisection.
925 926 The environment variable HG_NODE will contain the ID of the
926 927 changeset being tested. The exit status of the command will be
927 928 used to mark revisions as good or bad: status 0 means good, 125
928 929 means to skip the revision, 127 (command not found) will abort the
929 930 bisection, and any other non-zero exit status means the revision
930 931 is bad.
931 932
932 933 .. container:: verbose
933 934
934 935 Some examples:
935 936
936 937 - start a bisection with known bad revision 34, and good revision 12::
937 938
938 939 hg bisect --bad 34
939 940 hg bisect --good 12
940 941
941 942 - advance the current bisection by marking current revision as good or
942 943 bad::
943 944
944 945 hg bisect --good
945 946 hg bisect --bad
946 947
947 948 - mark the current revision, or a known revision, to be skipped (e.g. if
948 949 that revision is not usable because of another issue)::
949 950
950 951 hg bisect --skip
951 952 hg bisect --skip 23
952 953
953 954 - skip all revisions that do not touch directories ``foo`` or ``bar``::
954 955
955 956 hg bisect --skip "!( file('path:foo') & file('path:bar') )"
956 957
957 958 - forget the current bisection::
958 959
959 960 hg bisect --reset
960 961
961 962 - use 'make && make tests' to automatically find the first broken
962 963 revision::
963 964
964 965 hg bisect --reset
965 966 hg bisect --bad 34
966 967 hg bisect --good 12
967 968 hg bisect --command "make && make tests"
968 969
969 970 - see all changesets whose states are already known in the current
970 971 bisection::
971 972
972 973 hg log -r "bisect(pruned)"
973 974
974 975 - see the changeset currently being bisected (especially useful
975 976 if running with -U/--noupdate)::
976 977
977 978 hg log -r "bisect(current)"
978 979
979 980 - see all changesets that took part in the current bisection::
980 981
981 982 hg log -r "bisect(range)"
982 983
983 984 - you can even get a nice graph::
984 985
985 986 hg log --graph -r "bisect(range)"
986 987
987 988 See :hg:`help revisions.bisect` for more about the `bisect()` predicate.
988 989
989 990 Returns 0 on success.
990 991 """
991 992 # backward compatibility
992 993 if rev in b"good bad reset init".split():
993 994 ui.warn(_(b"(use of 'hg bisect <cmd>' is deprecated)\n"))
994 995 cmd, rev, extra = rev, extra, None
995 996 if cmd == b"good":
996 997 good = True
997 998 elif cmd == b"bad":
998 999 bad = True
999 1000 else:
1000 1001 reset = True
1001 1002 elif extra:
1002 1003 raise error.Abort(_(b'incompatible arguments'))
1003 1004
1004 1005 incompatibles = {
1005 1006 b'--bad': bad,
1006 1007 b'--command': bool(command),
1007 1008 b'--extend': extend,
1008 1009 b'--good': good,
1009 1010 b'--reset': reset,
1010 1011 b'--skip': skip,
1011 1012 }
1012 1013
1013 1014 enabled = [x for x in incompatibles if incompatibles[x]]
1014 1015
1015 1016 if len(enabled) > 1:
1016 1017 raise error.Abort(
1017 1018 _(b'%s and %s are incompatible') % tuple(sorted(enabled)[0:2])
1018 1019 )
1019 1020
1020 1021 if reset:
1021 1022 hbisect.resetstate(repo)
1022 1023 return
1023 1024
1024 1025 state = hbisect.load_state(repo)
1025 1026
1026 1027 # update state
1027 1028 if good or bad or skip:
1028 1029 if rev:
1029 1030 nodes = [repo[i].node() for i in scmutil.revrange(repo, [rev])]
1030 1031 else:
1031 1032 nodes = [repo.lookup(b'.')]
1032 1033 if good:
1033 1034 state[b'good'] += nodes
1034 1035 elif bad:
1035 1036 state[b'bad'] += nodes
1036 1037 elif skip:
1037 1038 state[b'skip'] += nodes
1038 1039 hbisect.save_state(repo, state)
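# bisection cannot start until at least one good and one bad revision are recorded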
1039 1040 if not (state[b'good'] and state[b'bad']):
1040 1041 return
1041 1042
1042 1043 def mayupdate(repo, node, show_stats=True):
1043 1044 """commonly used update sequence"""
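# with --noupdate the working copy is left alone; otherwise abort on an unfinished operation or local changes before updating to the candidate node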
1044 1045 if noupdate:
1045 1046 return
1046 1047 cmdutil.checkunfinished(repo)
1047 1048 cmdutil.bailifchanged(repo)
1048 1049 return hg.clean(repo, node, show_stats=show_stats)
1049 1050
1050 1051 displayer = logcmdutil.changesetdisplayer(ui, repo, {})
1051 1052
1052 1053 if command:
1053 1054 changesets = 1
1054 1055 if noupdate:
1055 1056 try:
1056 1057 node = state[b'current'][0]
1057 1058 except LookupError:
1058 1059 raise error.Abort(
1059 1060 _(
1060 1061 b'current bisect revision is unknown - '
1061 1062 b'start a new bisect to fix'
1062 1063 )
1063 1064 )
1064 1065 else:
1065 1066 node, p2 = repo.dirstate.parents()
1066 1067 if p2 != nullid:
1067 1068 raise error.Abort(_(b'current bisect revision is a merge'))
1068 1069 if rev:
1069 1070 node = repo[scmutil.revsingle(repo, rev, node)].node()
1070 1071 with hbisect.restore_state(repo, state, node):
1071 1072 while changesets:
1072 1073 # update state
1073 1074 state[b'current'] = [node]
1074 1075 hbisect.save_state(repo, state)
1075 1076 status = ui.system(
1076 1077 command,
1077 1078 environ={b'HG_NODE': hex(node)},
1078 1079 blockedtag=b'bisect_check',
1079 1080 )
1080 1081 if status == 125:
1081 1082 transition = b"skip"
1082 1083 elif status == 0:
1083 1084 transition = b"good"
1084 1085 # status < 0 means process was killed
1085 1086 elif status == 127:
1086 1087 raise error.Abort(_(b"failed to execute %s") % command)
1087 1088 elif status < 0:
1088 1089 raise error.Abort(_(b"%s killed") % command)
1089 1090 else:
1090 1091 transition = b"bad"
1091 1092 state[transition].append(node)
1092 1093 ctx = repo[node]
1093 1094 ui.status(
1094 1095 _(b'changeset %d:%s: %s\n') % (ctx.rev(), ctx, transition)
1095 1096 )
1096 1097 hbisect.checkstate(state)
1097 1098 # bisect
1098 1099 nodes, changesets, bgood = hbisect.bisect(repo, state)
1099 1100 # update to next check
1100 1101 node = nodes[0]
1101 1102 mayupdate(repo, node, show_stats=False)
1102 1103 hbisect.printresult(ui, repo, state, displayer, nodes, bgood)
1103 1104 return
1104 1105
1105 1106 hbisect.checkstate(state)
1106 1107
1107 1108 # actually bisect
1108 1109 nodes, changesets, good = hbisect.bisect(repo, state)
1109 1110 if extend:
1110 1111 if not changesets:
1111 1112 extendnode = hbisect.extendrange(repo, state, nodes, good)
1112 1113 if extendnode is not None:
1113 1114 ui.write(
1114 1115 _(b"Extending search to changeset %d:%s\n")
1115 1116 % (extendnode.rev(), extendnode)
1116 1117 )
1117 1118 state[b'current'] = [extendnode.node()]
1118 1119 hbisect.save_state(repo, state)
1119 1120 return mayupdate(repo, extendnode.node())
1120 1121 raise error.Abort(_(b"nothing to extend"))
1121 1122
1122 1123 if changesets == 0:
1123 1124 hbisect.printresult(ui, repo, state, displayer, nodes, good)
1124 1125 else:
1125 1126 assert len(nodes) == 1 # only a single node can be tested next
1126 1127 node = nodes[0]
1127 1128 # compute the approximate number of remaining tests
1128 1129 tests, size = 0, 2
1129 1130 while size <= changesets:
1130 1131 tests, size = tests + 1, size * 2
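# 'tests' is now roughly log2(changesets): each remaining test halves the candidate range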
1131 1132 rev = repo.changelog.rev(node)
1132 1133 ui.write(
1133 1134 _(
1134 1135 b"Testing changeset %d:%s "
1135 1136 b"(%d changesets remaining, ~%d tests)\n"
1136 1137 )
1137 1138 % (rev, short(node), changesets, tests)
1138 1139 )
1139 1140 state[b'current'] = [node]
1140 1141 hbisect.save_state(repo, state)
1141 1142 return mayupdate(repo, node)
1142 1143
1143 1144
1144 1145 @command(
1145 1146 b'bookmarks|bookmark',
1146 1147 [
1147 1148 (b'f', b'force', False, _(b'force')),
1148 1149 (b'r', b'rev', b'', _(b'revision for bookmark action'), _(b'REV')),
1149 1150 (b'd', b'delete', False, _(b'delete a given bookmark')),
1150 1151 (b'm', b'rename', b'', _(b'rename a given bookmark'), _(b'OLD')),
1151 1152 (b'i', b'inactive', False, _(b'mark a bookmark inactive')),
1152 1153 (b'l', b'list', False, _(b'list existing bookmarks')),
1153 1154 ]
1154 1155 + formatteropts,
1155 1156 _(b'hg bookmarks [OPTIONS]... [NAME]...'),
1156 1157 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
1157 1158 )
1158 1159 def bookmark(ui, repo, *names, **opts):
1159 1160 '''create a new bookmark or list existing bookmarks
1160 1161
1161 1162 Bookmarks are labels on changesets to help track lines of development.
1162 1163 Bookmarks are unversioned and can be moved, renamed and deleted.
1163 1164 Deleting or moving a bookmark has no effect on the associated changesets.
1164 1165
1165 1166 Creating or updating to a bookmark causes it to be marked as 'active'.
1166 1167 The active bookmark is indicated with a '*'.
1167 1168 When a commit is made, the active bookmark will advance to the new commit.
1168 1169 A plain :hg:`update` will also advance an active bookmark, if possible.
1169 1170 Updating away from a bookmark will cause it to be deactivated.
1170 1171
1171 1172 Bookmarks can be pushed and pulled between repositories (see
1172 1173 :hg:`help push` and :hg:`help pull`). If a shared bookmark has
1173 1174 diverged, a new 'divergent bookmark' of the form 'name@path' will
1174 1175 be created. Using :hg:`merge` will resolve the divergence.
1175 1176
1176 1177 Specifying a bookmark as '.' with the -m/-d/-l options is equivalent to specifying
1177 1178 the active bookmark's name.
1178 1179
1179 1180 A bookmark named '@' has the special property that :hg:`clone` will
1180 1181 check it out by default if it exists.
1181 1182
1182 1183 .. container:: verbose
1183 1184
1184 1185 Template:
1185 1186
1186 1187 The following keywords are supported in addition to the common template
1187 1188 keywords and functions such as ``{bookmark}``. See also
1188 1189 :hg:`help templates`.
1189 1190
1190 1191 :active: Boolean. True if the bookmark is active.
1191 1192
1192 1193 Examples:
1193 1194
1194 1195 - create an active bookmark for a new line of development::
1195 1196
1196 1197 hg book new-feature
1197 1198
1198 1199 - create an inactive bookmark as a place marker::
1199 1200
1200 1201 hg book -i reviewed
1201 1202
1202 1203 - create an inactive bookmark on another changeset::
1203 1204
1204 1205 hg book -r .^ tested
1205 1206
1206 1207 - rename bookmark turkey to dinner::
1207 1208
1208 1209 hg book -m turkey dinner
1209 1210
1210 1211 - move the '@' bookmark from another branch::
1211 1212
1212 1213 hg book -f @
1213 1214
1214 1215 - print only the active bookmark name::
1215 1216
1216 1217 hg book -ql .
1217 1218 '''
1218 1219 opts = pycompat.byteskwargs(opts)
1219 1220 force = opts.get(b'force')
1220 1221 rev = opts.get(b'rev')
1221 1222 inactive = opts.get(b'inactive') # meaning add/rename to inactive bookmark
1222 1223
1223 1224 action = cmdutil.check_at_most_one_arg(opts, b'delete', b'rename', b'list')
1224 1225 if action:
1225 1226 cmdutil.check_incompatible_arguments(opts, action, [b'rev'])
1226 1227 elif names or rev:
1227 1228 action = b'add'
1228 1229 elif inactive:
1229 1230 action = b'inactive' # meaning deactivate
1230 1231 else:
1231 1232 action = b'list'
1232 1233
1233 1234 cmdutil.check_incompatible_arguments(
1234 1235 opts, b'inactive', [b'delete', b'list']
1235 1236 )
1236 1237 if not names and action in {b'add', b'delete'}:
1237 1238 raise error.Abort(_(b"bookmark name required"))
1238 1239
1239 1240 if action in {b'add', b'delete', b'rename', b'inactive'}:
1240 1241 with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
1241 1242 if action == b'delete':
1242 1243 names = pycompat.maplist(repo._bookmarks.expandname, names)
1243 1244 bookmarks.delete(repo, tr, names)
1244 1245 elif action == b'rename':
1245 1246 if not names:
1246 1247 raise error.Abort(_(b"new bookmark name required"))
1247 1248 elif len(names) > 1:
1248 1249 raise error.Abort(_(b"only one new bookmark name allowed"))
1249 1250 oldname = repo._bookmarks.expandname(opts[b'rename'])
1250 1251 bookmarks.rename(repo, tr, oldname, names[0], force, inactive)
1251 1252 elif action == b'add':
1252 1253 bookmarks.addbookmarks(repo, tr, names, rev, force, inactive)
1253 1254 elif action == b'inactive':
1254 1255 if len(repo._bookmarks) == 0:
1255 1256 ui.status(_(b"no bookmarks set\n"))
1256 1257 elif not repo._activebookmark:
1257 1258 ui.status(_(b"no active bookmark\n"))
1258 1259 else:
1259 1260 bookmarks.deactivate(repo)
1260 1261 elif action == b'list':
1261 1262 names = pycompat.maplist(repo._bookmarks.expandname, names)
1262 1263 with ui.formatter(b'bookmarks', opts) as fm:
1263 1264 bookmarks.printbookmarks(ui, repo, fm, names)
1264 1265 else:
1265 1266 raise error.ProgrammingError(b'invalid action: %s' % action)
1266 1267
1267 1268
1268 1269 @command(
1269 1270 b'branch',
1270 1271 [
1271 1272 (
1272 1273 b'f',
1273 1274 b'force',
1274 1275 None,
1275 1276 _(b'set branch name even if it shadows an existing branch'),
1276 1277 ),
1277 1278 (b'C', b'clean', None, _(b'reset branch name to parent branch name')),
1278 1279 (
1279 1280 b'r',
1280 1281 b'rev',
1281 1282 [],
1282 1283 _(b'change branches of the given revs (EXPERIMENTAL)'),
1283 1284 ),
1284 1285 ],
1285 1286 _(b'[-fC] [NAME]'),
1286 1287 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
1287 1288 )
1288 1289 def branch(ui, repo, label=None, **opts):
1289 1290 """set or show the current branch name
1290 1291
1291 1292 .. note::
1292 1293
1293 1294 Branch names are permanent and global. Use :hg:`bookmark` to create a
1294 1295 light-weight bookmark instead. See :hg:`help glossary` for more
1295 1296 information about named branches and bookmarks.
1296 1297
1297 1298 With no argument, show the current branch name. With one argument,
1298 1299 set the working directory branch name (the branch will not exist
1299 1300 in the repository until the next commit). Standard practice
1300 1301 recommends that primary development take place on the 'default'
1301 1302 branch.
1302 1303
1303 1304 Unless -f/--force is specified, branch will not let you set a
1304 1305 branch name that already exists.
1305 1306
1306 1307 Use -C/--clean to reset the working directory branch to that of
1307 1308 the parent of the working directory, negating a previous branch
1308 1309 change.
1309 1310
1310 1311 Use the command :hg:`update` to switch to an existing branch. Use
1311 1312 :hg:`commit --close-branch` to mark this branch head as closed.
1312 1313 When all heads of a branch are closed, the branch will be
1313 1314 considered closed.
1314 1315
1315 1316 Returns 0 on success.
1316 1317 """
1317 1318 opts = pycompat.byteskwargs(opts)
1318 1319 revs = opts.get(b'rev')
1319 1320 if label:
1320 1321 label = label.strip()
1321 1322
1322 1323 if not opts.get(b'clean') and not label:
1323 1324 if revs:
1324 1325 raise error.Abort(_(b"no branch name specified for the revisions"))
1325 1326 ui.write(b"%s\n" % repo.dirstate.branch())
1326 1327 return
1327 1328
1328 1329 with repo.wlock():
1329 1330 if opts.get(b'clean'):
1330 1331 label = repo[b'.'].branch()
1331 1332 repo.dirstate.setbranch(label)
1332 1333 ui.status(_(b'reset working directory to branch %s\n') % label)
1333 1334 elif label:
1334 1335
1335 1336 scmutil.checknewlabel(repo, label, b'branch')
1336 1337 if revs:
1337 1338 return cmdutil.changebranch(ui, repo, revs, label, opts)
1338 1339
1339 1340 if not opts.get(b'force') and label in repo.branchmap():
1340 1341 if label not in [p.branch() for p in repo[None].parents()]:
1341 1342 raise error.Abort(
1342 1343 _(b'a branch of the same name already exists'),
1343 1344 # i18n: "it" refers to an existing branch
1344 1345 hint=_(b"use 'hg update' to switch to it"),
1345 1346 )
1346 1347
1347 1348 repo.dirstate.setbranch(label)
1348 1349 ui.status(_(b'marked working directory as branch %s\n') % label)
1349 1350
1350 1351 # find any open named branches aside from default
1351 1352 for n, h, t, c in repo.branchmap().iterbranches():
1352 1353 if n != b"default" and not c:
1353 1354 return 0
1354 1355 ui.status(
1355 1356 _(
1356 1357 b'(branches are permanent and global, '
1357 1358 b'did you want a bookmark?)\n'
1358 1359 )
1359 1360 )
1360 1361
1361 1362
1362 1363 @command(
1363 1364 b'branches',
1364 1365 [
1365 1366 (
1366 1367 b'a',
1367 1368 b'active',
1368 1369 False,
1369 1370 _(b'show only branches that have unmerged heads (DEPRECATED)'),
1370 1371 ),
1371 1372 (b'c', b'closed', False, _(b'show normal and closed branches')),
1372 1373 (b'r', b'rev', [], _(b'show branch name(s) of the given rev')),
1373 1374 ]
1374 1375 + formatteropts,
1375 1376 _(b'[-c]'),
1376 1377 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
1377 1378 intents={INTENT_READONLY},
1378 1379 )
1379 1380 def branches(ui, repo, active=False, closed=False, **opts):
1380 1381 """list repository named branches
1381 1382
1382 1383 List the repository's named branches, indicating which ones are
1383 1384 inactive. If -c/--closed is specified, also list branches which have
1384 1385 been marked closed (see :hg:`commit --close-branch`).
1385 1386
1386 1387 Use the command :hg:`update` to switch to an existing branch.
1387 1388
1388 1389 .. container:: verbose
1389 1390
1390 1391 Template:
1391 1392
1392 1393 The following keywords are supported in addition to the common template
1393 1394 keywords and functions such as ``{branch}``. See also
1394 1395 :hg:`help templates`.
1395 1396
1396 1397 :active: Boolean. True if the branch is active.
1397 1398 :closed: Boolean. True if the branch is closed.
1398 1399 :current: Boolean. True if it is the current branch.
1399 1400
1400 1401 Returns 0.
1401 1402 """
1402 1403
1403 1404 opts = pycompat.byteskwargs(opts)
1404 1405 revs = opts.get(b'rev')
1405 1406 selectedbranches = None
1406 1407 if revs:
1407 1408 revs = scmutil.revrange(repo, revs)
1408 1409 getbi = repo.revbranchcache().branchinfo
1409 1410 selectedbranches = {getbi(r)[0] for r in revs}
1410 1411
1411 1412 ui.pager(b'branches')
1412 1413 fm = ui.formatter(b'branches', opts)
1413 1414 hexfunc = fm.hexfunc
1414 1415
1415 1416 allheads = set(repo.heads())
1416 1417 branches = []
1417 1418 for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
1418 1419 if selectedbranches is not None and tag not in selectedbranches:
1419 1420 continue
1420 1421 isactive = False
1421 1422 if not isclosed:
1422 1423 openheads = set(repo.branchmap().iteropen(heads))
1423 1424 isactive = bool(openheads & allheads)
1424 1425 branches.append((tag, repo[tip], isactive, not isclosed))
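# sort with active branches first, then by descending tip revision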
1425 1426 branches.sort(key=lambda i: (i[2], i[1].rev(), i[0], i[3]), reverse=True)
1426 1427
1427 1428 for tag, ctx, isactive, isopen in branches:
1428 1429 if active and not isactive:
1429 1430 continue
1430 1431 if isactive:
1431 1432 label = b'branches.active'
1432 1433 notice = b''
1433 1434 elif not isopen:
1434 1435 if not closed:
1435 1436 continue
1436 1437 label = b'branches.closed'
1437 1438 notice = _(b' (closed)')
1438 1439 else:
1439 1440 label = b'branches.inactive'
1440 1441 notice = _(b' (inactive)')
1441 1442 current = tag == repo.dirstate.branch()
1442 1443 if current:
1443 1444 label = b'branches.current'
1444 1445
1445 1446 fm.startitem()
1446 1447 fm.write(b'branch', b'%s', tag, label=label)
1447 1448 rev = ctx.rev()
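# pad so the rev:node column lines up regardless of branch name width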
1448 1449 padsize = max(31 - len(b"%d" % rev) - encoding.colwidth(tag), 0)
1449 1450 fmt = b' ' * padsize + b' %d:%s'
1450 1451 fm.condwrite(
1451 1452 not ui.quiet,
1452 1453 b'rev node',
1453 1454 fmt,
1454 1455 rev,
1455 1456 hexfunc(ctx.node()),
1456 1457 label=b'log.changeset changeset.%s' % ctx.phasestr(),
1457 1458 )
1458 1459 fm.context(ctx=ctx)
1459 1460 fm.data(active=isactive, closed=not isopen, current=current)
1460 1461 if not ui.quiet:
1461 1462 fm.plain(notice)
1462 1463 fm.plain(b'\n')
1463 1464 fm.end()
1464 1465
1465 1466
1466 1467 @command(
1467 1468 b'bundle',
1468 1469 [
1469 1470 (
1470 1471 b'f',
1471 1472 b'force',
1472 1473 None,
1473 1474 _(b'run even when the destination is unrelated'),
1474 1475 ),
1475 1476 (
1476 1477 b'r',
1477 1478 b'rev',
1478 1479 [],
1479 1480 _(b'a changeset intended to be added to the destination'),
1480 1481 _(b'REV'),
1481 1482 ),
1482 1483 (
1483 1484 b'b',
1484 1485 b'branch',
1485 1486 [],
1486 1487 _(b'a specific branch you would like to bundle'),
1487 1488 _(b'BRANCH'),
1488 1489 ),
1489 1490 (
1490 1491 b'',
1491 1492 b'base',
1492 1493 [],
1493 1494 _(b'a base changeset assumed to be available at the destination'),
1494 1495 _(b'REV'),
1495 1496 ),
1496 1497 (b'a', b'all', None, _(b'bundle all changesets in the repository')),
1497 1498 (
1498 1499 b't',
1499 1500 b'type',
1500 1501 b'bzip2',
1501 1502 _(b'bundle compression type to use'),
1502 1503 _(b'TYPE'),
1503 1504 ),
1504 1505 ]
1505 1506 + remoteopts,
1506 1507 _(b'[-f] [-t BUNDLESPEC] [-a] [-r REV]... [--base REV]... FILE [DEST]'),
1507 1508 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1508 1509 )
1509 1510 def bundle(ui, repo, fname, dest=None, **opts):
1510 1511 """create a bundle file
1511 1512
1512 1513 Generate a bundle file containing data to be transferred to another
1513 1514 repository.
1514 1515
1515 1516 To create a bundle containing all changesets, use -a/--all
1516 1517 (or --base null). If --base is given, hg assumes the destination
1517 1518 already has the nodes you specify with the --base parameters.
1518 1519 Otherwise, hg assumes the destination has all the nodes of the
1519 1520 repository you provide through the DEST option, or of
1520 1521 default-push/default if no destination is specified.
1521 1522
1522 1523 You can change bundle format with the -t/--type option. See
1523 1524 :hg:`help bundlespec` for documentation on this format. By default,
1524 1525 the most appropriate format is used and compression defaults to
1525 1526 bzip2.
1526 1527
1527 1528 The bundle file can then be transferred using conventional means
1528 1529 and applied to another repository with the unbundle or pull
1529 1530 command. This is useful when direct push and pull are not
1530 1531 available or when exporting an entire repository is undesirable.
1531 1532
1532 1533 Applying bundles preserves all changeset contents including
1533 1534 permissions, copy/rename information, and revision history.
1534 1535
1535 1536 Returns 0 on success, 1 if no changes found.
1536 1537 """
1537 1538 opts = pycompat.byteskwargs(opts)
1538 1539 revs = None
1539 1540 if b'rev' in opts:
1540 1541 revstrings = opts[b'rev']
1541 1542 revs = scmutil.revrange(repo, revstrings)
1542 1543 if revstrings and not revs:
1543 1544 raise error.Abort(_(b'no commits to bundle'))
1544 1545
1545 1546 bundletype = opts.get(b'type', b'bzip2').lower()
1546 1547 try:
1547 bundlespec = exchange.parsebundlespec(repo, bundletype, strict=False)
1548 bundlespec = bundlecaches.parsebundlespec(
1549 repo, bundletype, strict=False
1550 )
1548 1551 except error.UnsupportedBundleSpecification as e:
1549 1552 raise error.Abort(
1550 1553 pycompat.bytestr(e),
1551 1554 hint=_(b"see 'hg help bundlespec' for supported values for --type"),
1552 1555 )
1553 1556 cgversion = bundlespec.contentopts[b"cg.version"]
1554 1557
1555 1558 # Packed bundles are a pseudo bundle format for now.
1556 1559 if cgversion == b's1':
1557 1560 raise error.Abort(
1558 1561 _(b'packed bundles cannot be produced by "hg bundle"'),
1559 1562 hint=_(b"use 'hg debugcreatestreamclonebundle'"),
1560 1563 )
1561 1564
1562 1565 if opts.get(b'all'):
1563 1566 if dest:
1564 1567 raise error.Abort(
1565 1568 _(b"--all is incompatible with specifying a destination")
1566 1569 )
1567 1570 if opts.get(b'base'):
1568 1571 ui.warn(_(b"ignoring --base because --all was specified\n"))
1569 1572 base = [nullrev]
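# --all behaves like --base null: assume the destination has nothing, so every changeset is bundled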
1570 1573 else:
1571 1574 base = scmutil.revrange(repo, opts.get(b'base'))
1572 1575 if cgversion not in changegroup.supportedoutgoingversions(repo):
1573 1576 raise error.Abort(
1574 1577 _(b"repository does not support bundle version %s") % cgversion
1575 1578 )
1576 1579
1577 1580 if base:
1578 1581 if dest:
1579 1582 raise error.Abort(
1580 1583 _(b"--base is incompatible with specifying a destination")
1581 1584 )
1582 1585 common = [repo[rev].node() for rev in base]
1583 1586 heads = [repo[r].node() for r in revs] if revs else None
1584 1587 outgoing = discovery.outgoing(repo, common, heads)
1585 1588 else:
1586 1589 dest = ui.expandpath(dest or b'default-push', dest or b'default')
1587 1590 dest, branches = hg.parseurl(dest, opts.get(b'branch'))
1588 1591 other = hg.peer(repo, opts, dest)
1589 1592 revs = [repo[r].hex() for r in revs]
1590 1593 revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
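# map the requested revisions to binary node ids; an empty list is passed through unchanged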
1591 1594 heads = revs and pycompat.maplist(repo.lookup, revs) or revs
1592 1595 outgoing = discovery.findcommonoutgoing(
1593 1596 repo,
1594 1597 other,
1595 1598 onlyheads=heads,
1596 1599 force=opts.get(b'force'),
1597 1600 portable=True,
1598 1601 )
1599 1602
1600 1603 if not outgoing.missing:
1601 1604 scmutil.nochangesfound(ui, repo, not base and outgoing.excluded)
1602 1605 return 1
1603 1606
1604 1607 if cgversion == b'01': # bundle1
1605 1608 bversion = b'HG10' + bundlespec.wirecompression
1606 1609 bcompression = None
1607 1610 elif cgversion in (b'02', b'03'):
1608 1611 bversion = b'HG20'
1609 1612 bcompression = bundlespec.wirecompression
1610 1613 else:
1611 1614 raise error.ProgrammingError(
1612 1615 b'bundle: unexpected changegroup version %s' % cgversion
1613 1616 )
1614 1617
1615 1618 # TODO compression options should be derived from bundlespec parsing.
1616 1619 # This is a temporary hack to allow adjusting bundle compression
1617 1620 # level without a) formalizing the bundlespec changes to declare it
1618 1621 # b) introducing a command flag.
1619 1622 compopts = {}
1620 1623 complevel = ui.configint(
1621 1624 b'experimental', b'bundlecomplevel.' + bundlespec.compression
1622 1625 )
1623 1626 if complevel is None:
1624 1627 complevel = ui.configint(b'experimental', b'bundlecomplevel')
1625 1628 if complevel is not None:
1626 1629 compopts[b'level'] = complevel
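# for example, "[experimental] bundlecomplevel.bzip2 = 9" (an illustrative value) would tune only bzip2 bundles, while "bundlecomplevel" alone applies to any compression engine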
1627 1630
1628 1631 # Allow overriding the bundling of obsmarker in phases through
1629 1632 # configuration while we don't have a bundle version that include them
1630 1633 if repo.ui.configbool(b'experimental', b'evolution.bundle-obsmarker'):
1631 1634 bundlespec.contentopts[b'obsolescence'] = True
1632 1635 if repo.ui.configbool(b'experimental', b'bundle-phases'):
1633 1636 bundlespec.contentopts[b'phases'] = True
1634 1637
1635 1638 bundle2.writenewbundle(
1636 1639 ui,
1637 1640 repo,
1638 1641 b'bundle',
1639 1642 fname,
1640 1643 bversion,
1641 1644 outgoing,
1642 1645 bundlespec.contentopts,
1643 1646 compression=bcompression,
1644 1647 compopts=compopts,
1645 1648 )
1646 1649
1647 1650
1648 1651 @command(
1649 1652 b'cat',
1650 1653 [
1651 1654 (
1652 1655 b'o',
1653 1656 b'output',
1654 1657 b'',
1655 1658 _(b'print output to file with formatted name'),
1656 1659 _(b'FORMAT'),
1657 1660 ),
1658 1661 (b'r', b'rev', b'', _(b'print the given revision'), _(b'REV')),
1659 1662 (b'', b'decode', None, _(b'apply any matching decode filter')),
1660 1663 ]
1661 1664 + walkopts
1662 1665 + formatteropts,
1663 1666 _(b'[OPTION]... FILE...'),
1664 1667 helpcategory=command.CATEGORY_FILE_CONTENTS,
1665 1668 inferrepo=True,
1666 1669 intents={INTENT_READONLY},
1667 1670 )
1668 1671 def cat(ui, repo, file1, *pats, **opts):
1669 1672 """output the current or given revision of files
1670 1673
1671 1674 Print the specified files as they were at the given revision. If
1672 1675 no revision is given, the parent of the working directory is used.
1673 1676
1674 1677 Output may be to a file, in which case the name of the file is
1675 1678 given using a template string. See :hg:`help templates`. In addition
1676 1679 to the common template keywords, the following formatting rules are
1677 1680 supported:
1678 1681
1679 1682 :``%%``: literal "%" character
1680 1683 :``%s``: basename of file being printed
1681 1684 :``%d``: dirname of file being printed, or '.' if in repository root
1682 1685 :``%p``: root-relative path name of file being printed
1683 1686 :``%H``: changeset hash (40 hexadecimal digits)
1684 1687 :``%R``: changeset revision number
1685 1688 :``%h``: short-form changeset hash (12 hexadecimal digits)
1686 1689 :``%r``: zero-padded changeset revision number
1687 1690 :``%b``: basename of the exporting repository
1688 1691 :``\\``: literal "\\" character
1689 1692
1690 1693 .. container:: verbose
1691 1694
1692 1695 Template:
1693 1696
1694 1697 The following keywords are supported in addition to the common template
1695 1698 keywords and functions. See also :hg:`help templates`.
1696 1699
1697 1700 :data: String. File content.
1698 1701 :path: String. Repository-absolute path of the file.
1699 1702
1700 1703 Returns 0 on success.
1701 1704 """
1702 1705 opts = pycompat.byteskwargs(opts)
1703 1706 rev = opts.get(b'rev')
1704 1707 if rev:
1705 1708 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
1706 1709 ctx = scmutil.revsingle(repo, rev)
1707 1710 m = scmutil.match(ctx, (file1,) + pats, opts)
1708 1711 fntemplate = opts.pop(b'output', b'')
1709 1712 if cmdutil.isstdiofilename(fntemplate):
1710 1713 fntemplate = b''
1711 1714
1712 1715 if fntemplate:
1713 1716 fm = formatter.nullformatter(ui, b'cat', opts)
1714 1717 else:
1715 1718 ui.pager(b'cat')
1716 1719 fm = ui.formatter(b'cat', opts)
1717 1720 with fm:
1718 1721 return cmdutil.cat(
1719 1722 ui, repo, ctx, m, fm, fntemplate, b'', **pycompat.strkwargs(opts)
1720 1723 )
1721 1724
1722 1725
1723 1726 @command(
1724 1727 b'clone',
1725 1728 [
1726 1729 (
1727 1730 b'U',
1728 1731 b'noupdate',
1729 1732 None,
1730 1733 _(
1731 1734 b'the clone will include an empty working '
1732 1735 b'directory (only a repository)'
1733 1736 ),
1734 1737 ),
1735 1738 (
1736 1739 b'u',
1737 1740 b'updaterev',
1738 1741 b'',
1739 1742 _(b'revision, tag, or branch to check out'),
1740 1743 _(b'REV'),
1741 1744 ),
1742 1745 (
1743 1746 b'r',
1744 1747 b'rev',
1745 1748 [],
1746 1749 _(
1747 1750 b'do not clone everything, but include this changeset'
1748 1751 b' and its ancestors'
1749 1752 ),
1750 1753 _(b'REV'),
1751 1754 ),
1752 1755 (
1753 1756 b'b',
1754 1757 b'branch',
1755 1758 [],
1756 1759 _(
1757 1760 b'do not clone everything, but include this branch\'s'
1758 1761 b' changesets and their ancestors'
1759 1762 ),
1760 1763 _(b'BRANCH'),
1761 1764 ),
1762 1765 (b'', b'pull', None, _(b'use pull protocol to copy metadata')),
1763 1766 (b'', b'uncompressed', None, _(b'an alias to --stream (DEPRECATED)')),
1764 1767 (b'', b'stream', None, _(b'clone with minimal data processing')),
1765 1768 ]
1766 1769 + remoteopts,
1767 1770 _(b'[OPTION]... SOURCE [DEST]'),
1768 1771 helpcategory=command.CATEGORY_REPO_CREATION,
1769 1772 helpbasic=True,
1770 1773 norepo=True,
1771 1774 )
1772 1775 def clone(ui, source, dest=None, **opts):
1773 1776 """make a copy of an existing repository
1774 1777
1775 1778 Create a copy of an existing repository in a new directory.
1776 1779
1777 1780 If no destination directory name is specified, it defaults to the
1778 1781 basename of the source.
1779 1782
1780 1783 The location of the source is added to the new repository's
1781 1784 ``.hg/hgrc`` file, as the default to be used for future pulls.
1782 1785
1783 1786 Only local paths and ``ssh://`` URLs are supported as
1784 1787 destinations. For ``ssh://`` destinations, no working directory or
1785 1788 ``.hg/hgrc`` will be created on the remote side.
1786 1789
1787 1790 If the source repository has a bookmark called '@' set, that
1788 1791 revision will be checked out in the new repository by default.
1789 1792
1790 1793 To check out a particular version, use -u/--update, or
1791 1794 -U/--noupdate to create a clone with no working directory.
1792 1795
1793 1796 To pull only a subset of changesets, specify one or more revisions
1794 1797 identifiers with -r/--rev or branches with -b/--branch. The
1795 1798 resulting clone will contain only the specified changesets and
1796 1799 their ancestors. These options (or 'clone src#rev dest') imply
1797 1800 --pull, even for local source repositories.
1798 1801
1799 1802 In normal clone mode, the remote normalizes repository data into a common
1800 1803 exchange format and the receiving end translates this data into its local
1801 1804 storage format. --stream activates a different clone mode that essentially
1802 1805 copies repository files from the remote with minimal data processing. This
1803 1806 significantly reduces the CPU cost of a clone both remotely and locally.
1804 1807 However, it often increases the transferred data size by 30-40%. This can
1805 1808 result in substantially faster clones where I/O throughput is plentiful,
1806 1809 especially for larger repositories. A side-effect of --stream clones is
1807 1810 that storage settings and requirements on the remote are applied locally:
1808 1811 a modern client may inherit legacy or inefficient storage used by the
1809 1812 remote or a legacy Mercurial client may not be able to clone from a
1810 1813 modern Mercurial remote.
1811 1814
1812 1815 .. note::
1813 1816
1814 1817 Specifying a tag will include the tagged changeset but not the
1815 1818 changeset containing the tag.
1816 1819
1817 1820 .. container:: verbose
1818 1821
1819 1822 For efficiency, hardlinks are used for cloning whenever the
1820 1823 source and destination are on the same filesystem (note this
1821 1824 applies only to the repository data, not to the working
1822 1825 directory). Some filesystems, such as AFS, implement hardlinking
1823 1826 incorrectly, but do not report errors. In these cases, use the
1824 1827 --pull option to avoid hardlinking.
1825 1828
1826 1829 Mercurial will update the working directory to the first applicable
1827 1830 revision from this list:
1828 1831
1829 1832 a) null if -U or the source repository has no changesets
1830 1833 b) if -u . and the source repository is local, the first parent of
1831 1834 the source repository's working directory
1832 1835 c) the changeset specified with -u (if a branch name, this means the
1833 1836 latest head of that branch)
1834 1837 d) the changeset specified with -r
1835 1838 e) the tipmost head specified with -b
1836 1839 f) the tipmost head specified with the url#branch source syntax
1837 1840 g) the revision marked with the '@' bookmark, if present
1838 1841 h) the tipmost head of the default branch
1839 1842 i) tip
1840 1843
1841 1844 When cloning from servers that support it, Mercurial may fetch
1842 1845 pre-generated data from a server-advertised URL or inline from the
1843 1846 same stream. When this is done, hooks operating on incoming changesets
1844 1847 and changegroups may fire more than once, once for each pre-generated
1845 1848 bundle as well as for any additional remaining data. In addition,
1846 1849 if an error occurs, the repository may be rolled back to a partial
1847 1850 clone. This behavior may change in future releases.
1848 1851 See :hg:`help -e clonebundles` for more.
1849 1852
1850 1853 Examples:
1851 1854
1852 1855 - clone a remote repository to a new directory named hg/::
1853 1856
1854 1857 hg clone https://www.mercurial-scm.org/repo/hg/
1855 1858
1856 1859 - create a lightweight local clone::
1857 1860
1858 1861 hg clone project/ project-feature/
1859 1862
1860 1863 - clone from an absolute path on an ssh server (note double-slash)::
1861 1864
1862 1865 hg clone ssh://user@server//home/projects/alpha/
1863 1866
1864 1867 - do a streaming clone while checking out a specified version::
1865 1868
1866 1869 hg clone --stream http://server/repo -u 1.5
1867 1870
1868 1871 - create a repository without changesets after a particular revision::
1869 1872
1870 1873 hg clone -r 04e544 experimental/ good/
1871 1874
1872 1875 - clone (and track) a particular named branch::
1873 1876
1874 1877 hg clone https://www.mercurial-scm.org/repo/hg/#stable
1875 1878
1876 1879 See :hg:`help urls` for details on specifying URLs.
1877 1880
1878 1881 Returns 0 on success.
1879 1882 """
1880 1883 opts = pycompat.byteskwargs(opts)
1881 1884 cmdutil.check_at_most_one_arg(opts, b'noupdate', b'updaterev')
1882 1885
1883 1886 # --include/--exclude can come from narrow or sparse.
1884 1887 includepats, excludepats = None, None
1885 1888
1886 1889 # hg.clone() differentiates between None and an empty set. So make sure
1887 1890 # patterns are sets if narrow is requested without patterns.
1888 1891 if opts.get(b'narrow'):
1889 1892 includepats = set()
1890 1893 excludepats = set()
1891 1894
1892 1895 if opts.get(b'include'):
1893 1896 includepats = narrowspec.parsepatterns(opts.get(b'include'))
1894 1897 if opts.get(b'exclude'):
1895 1898 excludepats = narrowspec.parsepatterns(opts.get(b'exclude'))
1896 1899
1897 1900 r = hg.clone(
1898 1901 ui,
1899 1902 opts,
1900 1903 source,
1901 1904 dest,
1902 1905 pull=opts.get(b'pull'),
1903 1906 stream=opts.get(b'stream') or opts.get(b'uncompressed'),
1904 1907 revs=opts.get(b'rev'),
1905 1908 update=opts.get(b'updaterev') or not opts.get(b'noupdate'),
1906 1909 branch=opts.get(b'branch'),
1907 1910 shareopts=opts.get(b'shareopts'),
1908 1911 storeincludepats=includepats,
1909 1912 storeexcludepats=excludepats,
1910 1913 depth=opts.get(b'depth') or None,
1911 1914 )
1912 1915
1913 1916 return r is None
1914 1917
1915 1918
1916 1919 @command(
1917 1920 b'commit|ci',
1918 1921 [
1919 1922 (
1920 1923 b'A',
1921 1924 b'addremove',
1922 1925 None,
1923 1926 _(b'mark new/missing files as added/removed before committing'),
1924 1927 ),
1925 1928 (b'', b'close-branch', None, _(b'mark a branch head as closed')),
1926 1929 (b'', b'amend', None, _(b'amend the parent of the working directory')),
1927 1930 (b's', b'secret', None, _(b'use the secret phase for committing')),
1928 1931 (b'e', b'edit', None, _(b'invoke editor on commit messages')),
1929 1932 (
1930 1933 b'',
1931 1934 b'force-close-branch',
1932 1935 None,
1933 1936 _(b'forcibly close branch from a non-head changeset (ADVANCED)'),
1934 1937 ),
1935 1938 (b'i', b'interactive', None, _(b'use interactive mode')),
1936 1939 ]
1937 1940 + walkopts
1938 1941 + commitopts
1939 1942 + commitopts2
1940 1943 + subrepoopts,
1941 1944 _(b'[OPTION]... [FILE]...'),
1942 1945 helpcategory=command.CATEGORY_COMMITTING,
1943 1946 helpbasic=True,
1944 1947 inferrepo=True,
1945 1948 )
1946 1949 def commit(ui, repo, *pats, **opts):
1947 1950 """commit the specified files or all outstanding changes
1948 1951
1949 1952 Commit changes to the given files into the repository. Unlike a
1950 1953 centralized SCM, this operation is a local operation. See
1951 1954 :hg:`push` for a way to actively distribute your changes.
1952 1955
1953 1956 If a list of files is omitted, all changes reported by :hg:`status`
1954 1957 will be committed.
1955 1958
1956 1959 If you are committing the result of a merge, do not provide any
1957 1960 filenames or -I/-X filters.
1958 1961
1959 1962 If no commit message is specified, Mercurial starts your
1960 1963 configured editor where you can enter a message. In case your
1961 1964 commit fails, you will find a backup of your message in
1962 1965 ``.hg/last-message.txt``.
1963 1966
1964 1967 The --close-branch flag can be used to mark the current branch
1965 1968 head closed. When all heads of a branch are closed, the branch
1966 1969 will be considered closed and no longer listed.
1967 1970
1968 1971 The --amend flag can be used to amend the parent of the
1969 1972 working directory with a new commit that contains the changes
1970 1973 in the parent in addition to those currently reported by :hg:`status`,
1971 1974 if there are any. The old commit is stored in a backup bundle in
1972 1975 ``.hg/strip-backup`` (see :hg:`help bundle` and :hg:`help unbundle`
1973 1976 on how to restore it).
1974 1977
1975 1978 Message, user and date are taken from the amended commit unless
1976 1979 specified. When a message isn't specified on the command line,
1977 1980 the editor will open with the message of the amended commit.
1978 1981
1979 1982 It is not possible to amend public changesets (see :hg:`help phases`)
1980 1983 or changesets that have children.
1981 1984
1982 1985 See :hg:`help dates` for a list of formats valid for -d/--date.
1983 1986
1984 1987 Returns 0 on success, 1 if nothing changed.
1985 1988
1986 1989 .. container:: verbose
1987 1990
1988 1991 Examples:
1989 1992
1990 1993 - commit all files ending in .py::
1991 1994
1992 1995 hg commit --include "set:**.py"
1993 1996
1994 1997 - commit all non-binary files::
1995 1998
1996 1999 hg commit --exclude "set:binary()"
1997 2000
1998 2001 - amend the current commit and set the date to now::
1999 2002
2000 2003 hg commit --amend --date now
2001 2004 """
2002 2005 with repo.wlock(), repo.lock():
2003 2006 return _docommit(ui, repo, *pats, **opts)
2004 2007
2005 2008
2006 2009 def _docommit(ui, repo, *pats, **opts):
2007 2010 if opts.get('interactive'):
2008 2011 opts.pop('interactive')
2009 2012 ret = cmdutil.dorecord(
2010 2013 ui, repo, commit, None, False, cmdutil.recordfilter, *pats, **opts
2011 2014 )
2012 2015 # ret can be 0 (no changes to record) or the value returned by
2013 2016 # commit(): 1 if nothing changed, or None on success.
2014 2017 return 1 if ret == 0 else ret
2015 2018
2016 2019 opts = pycompat.byteskwargs(opts)
2017 2020 if opts.get(b'subrepos'):
2018 2021 cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'amend'])
2019 2022 # Let --subrepos on the command line override config setting.
2020 2023 ui.setconfig(b'ui', b'commitsubrepos', True, b'commit')
2021 2024
2022 2025 cmdutil.checkunfinished(repo, commit=True)
2023 2026
2024 2027 branch = repo[None].branch()
2025 2028 bheads = repo.branchheads(branch)
2026 2029
2027 2030 extra = {}
2028 2031 if opts.get(b'close_branch') or opts.get(b'force_close_branch'):
2029 2032 extra[b'close'] = b'1'
2030 2033
2031 2034 if repo[b'.'].closesbranch():
2032 2035 raise error.Abort(
2033 2036 _(b'current revision is already a branch closing head')
2034 2037 )
2035 2038 elif not bheads:
2036 2039 raise error.Abort(_(b'branch "%s" has no heads to close') % branch)
2037 2040 elif (
2038 2041 branch == repo[b'.'].branch()
2039 2042 and repo[b'.'].node() not in bheads
2040 2043 and not opts.get(b'force_close_branch')
2041 2044 ):
2042 2045 hint = _(
2043 2046 b'use --force-close-branch to close branch from a non-head'
2044 2047 b' changeset'
2045 2048 )
2046 2049 raise error.Abort(_(b'can only close branch heads'), hint=hint)
2047 2050 elif opts.get(b'amend'):
2048 2051 if (
2049 2052 repo[b'.'].p1().branch() != branch
2050 2053 and repo[b'.'].p2().branch() != branch
2051 2054 ):
2052 2055 raise error.Abort(_(b'can only close branch heads'))
2053 2056
2054 2057 if opts.get(b'amend'):
2055 2058 if ui.configbool(b'ui', b'commitsubrepos'):
2056 2059 raise error.Abort(_(b'cannot amend with ui.commitsubrepos enabled'))
2057 2060
2058 2061 old = repo[b'.']
2059 2062 rewriteutil.precheck(repo, [old.rev()], b'amend')
2060 2063
2061 2064 # Currently histedit gets confused if an amend happens while histedit
2062 2065 # is in progress. Since we have a checkunfinished command, we are
2063 2066 # temporarily honoring it.
2064 2067 #
2065 2068 # Note: eventually this guard will be removed. Please do not expect
2066 2069 # this behavior to remain.
2067 2070 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2068 2071 cmdutil.checkunfinished(repo)
2069 2072
2070 2073 node = cmdutil.amend(ui, repo, old, extra, pats, opts)
2071 2074 if node == old.node():
2072 2075 ui.status(_(b"nothing changed\n"))
2073 2076 return 1
2074 2077 else:
2075 2078
2076 2079 def commitfunc(ui, repo, message, match, opts):
2077 2080 overrides = {}
2078 2081 if opts.get(b'secret'):
2079 2082 overrides[(b'phases', b'new-commit')] = b'secret'
2080 2083
2081 2084 baseui = repo.baseui
2082 2085 with baseui.configoverride(overrides, b'commit'):
2083 2086 with ui.configoverride(overrides, b'commit'):
2084 2087 editform = cmdutil.mergeeditform(
2085 2088 repo[None], b'commit.normal'
2086 2089 )
2087 2090 editor = cmdutil.getcommiteditor(
2088 2091 editform=editform, **pycompat.strkwargs(opts)
2089 2092 )
2090 2093 return repo.commit(
2091 2094 message,
2092 2095 opts.get(b'user'),
2093 2096 opts.get(b'date'),
2094 2097 match,
2095 2098 editor=editor,
2096 2099 extra=extra,
2097 2100 )
2098 2101
2099 2102 node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
2100 2103
2101 2104 if not node:
2102 2105 stat = cmdutil.postcommitstatus(repo, pats, opts)
2103 2106 if stat.deleted:
2104 2107 ui.status(
2105 2108 _(
2106 2109 b"nothing changed (%d missing files, see "
2107 2110 b"'hg status')\n"
2108 2111 )
2109 2112 % len(stat.deleted)
2110 2113 )
2111 2114 else:
2112 2115 ui.status(_(b"nothing changed\n"))
2113 2116 return 1
2114 2117
2115 2118 cmdutil.commitstatus(repo, node, branch, bheads, opts)
2116 2119
2117 2120 if not ui.quiet and ui.configbool(b'commands', b'commit.post-status'):
2118 2121 status(
2119 2122 ui,
2120 2123 repo,
2121 2124 modified=True,
2122 2125 added=True,
2123 2126 removed=True,
2124 2127 deleted=True,
2125 2128 unknown=True,
2126 2129 subrepos=opts.get(b'subrepos'),
2127 2130 )
2128 2131
2129 2132
2130 2133 @command(
2131 2134 b'config|showconfig|debugconfig',
2132 2135 [
2133 2136 (b'u', b'untrusted', None, _(b'show untrusted configuration options')),
2134 2137 (b'e', b'edit', None, _(b'edit user config')),
2135 2138 (b'l', b'local', None, _(b'edit repository config')),
2136 2139 (
2137 2140 b'',
2138 2141 b'shared',
2139 2142 None,
2140 2143 _(b'edit shared source repository config (EXPERIMENTAL)'),
2141 2144 ),
2142 2145 (b'', b'non-shared', None, _(b'edit non shared config (EXPERIMENTAL)')),
2143 2146 (b'g', b'global', None, _(b'edit global config')),
2144 2147 ]
2145 2148 + formatteropts,
2146 2149 _(b'[-u] [NAME]...'),
2147 2150 helpcategory=command.CATEGORY_HELP,
2148 2151 optionalrepo=True,
2149 2152 intents={INTENT_READONLY},
2150 2153 )
2151 2154 def config(ui, repo, *values, **opts):
2152 2155 """show combined config settings from all hgrc files
2153 2156
2154 2157 With no arguments, print names and values of all config items.
2155 2158
2156 2159 With one argument of the form section.name, print just the value
2157 2160 of that config item.
2158 2161
2159 2162 With multiple arguments, print names and values of all config
2160 2163 items with matching section names or section.names.
2161 2164
2162 2165 With --edit, start an editor on the user-level config file. With
2163 2166 --global, edit the system-wide config file. With --local, edit the
2164 2167 repository-level config file.
2165 2168
2166 2169 With --debug, the source (filename and line number) is printed
2167 2170 for each config item.
2168 2171
2169 2172 See :hg:`help config` for more information about config files.
2170 2173
2171 2174 .. container:: verbose
2172 2175
2173 2176 The --non-shared flag is used to edit the `.hg/hgrc-not-shared` config file.
2174 2177 This file is not shared across shares when in share-safe mode.
2175 2178
2176 2179 Template:
2177 2180
2178 2181 The following keywords are supported. See also :hg:`help templates`.
2179 2182
2180 2183 :name: String. Config name.
2181 2184 :source: String. Filename and line number where the item is defined.
2182 2185 :value: String. Config value.
2183 2186
2184 2187 The --shared flag can be used to edit the config file of the shared
2185 2188 source repository. It only works when the repository has been shared
2186 2189 using the experimental share-safe feature.
2187 2190
2188 2191 Returns 0 on success, 1 if NAME does not exist.
2189 2192
2190 2193 """
2191 2194
2192 2195 opts = pycompat.byteskwargs(opts)
2193 2196 editopts = (b'edit', b'local', b'global', b'shared', b'non_shared')
2194 2197 if any(opts.get(o) for o in editopts):
2195 2198 cmdutil.check_at_most_one_arg(opts, *editopts[1:])
2196 2199 if opts.get(b'local'):
2197 2200 if not repo:
2198 2201 raise error.Abort(_(b"can't use --local outside a repository"))
2199 2202 paths = [repo.vfs.join(b'hgrc')]
2200 2203 elif opts.get(b'global'):
2201 2204 paths = rcutil.systemrcpath()
2202 2205 elif opts.get(b'shared'):
2203 2206 if not repo.shared():
2204 2207 raise error.Abort(
2205 2208 _(b"repository is not shared; can't use --shared")
2206 2209 )
2207 2210 if requirements.SHARESAFE_REQUIREMENT not in repo.requirements:
2208 2211 raise error.Abort(
2209 2212 _(
2210 2213 b"share safe feature not enabled; "
2211 2214 b"unable to edit shared source repository config"
2212 2215 )
2213 2216 )
2214 2217 paths = [vfsmod.vfs(repo.sharedpath).join(b'hgrc')]
2215 2218 elif opts.get(b'non_shared'):
2216 2219 paths = [repo.vfs.join(b'hgrc-not-shared')]
2217 2220 else:
2218 2221 paths = rcutil.userrcpath()
2219 2222
2220 2223 for f in paths:
2221 2224 if os.path.exists(f):
2222 2225 break
2223 2226 else:
2224 2227 if opts.get(b'global'):
2225 2228 samplehgrc = uimod.samplehgrcs[b'global']
2226 2229 elif opts.get(b'local'):
2227 2230 samplehgrc = uimod.samplehgrcs[b'local']
2228 2231 else:
2229 2232 samplehgrc = uimod.samplehgrcs[b'user']
2230 2233
2231 2234 f = paths[0]
2232 2235 fp = open(f, b"wb")
2233 2236 fp.write(util.tonativeeol(samplehgrc))
2234 2237 fp.close()
2235 2238
2236 2239 editor = ui.geteditor()
2237 2240 ui.system(
2238 2241 b"%s \"%s\"" % (editor, f),
2239 2242 onerr=error.Abort,
2240 2243 errprefix=_(b"edit failed"),
2241 2244 blockedtag=b'config_edit',
2242 2245 )
2243 2246 return
2244 2247 ui.pager(b'config')
2245 2248 fm = ui.formatter(b'config', opts)
2246 2249 for t, f in rcutil.rccomponents():
2247 2250 if t == b'path':
2248 2251 ui.debug(b'read config from: %s\n' % f)
2249 2252 elif t == b'resource':
2250 2253 ui.debug(b'read config from: resource:%s.%s\n' % (f[0], f[1]))
2251 2254 elif t == b'items':
2252 2255 # Don't print anything for 'items'.
2253 2256 pass
2254 2257 else:
2255 2258 raise error.ProgrammingError(b'unknown rctype: %s' % t)
2256 2259 untrusted = bool(opts.get(b'untrusted'))
2257 2260
2258 2261 selsections = selentries = []
2259 2262 if values:
2260 2263 selsections = [v for v in values if b'.' not in v]
2261 2264 selentries = [v for v in values if b'.' in v]
2262 2265 uniquesel = len(selentries) == 1 and not selsections
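# with exactly one section.name argument, print the bare value without the "name=" prefix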
2263 2266 selsections = set(selsections)
2264 2267 selentries = set(selentries)
2265 2268
2266 2269 matched = False
2267 2270 for section, name, value in ui.walkconfig(untrusted=untrusted):
2268 2271 source = ui.configsource(section, name, untrusted)
2269 2272 value = pycompat.bytestr(value)
2270 2273 defaultvalue = ui.configdefault(section, name)
2271 2274 if fm.isplain():
2272 2275 source = source or b'none'
2273 2276 value = value.replace(b'\n', b'\\n')
2274 2277 entryname = section + b'.' + name
2275 2278 if values and not (section in selsections or entryname in selentries):
2276 2279 continue
2277 2280 fm.startitem()
2278 2281 fm.condwrite(ui.debugflag, b'source', b'%s: ', source)
2279 2282 if uniquesel:
2280 2283 fm.data(name=entryname)
2281 2284 fm.write(b'value', b'%s\n', value)
2282 2285 else:
2283 2286 fm.write(b'name value', b'%s=%s\n', entryname, value)
2284 2287 if formatter.isprintable(defaultvalue):
2285 2288 fm.data(defaultvalue=defaultvalue)
2286 2289 elif isinstance(defaultvalue, list) and all(
2287 2290 formatter.isprintable(e) for e in defaultvalue
2288 2291 ):
2289 2292 fm.data(defaultvalue=fm.formatlist(defaultvalue, name=b'value'))
2290 2293 # TODO: no idea how to process unsupported defaultvalue types
2291 2294 matched = True
2292 2295 fm.end()
2293 2296 if matched:
2294 2297 return 0
2295 2298 return 1
2296 2299
2297 2300
2298 2301 @command(
2299 2302 b'continue',
2300 2303 dryrunopts,
2301 2304 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
2302 2305 helpbasic=True,
2303 2306 )
2304 2307 def continuecmd(ui, repo, **opts):
2305 2308 """resumes an interrupted operation (EXPERIMENTAL)
2306 2309
2307 2310 Finishes a multistep operation like graft, histedit, rebase, merge,
2308 2311 or unshelve if it is in an interrupted state.
2309 2312
2310 2313 Use --dry-run/-n to perform a dry run of the command.
2311 2314 """
2312 2315 dryrun = opts.get('dry_run')
2313 2316 contstate = cmdutil.getunfinishedstate(repo)
2314 2317 if not contstate:
2315 2318 raise error.Abort(_(b'no operation in progress'))
2316 2319 if not contstate.continuefunc:
2317 2320 raise error.Abort(
2318 2321 (
2319 2322 _(b"%s in progress but does not support 'hg continue'")
2320 2323 % (contstate._opname)
2321 2324 ),
2322 2325 hint=contstate.continuemsg(),
2323 2326 )
2324 2327 if dryrun:
2325 2328 ui.status(_(b'%s in progress, will be resumed\n') % (contstate._opname))
2326 2329 return
2327 2330 return contstate.continuefunc(ui, repo)
2328 2331
2329 2332
2330 2333 @command(
2331 2334 b'copy|cp',
2332 2335 [
2333 2336 (b'', b'forget', None, _(b'unmark a destination file as copied')),
2334 2337 (b'A', b'after', None, _(b'record a copy that has already occurred')),
2335 2338 (
2336 2339 b'',
2337 2340 b'at-rev',
2338 2341 b'',
2339 2342 _(b'(un)mark copies in the given revision (EXPERIMENTAL)'),
2340 2343 _(b'REV'),
2341 2344 ),
2342 2345 (
2343 2346 b'f',
2344 2347 b'force',
2345 2348 None,
2346 2349 _(b'forcibly copy over an existing managed file'),
2347 2350 ),
2348 2351 ]
2349 2352 + walkopts
2350 2353 + dryrunopts,
2351 2354 _(b'[OPTION]... SOURCE... DEST'),
2352 2355 helpcategory=command.CATEGORY_FILE_CONTENTS,
2353 2356 )
2354 2357 def copy(ui, repo, *pats, **opts):
2355 2358 """mark files as copied for the next commit
2356 2359
2357 2360 Mark dest as having copies of source files. If dest is a
2358 2361 directory, copies are put in that directory. If dest is a file,
2359 2362 the source must be a single file.
2360 2363
2361 2364 By default, this command copies the contents of files as they
2362 2365 exist in the working directory. If invoked with -A/--after, the
2363 2366 operation is recorded, but no copying is performed.
2364 2367
2365 2368 To undo marking a destination file as copied, use --forget. With that
2366 2369 option, all given (positional) arguments are unmarked as copies. The
2367 2370 destination file(s) will be left in place (still tracked).
2368 2371
2369 2372 This command takes effect with the next commit by default.
2370 2373
2371 2374 Returns 0 on success, 1 if errors are encountered.
2372 2375 """
2373 2376 opts = pycompat.byteskwargs(opts)
2374 2377 with repo.wlock():
2375 2378 return cmdutil.copy(ui, repo, pats, opts)
2376 2379
2377 2380
2378 2381 @command(
2379 2382 b'debugcommands',
2380 2383 [],
2381 2384 _(b'[COMMAND]'),
2382 2385 helpcategory=command.CATEGORY_HELP,
2383 2386 norepo=True,
2384 2387 )
2385 2388 def debugcommands(ui, cmd=b'', *args):
2386 2389 """list all available commands and options"""
2387 2390 for cmd, vals in sorted(pycompat.iteritems(table)):
2388 2391 cmd = cmd.split(b'|')[0]
2389 2392 opts = b', '.join([i[1] for i in vals[1]])
2390 2393 ui.write(b'%s: %s\n' % (cmd, opts))
2391 2394
2392 2395
2393 2396 @command(
2394 2397 b'debugcomplete',
2395 2398 [(b'o', b'options', None, _(b'show the command options'))],
2396 2399 _(b'[-o] CMD'),
2397 2400 helpcategory=command.CATEGORY_HELP,
2398 2401 norepo=True,
2399 2402 )
2400 2403 def debugcomplete(ui, cmd=b'', **opts):
2401 2404 """returns the completion list associated with the given command"""
2402 2405
2403 2406 if opts.get('options'):
2404 2407 options = []
2405 2408 otables = [globalopts]
2406 2409 if cmd:
2407 2410 aliases, entry = cmdutil.findcmd(cmd, table, False)
2408 2411 otables.append(entry[1])
2409 2412 for t in otables:
2410 2413 for o in t:
2411 2414 if b"(DEPRECATED)" in o[3]:
2412 2415 continue
2413 2416 if o[0]:
2414 2417 options.append(b'-%s' % o[0])
2415 2418 options.append(b'--%s' % o[1])
2416 2419 ui.write(b"%s\n" % b"\n".join(options))
2417 2420 return
2418 2421
2419 2422 cmdlist, unused_allcmds = cmdutil.findpossible(cmd, table)
2420 2423 if ui.verbose:
2421 2424 cmdlist = [b' '.join(c[0]) for c in cmdlist.values()]
2422 2425 ui.write(b"%s\n" % b"\n".join(sorted(cmdlist)))
2423 2426
2424 2427
2425 2428 @command(
2426 2429 b'diff',
2427 2430 [
2428 2431 (b'r', b'rev', [], _(b'revision'), _(b'REV')),
2429 2432 (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')),
2430 2433 ]
2431 2434 + diffopts
2432 2435 + diffopts2
2433 2436 + walkopts
2434 2437 + subrepoopts,
2435 2438 _(b'[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'),
2436 2439 helpcategory=command.CATEGORY_FILE_CONTENTS,
2437 2440 helpbasic=True,
2438 2441 inferrepo=True,
2439 2442 intents={INTENT_READONLY},
2440 2443 )
2441 2444 def diff(ui, repo, *pats, **opts):
2442 2445 """diff repository (or selected files)
2443 2446
2444 2447 Show differences between revisions for the specified files.
2445 2448
2446 2449 Differences between files are shown using the unified diff format.
2447 2450
2448 2451 .. note::
2449 2452
2450 2453 :hg:`diff` may generate unexpected results for merges, as it will
2451 2454 default to comparing against the working directory's first
2452 2455 parent changeset if no revisions are specified.
2453 2456
2454 2457 When two revision arguments are given, changes are shown between
2455 2458 those revisions. If only one revision is specified, that revision is
2456 2459 compared to the working directory, and, when no revisions are
2457 2460 specified, the working directory files are compared to its first
2458 2461 parent.
2459 2462
2460 2463 Alternatively you can specify -c/--change with a revision to see
2461 2464 the changes in that changeset relative to its first parent.
2462 2465
2463 2466 Without the -a/--text option, diff will avoid generating diffs of
2464 2467 files it detects as binary. With -a, diff will generate a diff
2465 2468 anyway, probably with undesirable results.
2466 2469
2467 2470 Use the -g/--git option to generate diffs in the git extended diff
2468 2471 format. For more information, read :hg:`help diffs`.
2469 2472
2470 2473 .. container:: verbose
2471 2474
2472 2475 Examples:
2473 2476
2474 2477 - compare a file in the current working directory to its parent::
2475 2478
2476 2479 hg diff foo.c
2477 2480
2478 2481 - compare two historical versions of a directory, with rename info::
2479 2482
2480 2483 hg diff --git -r 1.0:1.2 lib/
2481 2484
2482 2485 - get change stats relative to the last change on some date::
2483 2486
2484 2487 hg diff --stat -r "date('may 2')"
2485 2488
2486 2489 - diff all newly-added files that contain a keyword::
2487 2490
2488 2491 hg diff "set:added() and grep(GNU)"
2489 2492
2490 2493 - compare a revision and its parents::
2491 2494
2492 2495 hg diff -c 9353 # compare against first parent
2493 2496 hg diff -r 9353^:9353 # same using revset syntax
2494 2497 hg diff -r 9353^2:9353 # compare against the second parent
2495 2498
2496 2499 Returns 0 on success.
2497 2500 """
2498 2501
2499 2502 cmdutil.check_at_most_one_arg(opts, 'rev', 'change')
2500 2503 opts = pycompat.byteskwargs(opts)
2501 2504 revs = opts.get(b'rev')
2502 2505 change = opts.get(b'change')
2503 2506 stat = opts.get(b'stat')
2504 2507 reverse = opts.get(b'reverse')
2505 2508
2506 2509 if change:
2507 2510 repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
2508 2511 ctx2 = scmutil.revsingle(repo, change, None)
2509 2512 ctx1 = ctx2.p1()
2510 2513 else:
2511 2514 repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
2512 2515 ctx1, ctx2 = scmutil.revpair(repo, revs)
2513 2516
2514 2517 if reverse:
2515 2518 ctxleft = ctx2
2516 2519 ctxright = ctx1
2517 2520 else:
2518 2521 ctxleft = ctx1
2519 2522 ctxright = ctx2
2520 2523
2521 2524 diffopts = patch.diffallopts(ui, opts)
2522 2525 m = scmutil.match(ctx2, pats, opts)
2523 2526 m = repo.narrowmatch(m)
2524 2527 ui.pager(b'diff')
2525 2528 logcmdutil.diffordiffstat(
2526 2529 ui,
2527 2530 repo,
2528 2531 diffopts,
2529 2532 ctxleft,
2530 2533 ctxright,
2531 2534 m,
2532 2535 stat=stat,
2533 2536 listsubrepos=opts.get(b'subrepos'),
2534 2537 root=opts.get(b'root'),
2535 2538 )
2536 2539
2537 2540
2538 2541 @command(
2539 2542 b'export',
2540 2543 [
2541 2544 (
2542 2545 b'B',
2543 2546 b'bookmark',
2544 2547 b'',
2545 2548 _(b'export changes only reachable by given bookmark'),
2546 2549 _(b'BOOKMARK'),
2547 2550 ),
2548 2551 (
2549 2552 b'o',
2550 2553 b'output',
2551 2554 b'',
2552 2555 _(b'print output to file with formatted name'),
2553 2556 _(b'FORMAT'),
2554 2557 ),
2555 2558 (b'', b'switch-parent', None, _(b'diff against the second parent')),
2556 2559 (b'r', b'rev', [], _(b'revisions to export'), _(b'REV')),
2557 2560 ]
2558 2561 + diffopts
2559 2562 + formatteropts,
2560 2563 _(b'[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'),
2561 2564 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2562 2565 helpbasic=True,
2563 2566 intents={INTENT_READONLY},
2564 2567 )
2565 2568 def export(ui, repo, *changesets, **opts):
2566 2569 """dump the header and diffs for one or more changesets
2567 2570
2568 2571 Print the changeset header and diffs for one or more revisions.
2569 2572 If no revision is given, the parent of the working directory is used.
2570 2573
2571 2574 The information shown in the changeset header is: author, date,
2572 2575 branch name (if non-default), changeset hash, parent(s) and commit
2573 2576 comment.
2574 2577
2575 2578 .. note::
2576 2579
2577 2580 :hg:`export` may generate unexpected diff output for merge
2578 2581 changesets, as it will compare the merge changeset against its
2579 2582 first parent only.
2580 2583
2581 2584 Output may be written to a file, in which case the name of the file
2582 2585 is given using a template string. See :hg:`help templates`. In
2583 2586 addition to the common template keywords, the following formatting
2584 2587 rules are supported:
2585 2588
2586 2589 :``%%``: literal "%" character
2587 2590 :``%H``: changeset hash (40 hexadecimal digits)
2588 2591 :``%N``: number of patches being generated
2589 2592 :``%R``: changeset revision number
2590 2593 :``%b``: basename of the exporting repository
2591 2594 :``%h``: short-form changeset hash (12 hexadecimal digits)
2592 2595 :``%m``: first line of the commit message (only alphanumeric characters)
2593 2596 :``%n``: zero-padded sequence number, starting at 1
2594 2597 :``%r``: zero-padded changeset revision number
2595 2598 :``\\``: literal "\\" character
2596 2599
2597 2600 Without the -a/--text option, export will avoid generating diffs
2598 2601 of files it detects as binary. With -a, export will generate a
2599 2602 diff anyway, probably with undesirable results.
2600 2603
2601 2604 With -B/--bookmark, changesets reachable by the given bookmark are
2602 2605 selected.
2603 2606
2604 2607 Use the -g/--git option to generate diffs in the git extended diff
2605 2608 format. See :hg:`help diffs` for more information.
2606 2609
2607 2610 With the --switch-parent option, the diff will be against the
2608 2611 second parent, which can be useful for reviewing a merge.
2609 2612
2610 2613 .. container:: verbose
2611 2614
2612 2615 Template:
2613 2616
2614 2617 The following keywords are supported in addition to the common template
2615 2618 keywords and functions. See also :hg:`help templates`.
2616 2619
2617 2620 :diff: String. Diff content.
2618 2621 :parents: List of strings. Parent nodes of the changeset.
2619 2622
2620 2623 Examples:
2621 2624
2622 2625 - use export and import to transplant a bugfix to the current
2623 2626 branch::
2624 2627
2625 2628 hg export -r 9353 | hg import -
2626 2629
2627 2630 - export all the changesets between two revisions to a file with
2628 2631 rename information::
2629 2632
2630 2633 hg export --git -r 123:150 > changes.txt
2631 2634
2632 2635 - split outgoing changes into a series of patches with
2633 2636 descriptive names::
2634 2637
2635 2638 hg export -r "outgoing()" -o "%n-%m.patch"
2636 2639
2637 2640 Returns 0 on success.
2638 2641 """
2639 2642 opts = pycompat.byteskwargs(opts)
2640 2643 bookmark = opts.get(b'bookmark')
2641 2644 changesets += tuple(opts.get(b'rev', []))
2642 2645
2643 2646 cmdutil.check_at_most_one_arg(opts, b'rev', b'bookmark')
2644 2647
2645 2648 if bookmark:
2646 2649 if bookmark not in repo._bookmarks:
2647 2650 raise error.Abort(_(b"bookmark '%s' not found") % bookmark)
2648 2651
2649 2652 revs = scmutil.bookmarkrevs(repo, bookmark)
2650 2653 else:
2651 2654 if not changesets:
2652 2655 changesets = [b'.']
2653 2656
2654 2657 repo = scmutil.unhidehashlikerevs(repo, changesets, b'nowarn')
2655 2658 revs = scmutil.revrange(repo, changesets)
2656 2659
2657 2660 if not revs:
2658 2661 raise error.Abort(_(b"export requires at least one changeset"))
2659 2662 if len(revs) > 1:
2660 2663 ui.note(_(b'exporting patches:\n'))
2661 2664 else:
2662 2665 ui.note(_(b'exporting patch:\n'))
2663 2666
2664 2667 fntemplate = opts.get(b'output')
2665 2668 if cmdutil.isstdiofilename(fntemplate):
2666 2669 fntemplate = b''
2667 2670
2668 2671 if fntemplate:
2669 2672 fm = formatter.nullformatter(ui, b'export', opts)
2670 2673 else:
2671 2674 ui.pager(b'export')
2672 2675 fm = ui.formatter(b'export', opts)
2673 2676 with fm:
2674 2677 cmdutil.export(
2675 2678 repo,
2676 2679 revs,
2677 2680 fm,
2678 2681 fntemplate=fntemplate,
2679 2682 switch_parent=opts.get(b'switch_parent'),
2680 2683 opts=patch.diffallopts(ui, opts),
2681 2684 )
2682 2685
2683 2686
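# The export() docstring above already covers -r/-o; one additional hedged sketch
# for the -B/--bookmark selector it documents ("release" is an illustrative
# bookmark name, not one assumed to exist):
#
#   hg export -B release -o "%n-%m.patch"   # export the changesets reachable by
#                                           # the 'release' bookmark, one file each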
2684 2687 @command(
2685 2688 b'files',
2686 2689 [
2687 2690 (
2688 2691 b'r',
2689 2692 b'rev',
2690 2693 b'',
2691 2694 _(b'search the repository as it is in REV'),
2692 2695 _(b'REV'),
2693 2696 ),
2694 2697 (
2695 2698 b'0',
2696 2699 b'print0',
2697 2700 None,
2698 2701 _(b'end filenames with NUL, for use with xargs'),
2699 2702 ),
2700 2703 ]
2701 2704 + walkopts
2702 2705 + formatteropts
2703 2706 + subrepoopts,
2704 2707 _(b'[OPTION]... [FILE]...'),
2705 2708 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
2706 2709 intents={INTENT_READONLY},
2707 2710 )
2708 2711 def files(ui, repo, *pats, **opts):
2709 2712 """list tracked files
2710 2713
2711 2714 Print files under Mercurial control in the working directory or
2712 2715 the specified revision that match the given patterns (excluding
2713 2716 removed files). Files can be specified as filenames or filesets.
2714 2717
2715 2718 If no files are given to match, this command prints the names
2716 2719 of all files under Mercurial control.
2717 2720
2718 2721 .. container:: verbose
2719 2722
2720 2723 Template:
2721 2724
2722 2725 The following keywords are supported in addition to the common template
2723 2726 keywords and functions. See also :hg:`help templates`.
2724 2727
2725 2728 :flags: String. Character denoting file's symlink and executable bits.
2726 2729 :path: String. Repository-absolute path of the file.
2727 2730 :size: Integer. Size of the file in bytes.
2728 2731
2729 2732 Examples:
2730 2733
2731 2734 - list all files under the current directory::
2732 2735
2733 2736 hg files .
2734 2737
2735 2738 - show sizes and flags for the current revision::
2736 2739
2737 2740 hg files -vr .
2738 2741
2739 2742 - list all files named README::
2740 2743
2741 2744 hg files -I "**/README"
2742 2745
2743 2746 - list all binary files::
2744 2747
2745 2748 hg files "set:binary()"
2746 2749
2747 2750 - find files containing a regular expression::
2748 2751
2749 2752 hg files "set:grep('bob')"
2750 2753
2751 2754 - search tracked file contents with xargs and grep::
2752 2755
2753 2756 hg files -0 | xargs -0 grep foo
2754 2757
2755 2758 See :hg:`help patterns` and :hg:`help filesets` for more information
2756 2759 on specifying file patterns.
2757 2760
2758 2761 Returns 0 if a match is found, 1 otherwise.
2759 2762
2760 2763 """
2761 2764
2762 2765 opts = pycompat.byteskwargs(opts)
2763 2766 rev = opts.get(b'rev')
2764 2767 if rev:
2765 2768 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
2766 2769 ctx = scmutil.revsingle(repo, rev, None)
2767 2770
2768 2771 end = b'\n'
2769 2772 if opts.get(b'print0'):
2770 2773 end = b'\0'
2771 2774 fmt = b'%s' + end
2772 2775
2773 2776 m = scmutil.match(ctx, pats, opts)
2774 2777 ui.pager(b'files')
2775 2778 uipathfn = scmutil.getuipathfn(ctx.repo(), legacyrelativevalue=True)
2776 2779 with ui.formatter(b'files', opts) as fm:
2777 2780 return cmdutil.files(
2778 2781 ui, ctx, m, uipathfn, fm, fmt, opts.get(b'subrepos')
2779 2782 )
2780 2783
2781 2784
2782 2785 @command(
2783 2786 b'forget',
2784 2787 [(b'i', b'interactive', None, _(b'use interactive mode')),]
2785 2788 + walkopts
2786 2789 + dryrunopts,
2787 2790 _(b'[OPTION]... FILE...'),
2788 2791 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
2789 2792 helpbasic=True,
2790 2793 inferrepo=True,
2791 2794 )
2792 2795 def forget(ui, repo, *pats, **opts):
2793 2796 """forget the specified files on the next commit
2794 2797
2795 2798 Mark the specified files so they will no longer be tracked
2796 2799 after the next commit.
2797 2800
2798 2801 This only removes files from the current branch, not from the
2799 2802 entire project history, and it does not delete them from the
2800 2803 working directory.
2801 2804
2802 2805 To delete the file from the working directory, see :hg:`remove`.
2803 2806
2804 2807 To undo a forget before the next commit, see :hg:`add`.
2805 2808
2806 2809 .. container:: verbose
2807 2810
2808 2811 Examples:
2809 2812
2810 2813 - forget newly-added binary files::
2811 2814
2812 2815 hg forget "set:added() and binary()"
2813 2816
2814 2817 - forget files that would be excluded by .hgignore::
2815 2818
2816 2819 hg forget "set:hgignore()"
2817 2820
2818 2821 Returns 0 on success.
2819 2822 """
2820 2823
2821 2824 opts = pycompat.byteskwargs(opts)
2822 2825 if not pats:
2823 2826 raise error.Abort(_(b'no files specified'))
2824 2827
2825 2828 m = scmutil.match(repo[None], pats, opts)
2826 2829 dryrun, interactive = opts.get(b'dry_run'), opts.get(b'interactive')
2827 2830 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
2828 2831 rejected = cmdutil.forget(
2829 2832 ui,
2830 2833 repo,
2831 2834 m,
2832 2835 prefix=b"",
2833 2836 uipathfn=uipathfn,
2834 2837 explicitonly=False,
2835 2838 dryrun=dryrun,
2836 2839 interactive=interactive,
2837 2840 )[0]
2838 2841 return rejected and 1 or 0
2839 2842
2840 2843
2841 2844 @command(
2842 2845 b'graft',
2843 2846 [
2844 2847 (b'r', b'rev', [], _(b'revisions to graft'), _(b'REV')),
2845 2848 (
2846 2849 b'',
2847 2850 b'base',
2848 2851 b'',
2849 2852 _(b'base revision when doing the graft merge (ADVANCED)'),
2850 2853 _(b'REV'),
2851 2854 ),
2852 2855 (b'c', b'continue', False, _(b'resume interrupted graft')),
2853 2856 (b'', b'stop', False, _(b'stop interrupted graft')),
2854 2857 (b'', b'abort', False, _(b'abort interrupted graft')),
2855 2858 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
2856 2859 (b'', b'log', None, _(b'append graft info to log message')),
2857 2860 (
2858 2861 b'',
2859 2862 b'no-commit',
2860 2863 None,
2861 2864 _(b"don't commit, just apply the changes in working directory"),
2862 2865 ),
2863 2866 (b'f', b'force', False, _(b'force graft')),
2864 2867 (
2865 2868 b'D',
2866 2869 b'currentdate',
2867 2870 False,
2868 2871 _(b'record the current date as commit date'),
2869 2872 ),
2870 2873 (
2871 2874 b'U',
2872 2875 b'currentuser',
2873 2876 False,
2874 2877 _(b'record the current user as committer'),
2875 2878 ),
2876 2879 ]
2877 2880 + commitopts2
2878 2881 + mergetoolopts
2879 2882 + dryrunopts,
2880 2883 _(b'[OPTION]... [-r REV]... REV...'),
2881 2884 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
2882 2885 )
2883 2886 def graft(ui, repo, *revs, **opts):
2884 2887 '''copy changes from other branches onto the current branch
2885 2888
2886 2889 This command uses Mercurial's merge logic to copy individual
2887 2890 changes from other branches without merging branches in the
2888 2891 history graph. This is sometimes known as 'backporting' or
2889 2892 'cherry-picking'. By default, graft will copy user, date, and
2890 2893 description from the source changesets.
2891 2894
2892 2895 Changesets that are ancestors of the current revision, that have
2893 2896 already been grafted, or that are merges will be skipped.
2894 2897
2895 2898 If --log is specified, log messages will have a comment appended
2896 2899 of the form::
2897 2900
2898 2901 (grafted from CHANGESETHASH)
2899 2902
2900 2903 If --force is specified, revisions will be grafted even if they
2901 2904 are already ancestors of, or have been grafted to, the destination.
2902 2905 This is useful when the revisions have since been backed out.
2903 2906
2904 2907 If a graft merge results in conflicts, the graft process is
2905 2908 interrupted so that the current merge can be manually resolved.
2906 2909 Once all conflicts are addressed, the graft process can be
2907 2910 continued with the -c/--continue option.
2908 2911
2909 2912 The -c/--continue option reapplies all the earlier options.
2910 2913
2911 2914 .. container:: verbose
2912 2915
2913 2916 The --base option exposes more of how graft internally uses merge with a
2914 2917 custom base revision. --base can be used to specify an ancestor other
2915 2918 than the first and only parent.
2916 2919
2917 2920 The command::
2918 2921
2919 2922 hg graft -r 345 --base 234
2920 2923
2921 2924 is thus pretty much the same as::
2922 2925
2923 2926 hg diff -r 234 -r 345 | hg import
2924 2927
2925 2928 but using merge to resolve conflicts and track moved files.
2926 2929
2927 2930 The result of a merge can thus be backported as a single commit by
2928 2931 specifying one of the merge parents as base, effectively grafting the
2929 2932 changes from the other side.
2930 2933
2931 2934 It is also possible to collapse multiple changesets and clean up history
2932 2935 by specifying another ancestor as base, much like rebase --collapse
2933 2936 --keep.
2934 2937
2935 2938 The commit message can be tweaked after the fact using :hg:`commit --amend`.
2936 2939
2937 2940 For using non-ancestors as the base to back out changes, see the backout
2938 2941 command and its hidden --parent option.
2939 2942
2940 2943 .. container:: verbose
2941 2944
2942 2945 Examples:
2943 2946
2944 2947 - copy a single change to the stable branch and edit its description::
2945 2948
2946 2949 hg update stable
2947 2950 hg graft --edit 9393
2948 2951
2949 2952 - graft a range of changesets with one exception, updating dates::
2950 2953
2951 2954 hg graft -D "2085::2093 and not 2091"
2952 2955
2953 2956 - continue a graft after resolving conflicts::
2954 2957
2955 2958 hg graft -c
2956 2959
2957 2960 - show the source of a grafted changeset::
2958 2961
2959 2962 hg log --debug -r .
2960 2963
2961 2964 - show revisions sorted by date::
2962 2965
2963 2966 hg log -r "sort(all(), date)"
2964 2967
2965 2968 - backport the result of a merge as a single commit::
2966 2969
2967 2970 hg graft -r 123 --base 123^
2968 2971
2969 2972 - land a feature branch as one changeset::
2970 2973
2971 2974 hg up -cr default
2972 2975 hg graft -r featureX --base "ancestor('featureX', 'default')"
2973 2976
2974 2977 See :hg:`help revisions` for more about specifying revisions.
2975 2978
2976 2979 Returns 0 on successful completion, 1 if there are unresolved files.
2977 2980 '''
2978 2981 with repo.wlock():
2979 2982 return _dograft(ui, repo, *revs, **opts)
2980 2983
2981 2984
2982 2985 def _dograft(ui, repo, *revs, **opts):
2983 2986 opts = pycompat.byteskwargs(opts)
2984 2987 if revs and opts.get(b'rev'):
2985 2988 ui.warn(
2986 2989 _(
2987 2990 b'warning: inconsistent use of --rev might give unexpected '
2988 2991 b'revision ordering!\n'
2989 2992 )
2990 2993 )
2991 2994
2992 2995 revs = list(revs)
2993 2996 revs.extend(opts.get(b'rev'))
2994 2997 # a dict of data to be stored in state file
2995 2998 statedata = {}
2996 2999 # list of new nodes created by ongoing graft
2997 3000 statedata[b'newnodes'] = []
2998 3001
2999 3002 cmdutil.resolvecommitoptions(ui, opts)
3000 3003
3001 3004 editor = cmdutil.getcommiteditor(
3002 3005 editform=b'graft', **pycompat.strkwargs(opts)
3003 3006 )
3004 3007
3005 3008 cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
3006 3009
3007 3010 cont = False
3008 3011 if opts.get(b'no_commit'):
3009 3012 cmdutil.check_incompatible_arguments(
3010 3013 opts,
3011 3014 b'no_commit',
3012 3015 [b'edit', b'currentuser', b'currentdate', b'log'],
3013 3016 )
3014 3017
3015 3018 graftstate = statemod.cmdstate(repo, b'graftstate')
3016 3019
3017 3020 if opts.get(b'stop'):
3018 3021 cmdutil.check_incompatible_arguments(
3019 3022 opts,
3020 3023 b'stop',
3021 3024 [
3022 3025 b'edit',
3023 3026 b'log',
3024 3027 b'user',
3025 3028 b'date',
3026 3029 b'currentdate',
3027 3030 b'currentuser',
3028 3031 b'rev',
3029 3032 ],
3030 3033 )
3031 3034 return _stopgraft(ui, repo, graftstate)
3032 3035 elif opts.get(b'abort'):
3033 3036 cmdutil.check_incompatible_arguments(
3034 3037 opts,
3035 3038 b'abort',
3036 3039 [
3037 3040 b'edit',
3038 3041 b'log',
3039 3042 b'user',
3040 3043 b'date',
3041 3044 b'currentdate',
3042 3045 b'currentuser',
3043 3046 b'rev',
3044 3047 ],
3045 3048 )
3046 3049 return cmdutil.abortgraft(ui, repo, graftstate)
3047 3050 elif opts.get(b'continue'):
3048 3051 cont = True
3049 3052 if revs:
3050 3053 raise error.Abort(_(b"can't specify --continue and revisions"))
3051 3054 # read in unfinished revisions
3052 3055 if graftstate.exists():
3053 3056 statedata = cmdutil.readgraftstate(repo, graftstate)
3054 3057 if statedata.get(b'date'):
3055 3058 opts[b'date'] = statedata[b'date']
3056 3059 if statedata.get(b'user'):
3057 3060 opts[b'user'] = statedata[b'user']
3058 3061 if statedata.get(b'log'):
3059 3062 opts[b'log'] = True
3060 3063 if statedata.get(b'no_commit'):
3061 3064 opts[b'no_commit'] = statedata.get(b'no_commit')
3062 3065 if statedata.get(b'base'):
3063 3066 opts[b'base'] = statedata.get(b'base')
3064 3067 nodes = statedata[b'nodes']
3065 3068 revs = [repo[node].rev() for node in nodes]
3066 3069 else:
3067 3070 cmdutil.wrongtooltocontinue(repo, _(b'graft'))
3068 3071 else:
3069 3072 if not revs:
3070 3073 raise error.Abort(_(b'no revisions specified'))
3071 3074 cmdutil.checkunfinished(repo)
3072 3075 cmdutil.bailifchanged(repo)
3073 3076 revs = scmutil.revrange(repo, revs)
3074 3077
3075 3078 skipped = set()
3076 3079 basectx = None
3077 3080 if opts.get(b'base'):
3078 3081 basectx = scmutil.revsingle(repo, opts[b'base'], None)
3079 3082 if basectx is None:
3080 3083 # check for merges
3081 3084 for rev in repo.revs(b'%ld and merge()', revs):
3082 3085 ui.warn(_(b'skipping ungraftable merge revision %d\n') % rev)
3083 3086 skipped.add(rev)
3084 3087 revs = [r for r in revs if r not in skipped]
3085 3088 if not revs:
3086 3089 return -1
3087 3090 if basectx is not None and len(revs) != 1:
3088 3091 raise error.Abort(_(b'only one revision allowed with --base'))
3089 3092
3090 3093 # Don't check in the --continue case, in effect retaining --force across
3091 3094 # --continues. That's because without --force, any revisions we decided to
3092 3095 # skip would have been filtered out here, so they wouldn't have made their
3093 3096 # way to the graftstate. With --force, any revisions we would have otherwise
3094 3097 # skipped would not have been filtered out, and if they hadn't been applied
3095 3098 # already, they'd have been in the graftstate.
3096 3099 if not (cont or opts.get(b'force')) and basectx is None:
3097 3100 # check for ancestors of dest branch
3098 3101 ancestors = repo.revs(b'%ld & (::.)', revs)
3099 3102 for rev in ancestors:
3100 3103 ui.warn(_(b'skipping ancestor revision %d:%s\n') % (rev, repo[rev]))
3101 3104
3102 3105 revs = [r for r in revs if r not in ancestors]
3103 3106
3104 3107 if not revs:
3105 3108 return -1
3106 3109
3107 3110 # analyze revs for earlier grafts
3108 3111 ids = {}
3109 3112 for ctx in repo.set(b"%ld", revs):
3110 3113 ids[ctx.hex()] = ctx.rev()
3111 3114 n = ctx.extra().get(b'source')
3112 3115 if n:
3113 3116 ids[n] = ctx.rev()
3114 3117
3115 3118 # check ancestors for earlier grafts
3116 3119 ui.debug(b'scanning for duplicate grafts\n')
3117 3120
3118 3121 # The only changesets we can be sure don't contain grafts of any
3119 3122 # revs are the ones that are common ancestors of *all* revs:
3120 3123 for rev in repo.revs(b'only(%d,ancestor(%ld))', repo[b'.'].rev(), revs):
3121 3124 ctx = repo[rev]
3122 3125 n = ctx.extra().get(b'source')
3123 3126 if n in ids:
3124 3127 try:
3125 3128 r = repo[n].rev()
3126 3129 except error.RepoLookupError:
3127 3130 r = None
3128 3131 if r in revs:
3129 3132 ui.warn(
3130 3133 _(
3131 3134 b'skipping revision %d:%s '
3132 3135 b'(already grafted to %d:%s)\n'
3133 3136 )
3134 3137 % (r, repo[r], rev, ctx)
3135 3138 )
3136 3139 revs.remove(r)
3137 3140 elif ids[n] in revs:
3138 3141 if r is None:
3139 3142 ui.warn(
3140 3143 _(
3141 3144 b'skipping already grafted revision %d:%s '
3142 3145 b'(%d:%s also has unknown origin %s)\n'
3143 3146 )
3144 3147 % (ids[n], repo[ids[n]], rev, ctx, n[:12])
3145 3148 )
3146 3149 else:
3147 3150 ui.warn(
3148 3151 _(
3149 3152 b'skipping already grafted revision %d:%s '
3150 3153 b'(%d:%s also has origin %d:%s)\n'
3151 3154 )
3152 3155 % (ids[n], repo[ids[n]], rev, ctx, r, n[:12])
3153 3156 )
3154 3157 revs.remove(ids[n])
3155 3158 elif ctx.hex() in ids:
3156 3159 r = ids[ctx.hex()]
3157 3160 if r in revs:
3158 3161 ui.warn(
3159 3162 _(
3160 3163 b'skipping already grafted revision %d:%s '
3161 3164 b'(was grafted from %d:%s)\n'
3162 3165 )
3163 3166 % (r, repo[r], rev, ctx)
3164 3167 )
3165 3168 revs.remove(r)
3166 3169 if not revs:
3167 3170 return -1
3168 3171
3169 3172 if opts.get(b'no_commit'):
3170 3173 statedata[b'no_commit'] = True
3171 3174 if opts.get(b'base'):
3172 3175 statedata[b'base'] = opts[b'base']
3173 3176 for pos, ctx in enumerate(repo.set(b"%ld", revs)):
3174 3177 desc = b'%d:%s "%s"' % (
3175 3178 ctx.rev(),
3176 3179 ctx,
3177 3180 ctx.description().split(b'\n', 1)[0],
3178 3181 )
3179 3182 names = repo.nodetags(ctx.node()) + repo.nodebookmarks(ctx.node())
3180 3183 if names:
3181 3184 desc += b' (%s)' % b' '.join(names)
3182 3185 ui.status(_(b'grafting %s\n') % desc)
3183 3186 if opts.get(b'dry_run'):
3184 3187 continue
3185 3188
3186 3189 source = ctx.extra().get(b'source')
3187 3190 extra = {}
3188 3191 if source:
3189 3192 extra[b'source'] = source
3190 3193 extra[b'intermediate-source'] = ctx.hex()
3191 3194 else:
3192 3195 extra[b'source'] = ctx.hex()
3193 3196 user = ctx.user()
3194 3197 if opts.get(b'user'):
3195 3198 user = opts[b'user']
3196 3199 statedata[b'user'] = user
3197 3200 date = ctx.date()
3198 3201 if opts.get(b'date'):
3199 3202 date = opts[b'date']
3200 3203 statedata[b'date'] = date
3201 3204 message = ctx.description()
3202 3205 if opts.get(b'log'):
3203 3206 message += b'\n(grafted from %s)' % ctx.hex()
3204 3207 statedata[b'log'] = True
3205 3208
3206 3209 # we don't merge the first commit when continuing
3207 3210 if not cont:
3208 3211 # perform the graft merge with p1(rev) as 'ancestor'
3209 3212 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
3210 3213 base = ctx.p1() if basectx is None else basectx
3211 3214 with ui.configoverride(overrides, b'graft'):
3212 3215 stats = mergemod.graft(repo, ctx, base, [b'local', b'graft'])
3213 3216 # report any conflicts
3214 3217 if stats.unresolvedcount > 0:
3215 3218 # write out state for --continue
3216 3219 nodes = [repo[rev].hex() for rev in revs[pos:]]
3217 3220 statedata[b'nodes'] = nodes
3218 3221 stateversion = 1
3219 3222 graftstate.save(stateversion, statedata)
3220 3223 ui.error(_(b"abort: unresolved conflicts, can't continue\n"))
3221 3224 ui.error(_(b"(use 'hg resolve' and 'hg graft --continue')\n"))
3222 3225 return 1
3223 3226 else:
3224 3227 cont = False
3225 3228
3226 3229 # commit if --no-commit is false
3227 3230 if not opts.get(b'no_commit'):
3228 3231 node = repo.commit(
3229 3232 text=message, user=user, date=date, extra=extra, editor=editor
3230 3233 )
3231 3234 if node is None:
3232 3235 ui.warn(
3233 3236 _(b'note: graft of %d:%s created no changes to commit\n')
3234 3237 % (ctx.rev(), ctx)
3235 3238 )
3236 3239 # check that 'newnodes' exists because old state files won't have it
3237 3240 elif statedata.get(b'newnodes') is not None:
3238 3241 statedata[b'newnodes'].append(node)
3239 3242
3240 3243 # remove state when we complete successfully
3241 3244 if not opts.get(b'dry_run'):
3242 3245 graftstate.delete()
3243 3246
3244 3247 return 0
3245 3248
3246 3249
3247 3250 def _stopgraft(ui, repo, graftstate):
3248 3251 """stop the interrupted graft"""
3249 3252 if not graftstate.exists():
3250 3253 raise error.Abort(_(b"no interrupted graft found"))
3251 3254 pctx = repo[b'.']
3252 3255 mergemod.clean_update(pctx)
3253 3256 graftstate.delete()
3254 3257 ui.status(_(b"stopped the interrupted graft\n"))
3255 3258 ui.status(_(b"working directory is now at %s\n") % pctx.hex()[:12])
3256 3259 return 0
3257 3260
3258 3261
3259 3262 statemod.addunfinished(
3260 3263 b'graft',
3261 3264 fname=b'graftstate',
3262 3265 clearable=True,
3263 3266 stopflag=True,
3264 3267 continueflag=True,
3265 3268 abortfunc=cmdutil.hgabortgraft,
3266 3269 cmdhint=_(b"use 'hg graft --continue' or 'hg graft --stop' to stop"),
3267 3270 )
3268 3271
3269 3272
3270 3273 @command(
3271 3274 b'grep',
3272 3275 [
3273 3276 (b'0', b'print0', None, _(b'end fields with NUL')),
3274 3277 (b'', b'all', None, _(b'an alias to --diff (DEPRECATED)')),
3275 3278 (
3276 3279 b'',
3277 3280 b'diff',
3278 3281 None,
3279 3282 _(
3280 3283 b'search revision differences for when the pattern was added '
3281 3284 b'or removed'
3282 3285 ),
3283 3286 ),
3284 3287 (b'a', b'text', None, _(b'treat all files as text')),
3285 3288 (
3286 3289 b'f',
3287 3290 b'follow',
3288 3291 None,
3289 3292 _(
3290 3293 b'follow changeset history,'
3291 3294 b' or file history across copies and renames'
3292 3295 ),
3293 3296 ),
3294 3297 (b'i', b'ignore-case', None, _(b'ignore case when matching')),
3295 3298 (
3296 3299 b'l',
3297 3300 b'files-with-matches',
3298 3301 None,
3299 3302 _(b'print only filenames and revisions that match'),
3300 3303 ),
3301 3304 (b'n', b'line-number', None, _(b'print matching line numbers')),
3302 3305 (
3303 3306 b'r',
3304 3307 b'rev',
3305 3308 [],
3306 3309 _(b'search files changed within revision range'),
3307 3310 _(b'REV'),
3308 3311 ),
3309 3312 (
3310 3313 b'',
3311 3314 b'all-files',
3312 3315 None,
3313 3316 _(
3314 3317 b'include all files in the changeset while grepping (DEPRECATED)'
3315 3318 ),
3316 3319 ),
3317 3320 (b'u', b'user', None, _(b'list the author (long with -v)')),
3318 3321 (b'd', b'date', None, _(b'list the date (short with -q)')),
3319 3322 ]
3320 3323 + formatteropts
3321 3324 + walkopts,
3322 3325 _(b'[--diff] [OPTION]... PATTERN [FILE]...'),
3323 3326 helpcategory=command.CATEGORY_FILE_CONTENTS,
3324 3327 inferrepo=True,
3325 3328 intents={INTENT_READONLY},
3326 3329 )
3327 3330 def grep(ui, repo, pattern, *pats, **opts):
3328 3331 """search for a pattern in specified files
3329 3332
3330 3333 Search the working directory or revision history for a regular
3331 3334 expression in the specified files or across the entire repository.
3332 3335
3333 3336 By default, grep searches the repository files in the working
3334 3337 directory and prints the files where it finds a match. To specify
3335 3338 historical revisions instead of the working directory, use the
3336 3339 --rev flag.
3337 3340
3338 3341 To instead search historical revision differences that contain a
3339 3342 change in match status ("-" for a match that becomes a non-match,
3340 3343 or "+" for a non-match that becomes a match), use the --diff flag.
3341 3344
3342 3345 PATTERN can be any Python (roughly Perl-compatible) regular
3343 3346 expression.
3344 3347
3345 3348 If no FILEs are specified and the --rev flag isn't supplied, all
3346 3349 files in the working directory are searched. When using the --rev
3347 3350 flag and specifying FILEs, use the --follow argument to also
3348 3351 follow the specified FILEs across renames and copies.
3349 3352
3350 3353 .. container:: verbose
3351 3354
3352 3355 Template:
3353 3356
3354 3357 The following keywords are supported in addition to the common template
3355 3358 keywords and functions. See also :hg:`help templates`.
3356 3359
3357 3360 :change: String. Character denoting insertion ``+`` or removal ``-``.
3358 3361 Available if ``--diff`` is specified.
3359 3362 :lineno: Integer. Line number of the match.
3360 3363 :path: String. Repository-absolute path of the file.
3361 3364 :texts: List of text chunks.
3362 3365
3363 3366 And each entry of ``{texts}`` provides the following sub-keywords.
3364 3367
3365 3368 :matched: Boolean. True if the chunk matches the specified pattern.
3366 3369 :text: String. Chunk content.
3367 3370
3368 3371 See :hg:`help templates.operators` for the list expansion syntax.
3369 3372
3370 3373 Returns 0 if a match is found, 1 otherwise.
3371 3374
3372 3375 """
3373 3376 cmdutil.check_incompatible_arguments(opts, 'all_files', ['all', 'diff'])
3374 3377 opts = pycompat.byteskwargs(opts)
3375 3378 diff = opts.get(b'all') or opts.get(b'diff')
3376 3379 follow = opts.get(b'follow')
3377 3380 if opts.get(b'all_files') is None and not diff:
3378 3381 opts[b'all_files'] = True
3379 3382 plaingrep = (
3380 3383 opts.get(b'all_files')
3381 3384 and not opts.get(b'rev')
3382 3385 and not opts.get(b'follow')
3383 3386 )
3384 3387 all_files = opts.get(b'all_files')
3385 3388 if plaingrep:
3386 3389 opts[b'rev'] = [b'wdir()']
3387 3390
3388 3391 reflags = re.M
3389 3392 if opts.get(b'ignore_case'):
3390 3393 reflags |= re.I
3391 3394 try:
3392 3395 regexp = util.re.compile(pattern, reflags)
3393 3396 except re.error as inst:
3394 3397 ui.warn(
3395 3398 _(b"grep: invalid match pattern: %s\n") % pycompat.bytestr(inst)
3396 3399 )
3397 3400 return 1
3398 3401 sep, eol = b':', b'\n'
3399 3402 if opts.get(b'print0'):
3400 3403 sep = eol = b'\0'
3401 3404
3402 3405 searcher = grepmod.grepsearcher(
3403 3406 ui, repo, regexp, all_files=all_files, diff=diff, follow=follow
3404 3407 )
3405 3408
3406 3409 getfile = searcher._getfile
3407 3410
3408 3411 uipathfn = scmutil.getuipathfn(repo)
3409 3412
3410 3413 def display(fm, fn, ctx, pstates, states):
3411 3414 rev = scmutil.intrev(ctx)
3412 3415 if fm.isplain():
3413 3416 formatuser = ui.shortuser
3414 3417 else:
3415 3418 formatuser = pycompat.bytestr
3416 3419 if ui.quiet:
3417 3420 datefmt = b'%Y-%m-%d'
3418 3421 else:
3419 3422 datefmt = b'%a %b %d %H:%M:%S %Y %1%2'
3420 3423 found = False
3421 3424
3422 3425 @util.cachefunc
3423 3426 def binary():
3424 3427 flog = getfile(fn)
3425 3428 try:
3426 3429 return stringutil.binary(flog.read(ctx.filenode(fn)))
3427 3430 except error.WdirUnsupported:
3428 3431 return ctx[fn].isbinary()
3429 3432
3430 3433 fieldnamemap = {b'linenumber': b'lineno'}
3431 3434 if diff:
3432 3435 iter = grepmod.difflinestates(pstates, states)
3433 3436 else:
3434 3437 iter = [(b'', l) for l in states]
3435 3438 for change, l in iter:
3436 3439 fm.startitem()
3437 3440 fm.context(ctx=ctx)
3438 3441 fm.data(node=fm.hexfunc(scmutil.binnode(ctx)), path=fn)
3439 3442 fm.plain(uipathfn(fn), label=b'grep.filename')
3440 3443
3441 3444 cols = [
3442 3445 (b'rev', b'%d', rev, not plaingrep, b''),
3443 3446 (
3444 3447 b'linenumber',
3445 3448 b'%d',
3446 3449 l.linenum,
3447 3450 opts.get(b'line_number'),
3448 3451 b'',
3449 3452 ),
3450 3453 ]
3451 3454 if diff:
3452 3455 cols.append(
3453 3456 (
3454 3457 b'change',
3455 3458 b'%s',
3456 3459 change,
3457 3460 True,
3458 3461 b'grep.inserted '
3459 3462 if change == b'+'
3460 3463 else b'grep.deleted ',
3461 3464 )
3462 3465 )
3463 3466 cols.extend(
3464 3467 [
3465 3468 (
3466 3469 b'user',
3467 3470 b'%s',
3468 3471 formatuser(ctx.user()),
3469 3472 opts.get(b'user'),
3470 3473 b'',
3471 3474 ),
3472 3475 (
3473 3476 b'date',
3474 3477 b'%s',
3475 3478 fm.formatdate(ctx.date(), datefmt),
3476 3479 opts.get(b'date'),
3477 3480 b'',
3478 3481 ),
3479 3482 ]
3480 3483 )
3481 3484 for name, fmt, data, cond, extra_label in cols:
3482 3485 if cond:
3483 3486 fm.plain(sep, label=b'grep.sep')
3484 3487 field = fieldnamemap.get(name, name)
3485 3488 label = extra_label + (b'grep.%s' % name)
3486 3489 fm.condwrite(cond, field, fmt, data, label=label)
3487 3490 if not opts.get(b'files_with_matches'):
3488 3491 fm.plain(sep, label=b'grep.sep')
3489 3492 if not opts.get(b'text') and binary():
3490 3493 fm.plain(_(b" Binary file matches"))
3491 3494 else:
3492 3495 displaymatches(fm.nested(b'texts', tmpl=b'{text}'), l)
3493 3496 fm.plain(eol)
3494 3497 found = True
3495 3498 if opts.get(b'files_with_matches'):
3496 3499 break
3497 3500 return found
3498 3501
3499 3502 def displaymatches(fm, l):
3500 3503 p = 0
3501 3504 for s, e in l.findpos(regexp):
3502 3505 if p < s:
3503 3506 fm.startitem()
3504 3507 fm.write(b'text', b'%s', l.line[p:s])
3505 3508 fm.data(matched=False)
3506 3509 fm.startitem()
3507 3510 fm.write(b'text', b'%s', l.line[s:e], label=b'grep.match')
3508 3511 fm.data(matched=True)
3509 3512 p = e
3510 3513 if p < len(l.line):
3511 3514 fm.startitem()
3512 3515 fm.write(b'text', b'%s', l.line[p:])
3513 3516 fm.data(matched=False)
3514 3517 fm.end()
3515 3518
3516 3519 found = False
3517 3520
3518 3521 wopts = logcmdutil.walkopts(
3519 3522 pats=pats,
3520 3523 opts=opts,
3521 3524 revspec=opts[b'rev'],
3522 3525 include_pats=opts[b'include'],
3523 3526 exclude_pats=opts[b'exclude'],
3524 3527 follow=follow,
3525 3528 force_changelog_traversal=all_files,
3526 3529 filter_revisions_by_pats=not all_files,
3527 3530 )
3528 3531 revs, makefilematcher = logcmdutil.makewalker(repo, wopts)
3529 3532
3530 3533 ui.pager(b'grep')
3531 3534 fm = ui.formatter(b'grep', opts)
3532 3535 for fn, ctx, pstates, states in searcher.searchfiles(revs, makefilematcher):
3533 3536 r = display(fm, fn, ctx, pstates, states)
3534 3537 found = found or r
3535 3538 if r and not diff and not all_files:
3536 3539 searcher.skipfile(fn, ctx.rev())
3537 3540 fm.end()
3538 3541
3539 3542 return not found
3540 3543
3541 3544
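# The grep() docstring above lists template keywords but no command-line examples;
# a few hedged sketches using only the flags defined above (pattern and paths are
# illustrative):
#
#   hg grep -n TODO              # search the working directory, print line numbers
#   hg grep --diff TODO          # revisions where a TODO match was added or removed
#   hg grep -l -i error src/     # print only matching file names, ignoring case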
3542 3545 @command(
3543 3546 b'heads',
3544 3547 [
3545 3548 (
3546 3549 b'r',
3547 3550 b'rev',
3548 3551 b'',
3549 3552 _(b'show only heads which are descendants of STARTREV'),
3550 3553 _(b'STARTREV'),
3551 3554 ),
3552 3555 (b't', b'topo', False, _(b'show topological heads only')),
3553 3556 (
3554 3557 b'a',
3555 3558 b'active',
3556 3559 False,
3557 3560 _(b'show active branchheads only (DEPRECATED)'),
3558 3561 ),
3559 3562 (b'c', b'closed', False, _(b'show normal and closed branch heads')),
3560 3563 ]
3561 3564 + templateopts,
3562 3565 _(b'[-ct] [-r STARTREV] [REV]...'),
3563 3566 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
3564 3567 intents={INTENT_READONLY},
3565 3568 )
3566 3569 def heads(ui, repo, *branchrevs, **opts):
3567 3570 """show branch heads
3568 3571
3569 3572 With no arguments, show all open branch heads in the repository.
3570 3573 Branch heads are changesets that have no descendants on the
3571 3574 same branch. They are where development generally takes place and
3572 3575 are the usual targets for update and merge operations.
3573 3576
3574 3577 If one or more REVs are given, only open branch heads on the
3575 3578 branches associated with the specified changesets are shown. This
3576 3579 means that you can use :hg:`heads .` to see the heads on the
3577 3580 currently checked-out branch.
3578 3581
3579 3582 If -c/--closed is specified, also show branch heads marked closed
3580 3583 (see :hg:`commit --close-branch`).
3581 3584
3582 3585 If STARTREV is specified, only those heads that are descendants of
3583 3586 STARTREV will be displayed.
3584 3587
3585 3588 If -t/--topo is specified, named branch mechanics will be ignored and only
3586 3589 topological heads (changesets with no children) will be shown.
3587 3590
3588 3591 Returns 0 if matching heads are found, 1 if not.
3589 3592 """
3590 3593
3591 3594 opts = pycompat.byteskwargs(opts)
3592 3595 start = None
3593 3596 rev = opts.get(b'rev')
3594 3597 if rev:
3595 3598 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3596 3599 start = scmutil.revsingle(repo, rev, None).node()
3597 3600
3598 3601 if opts.get(b'topo'):
3599 3602 heads = [repo[h] for h in repo.heads(start)]
3600 3603 else:
3601 3604 heads = []
3602 3605 for branch in repo.branchmap():
3603 3606 heads += repo.branchheads(branch, start, opts.get(b'closed'))
3604 3607 heads = [repo[h] for h in heads]
3605 3608
3606 3609 if branchrevs:
3607 3610 branches = {
3608 3611 repo[r].branch() for r in scmutil.revrange(repo, branchrevs)
3609 3612 }
3610 3613 heads = [h for h in heads if h.branch() in branches]
3611 3614
3612 3615 if opts.get(b'active') and branchrevs:
3613 3616 dagheads = repo.heads(start)
3614 3617 heads = [h for h in heads if h.node() in dagheads]
3615 3618
3616 3619 if branchrevs:
3617 3620 haveheads = {h.branch() for h in heads}
3618 3621 if branches - haveheads:
3619 3622 headless = b', '.join(b for b in branches - haveheads)
3620 3623 msg = _(b'no open branch heads found on branches %s')
3621 3624 if opts.get(b'rev'):
3622 3625 msg += _(b' (started at %s)') % opts[b'rev']
3623 3626 ui.warn((msg + b'\n') % headless)
3624 3627
3625 3628 if not heads:
3626 3629 return 1
3627 3630
3628 3631 ui.pager(b'heads')
3629 3632 heads = sorted(heads, key=lambda x: -(x.rev()))
3630 3633 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
3631 3634 for ctx in heads:
3632 3635 displayer.show(ctx)
3633 3636 displayer.close()
3634 3637
3635 3638
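# heads() above documents '.', -t/--topo, -c/--closed and STARTREV but has no
# examples section; a hedged sketch of common invocations drawn directly from that
# docstring (revision and branch names are illustrative):
#
#   hg heads                 # all open branch heads in the repository
#   hg heads .               # heads of the currently checked-out branch
#   hg heads --closed        # also show branch heads marked closed
#   hg heads -t              # topological heads only, ignoring named branches
#   hg heads -r 1.4 default  # heads on 'default' that descend from revision 1.4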
3636 3639 @command(
3637 3640 b'help',
3638 3641 [
3639 3642 (b'e', b'extension', None, _(b'show only help for extensions')),
3640 3643 (b'c', b'command', None, _(b'show only help for commands')),
3641 3644 (b'k', b'keyword', None, _(b'show topics matching keyword')),
3642 3645 (
3643 3646 b's',
3644 3647 b'system',
3645 3648 [],
3646 3649 _(b'show help for specific platform(s)'),
3647 3650 _(b'PLATFORM'),
3648 3651 ),
3649 3652 ],
3650 3653 _(b'[-eck] [-s PLATFORM] [TOPIC]'),
3651 3654 helpcategory=command.CATEGORY_HELP,
3652 3655 norepo=True,
3653 3656 intents={INTENT_READONLY},
3654 3657 )
3655 3658 def help_(ui, name=None, **opts):
3656 3659 """show help for a given topic or a help overview
3657 3660
3658 3661 With no arguments, print a list of commands with short help messages.
3659 3662
3660 3663 Given a topic, extension, or command name, print help for that
3661 3664 topic.
3662 3665
3663 3666 Returns 0 if successful.
3664 3667 """
3665 3668
3666 3669 keep = opts.get('system') or []
3667 3670 if len(keep) == 0:
3668 3671 if pycompat.sysplatform.startswith(b'win'):
3669 3672 keep.append(b'windows')
3670 3673 elif pycompat.sysplatform == b'OpenVMS':
3671 3674 keep.append(b'vms')
3672 3675 elif pycompat.sysplatform == b'plan9':
3673 3676 keep.append(b'plan9')
3674 3677 else:
3675 3678 keep.append(b'unix')
3676 3679 keep.append(pycompat.sysplatform.lower())
3677 3680 if ui.verbose:
3678 3681 keep.append(b'verbose')
3679 3682
3680 3683 commands = sys.modules[__name__]
3681 3684 formatted = help.formattedhelp(ui, commands, name, keep=keep, **opts)
3682 3685 ui.pager(b'help')
3683 3686 ui.write(formatted)
3684 3687
3685 3688
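# help_() above accepts an optional topic plus the -e/-c/-k/-s flags defined in
# its option table; a hedged sketch of typical uses based only on those option
# descriptions (topic names are illustrative):
#
#   hg help                     # overview: commands with their short help
#   hg help diff                # help for a single command or topic
#   hg help -c diff             # restrict the lookup to commands
#   hg help -k bookmark         # list topics whose help matches a keyword
#   hg help -s windows config   # keep the platform-specific bits for Windows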
3686 3689 @command(
3687 3690 b'identify|id',
3688 3691 [
3689 3692 (b'r', b'rev', b'', _(b'identify the specified revision'), _(b'REV')),
3690 3693 (b'n', b'num', None, _(b'show local revision number')),
3691 3694 (b'i', b'id', None, _(b'show global revision id')),
3692 3695 (b'b', b'branch', None, _(b'show branch')),
3693 3696 (b't', b'tags', None, _(b'show tags')),
3694 3697 (b'B', b'bookmarks', None, _(b'show bookmarks')),
3695 3698 ]
3696 3699 + remoteopts
3697 3700 + formatteropts,
3698 3701 _(b'[-nibtB] [-r REV] [SOURCE]'),
3699 3702 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
3700 3703 optionalrepo=True,
3701 3704 intents={INTENT_READONLY},
3702 3705 )
3703 3706 def identify(
3704 3707 ui,
3705 3708 repo,
3706 3709 source=None,
3707 3710 rev=None,
3708 3711 num=None,
3709 3712 id=None,
3710 3713 branch=None,
3711 3714 tags=None,
3712 3715 bookmarks=None,
3713 3716 **opts
3714 3717 ):
3715 3718 """identify the working directory or specified revision
3716 3719
3717 3720 Print a summary identifying the repository state at REV using one or
3718 3721 two parent hash identifiers, followed by a "+" if the working
3719 3722 directory has uncommitted changes, the branch name (if not default),
3720 3723 a list of tags, and a list of bookmarks.
3721 3724
3722 3725 When REV is not given, print a summary of the current state of the
3723 3726 repository including the working directory. Specify -r . to get
3724 3727 information about the working directory parent without scanning uncommitted changes.
3725 3728
3726 3729 Specifying a path to a repository root or Mercurial bundle will
3727 3730 cause lookup to operate on that repository/bundle.
3728 3731
3729 3732 .. container:: verbose
3730 3733
3731 3734 Template:
3732 3735
3733 3736 The following keywords are supported in addition to the common template
3734 3737 keywords and functions. See also :hg:`help templates`.
3735 3738
3736 3739 :dirty: String. Character ``+`` denoting if the working directory has
3737 3740 uncommitted changes.
3738 3741 :id: String. One or two nodes, optionally followed by ``+``.
3739 3742 :parents: List of strings. Parent nodes of the changeset.
3740 3743
3741 3744 Examples:
3742 3745
3743 3746 - generate a build identifier for the working directory::
3744 3747
3745 3748 hg id --id > build-id.dat
3746 3749
3747 3750 - find the revision corresponding to a tag::
3748 3751
3749 3752 hg id -n -r 1.3
3750 3753
3751 3754 - check the most recent revision of a remote repository::
3752 3755
3753 3756 hg id -r tip https://www.mercurial-scm.org/repo/hg/
3754 3757
3755 3758 See :hg:`log` for generating more information about specific revisions,
3756 3759 including full hash identifiers.
3757 3760
3758 3761 Returns 0 if successful.
3759 3762 """
3760 3763
3761 3764 opts = pycompat.byteskwargs(opts)
3762 3765 if not repo and not source:
3763 3766 raise error.Abort(
3764 3767 _(b"there is no Mercurial repository here (.hg not found)")
3765 3768 )
3766 3769
3767 3770 default = not (num or id or branch or tags or bookmarks)
3768 3771 output = []
3769 3772 revs = []
3770 3773
3771 3774 if source:
3772 3775 source, branches = hg.parseurl(ui.expandpath(source))
3773 3776 peer = hg.peer(repo or ui, opts, source) # only pass ui when no repo
3774 3777 repo = peer.local()
3775 3778 revs, checkout = hg.addbranchrevs(repo, peer, branches, None)
3776 3779
3777 3780 fm = ui.formatter(b'identify', opts)
3778 3781 fm.startitem()
3779 3782
3780 3783 if not repo:
3781 3784 if num or branch or tags:
3782 3785 raise error.Abort(
3783 3786 _(b"can't query remote revision number, branch, or tags")
3784 3787 )
3785 3788 if not rev and revs:
3786 3789 rev = revs[0]
3787 3790 if not rev:
3788 3791 rev = b"tip"
3789 3792
3790 3793 remoterev = peer.lookup(rev)
3791 3794 hexrev = fm.hexfunc(remoterev)
3792 3795 if default or id:
3793 3796 output = [hexrev]
3794 3797 fm.data(id=hexrev)
3795 3798
3796 3799 @util.cachefunc
3797 3800 def getbms():
3798 3801 bms = []
3799 3802
3800 3803 if b'bookmarks' in peer.listkeys(b'namespaces'):
3801 3804 hexremoterev = hex(remoterev)
3802 3805 bms = [
3803 3806 bm
3804 3807 for bm, bmr in pycompat.iteritems(
3805 3808 peer.listkeys(b'bookmarks')
3806 3809 )
3807 3810 if bmr == hexremoterev
3808 3811 ]
3809 3812
3810 3813 return sorted(bms)
3811 3814
3812 3815 if fm.isplain():
3813 3816 if bookmarks:
3814 3817 output.extend(getbms())
3815 3818 elif default and not ui.quiet:
3816 3819 # multiple bookmarks for a single parent separated by '/'
3817 3820 bm = b'/'.join(getbms())
3818 3821 if bm:
3819 3822 output.append(bm)
3820 3823 else:
3821 3824 fm.data(node=hex(remoterev))
3822 3825 if bookmarks or b'bookmarks' in fm.datahint():
3823 3826 fm.data(bookmarks=fm.formatlist(getbms(), name=b'bookmark'))
3824 3827 else:
3825 3828 if rev:
3826 3829 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
3827 3830 ctx = scmutil.revsingle(repo, rev, None)
3828 3831
3829 3832 if ctx.rev() is None:
3830 3833 ctx = repo[None]
3831 3834 parents = ctx.parents()
3832 3835 taglist = []
3833 3836 for p in parents:
3834 3837 taglist.extend(p.tags())
3835 3838
3836 3839 dirty = b""
3837 3840 if ctx.dirty(missing=True, merge=False, branch=False):
3838 3841 dirty = b'+'
3839 3842 fm.data(dirty=dirty)
3840 3843
3841 3844 hexoutput = [fm.hexfunc(p.node()) for p in parents]
3842 3845 if default or id:
3843 3846 output = [b"%s%s" % (b'+'.join(hexoutput), dirty)]
3844 3847 fm.data(id=b"%s%s" % (b'+'.join(hexoutput), dirty))
3845 3848
3846 3849 if num:
3847 3850 numoutput = [b"%d" % p.rev() for p in parents]
3848 3851 output.append(b"%s%s" % (b'+'.join(numoutput), dirty))
3849 3852
3850 3853 fm.data(
3851 3854 parents=fm.formatlist(
3852 3855 [fm.hexfunc(p.node()) for p in parents], name=b'node'
3853 3856 )
3854 3857 )
3855 3858 else:
3856 3859 hexoutput = fm.hexfunc(ctx.node())
3857 3860 if default or id:
3858 3861 output = [hexoutput]
3859 3862 fm.data(id=hexoutput)
3860 3863
3861 3864 if num:
3862 3865 output.append(pycompat.bytestr(ctx.rev()))
3863 3866 taglist = ctx.tags()
3864 3867
3865 3868 if default and not ui.quiet:
3866 3869 b = ctx.branch()
3867 3870 if b != b'default':
3868 3871 output.append(b"(%s)" % b)
3869 3872
3870 3873 # multiple tags for a single parent separated by '/'
3871 3874 t = b'/'.join(taglist)
3872 3875 if t:
3873 3876 output.append(t)
3874 3877
3875 3878 # multiple bookmarks for a single parent separated by '/'
3876 3879 bm = b'/'.join(ctx.bookmarks())
3877 3880 if bm:
3878 3881 output.append(bm)
3879 3882 else:
3880 3883 if branch:
3881 3884 output.append(ctx.branch())
3882 3885
3883 3886 if tags:
3884 3887 output.extend(taglist)
3885 3888
3886 3889 if bookmarks:
3887 3890 output.extend(ctx.bookmarks())
3888 3891
3889 3892 fm.data(node=ctx.hex())
3890 3893 fm.data(branch=ctx.branch())
3891 3894 fm.data(tags=fm.formatlist(taglist, name=b'tag', sep=b':'))
3892 3895 fm.data(bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'))
3893 3896 fm.context(ctx=ctx)
3894 3897
3895 3898 fm.plain(b"%s\n" % b' '.join(output))
3896 3899 fm.end()
3897 3900
3898 3901
3899 3902 @command(
3900 3903 b'import|patch',
3901 3904 [
3902 3905 (
3903 3906 b'p',
3904 3907 b'strip',
3905 3908 1,
3906 3909 _(
3907 3910 b'directory strip option for patch. This has the same '
3908 3911 b'meaning as the corresponding patch option'
3909 3912 ),
3910 3913 _(b'NUM'),
3911 3914 ),
3912 3915 (b'b', b'base', b'', _(b'base path (DEPRECATED)'), _(b'PATH')),
3913 3916 (b'', b'secret', None, _(b'use the secret phase for committing')),
3914 3917 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
3915 3918 (
3916 3919 b'f',
3917 3920 b'force',
3918 3921 None,
3919 3922 _(b'skip check for outstanding uncommitted changes (DEPRECATED)'),
3920 3923 ),
3921 3924 (
3922 3925 b'',
3923 3926 b'no-commit',
3924 3927 None,
3925 3928 _(b"don't commit, just update the working directory"),
3926 3929 ),
3927 3930 (
3928 3931 b'',
3929 3932 b'bypass',
3930 3933 None,
3931 3934 _(b"apply patch without touching the working directory"),
3932 3935 ),
3933 3936 (b'', b'partial', None, _(b'commit even if some hunks fail')),
3934 3937 (b'', b'exact', None, _(b'abort if patch would apply lossily')),
3935 3938 (b'', b'prefix', b'', _(b'apply patch to subdirectory'), _(b'DIR')),
3936 3939 (
3937 3940 b'',
3938 3941 b'import-branch',
3939 3942 None,
3940 3943 _(b'use any branch information in patch (implied by --exact)'),
3941 3944 ),
3942 3945 ]
3943 3946 + commitopts
3944 3947 + commitopts2
3945 3948 + similarityopts,
3946 3949 _(b'[OPTION]... PATCH...'),
3947 3950 helpcategory=command.CATEGORY_IMPORT_EXPORT,
3948 3951 )
3949 3952 def import_(ui, repo, patch1=None, *patches, **opts):
3950 3953 """import an ordered set of patches
3951 3954
3952 3955 Import a list of patches and commit them individually (unless
3953 3956 --no-commit is specified).
3954 3957
3955 3958 To read a patch from standard input (stdin), use "-" as the patch
3956 3959 name. If a URL is specified, the patch will be downloaded from
3957 3960 there.
3958 3961
3959 3962 Import first applies changes to the working directory (unless
3960 3963 --bypass is specified); import will abort if there are outstanding
3961 3964 changes.
3962 3965
3963 3966 Use --bypass to apply and commit patches directly to the
3964 3967 repository, without affecting the working directory. Without
3965 3968 --exact, patches will be applied on top of the working directory
3966 3969 parent revision.
3967 3970
3968 3971 You can import a patch straight from a mail message. Even patches
3969 3972 as attachments work (to use the body part, it must have type
3970 3973 text/plain or text/x-patch). The From and Subject headers of the email
3971 3974 message are used as the default committer and commit message. All
3972 3975 text/plain body parts before the first diff are added to the commit
3973 3976 message.
3974 3977
3975 3978 If the imported patch was generated by :hg:`export`, user and
3976 3979 description from patch override values from message headers and
3977 3980 body. Values given on command line with -m/--message and -u/--user
3978 3981 override these.
3979 3982
3980 3983 If --exact is specified, import will set the working directory to
3981 3984 the parent of each patch before applying it, and will abort if the
3982 3985 resulting changeset has a different ID than the one recorded in
3983 3986 the patch. This will guard against various ways that portable
3984 3987 patch formats and mail systems might fail to transfer Mercurial
3985 3988 data or metadata. See :hg:`bundle` for lossless transmission.
3986 3989
3987 3990 Use --partial to ensure a changeset will be created from the patch
3988 3991 even if some hunks fail to apply. Hunks that fail to apply will be
3989 3992 written to a <target-file>.rej file. Conflicts can then be resolved
3990 3993 by hand before :hg:`commit --amend` is run to update the created
3991 3994 changeset. This flag exists to let people import patches that
3992 3995 partially apply without losing the associated metadata (author,
3993 3996 date, description, ...).
3994 3997
3995 3998 .. note::
3996 3999
3997 4000 When no hunks apply cleanly, :hg:`import --partial` will create
3998 4001 an empty changeset, importing only the patch metadata.
3999 4002
4000 4003 With -s/--similarity, hg will attempt to discover renames and
4001 4004 copies in the patch in the same way as :hg:`addremove`.
4002 4005
4003 4006 It is possible to use external patch programs to perform the patch
4004 4007 by setting the ``ui.patch`` configuration option. For the default
4005 4008 internal tool, the fuzz can also be configured via ``patch.fuzz``.
4006 4009 See :hg:`help config` for more information about configuration
4007 4010 files and how to use these options.
4008 4011
4009 4012 See :hg:`help dates` for a list of formats valid for -d/--date.
4010 4013
4011 4014 .. container:: verbose
4012 4015
4013 4016 Examples:
4014 4017
4015 4018 - import a traditional patch from a website and detect renames::
4016 4019
4017 4020 hg import -s 80 http://example.com/bugfix.patch
4018 4021
4019 4022 - import a changeset from an hgweb server::
4020 4023
4021 4024 hg import https://www.mercurial-scm.org/repo/hg/rev/5ca8c111e9aa
4022 4025
4023 4026 - import all the patches in a Unix-style mbox::
4024 4027
4025 4028 hg import incoming-patches.mbox
4026 4029
4027 4030 - import patches from stdin::
4028 4031
4029 4032 hg import -
4030 4033
4031 4034 - attempt to exactly restore an exported changeset (not always
4032 4035 possible)::
4033 4036
4034 4037 hg import --exact proposed-fix.patch
4035 4038
4036 4039 - use an external tool to apply a patch which is too fuzzy for
4037 4040 the default internal tool::
4038 4041
4039 4042 hg import --config ui.patch="patch --merge" fuzzy.patch
4040 4043
4041 4044 - change the default fuzz factor from 2 to a less strict 7::
4042 4045
4043 4046 hg import --config patch.fuzz=7 fuzz.patch
4044 4047
4045 4048 Returns 0 on success, 1 on partial success (see --partial).
4046 4049 """
4047 4050
4048 4051 cmdutil.check_incompatible_arguments(
4049 4052 opts, 'no_commit', ['bypass', 'secret']
4050 4053 )
4051 4054 cmdutil.check_incompatible_arguments(opts, 'exact', ['edit', 'prefix'])
4052 4055 opts = pycompat.byteskwargs(opts)
4053 4056 if not patch1:
4054 4057 raise error.Abort(_(b'need at least one patch to import'))
4055 4058
4056 4059 patches = (patch1,) + patches
4057 4060
4058 4061 date = opts.get(b'date')
4059 4062 if date:
4060 4063 opts[b'date'] = dateutil.parsedate(date)
4061 4064
4062 4065 exact = opts.get(b'exact')
4063 4066 update = not opts.get(b'bypass')
4064 4067 try:
4065 4068 sim = float(opts.get(b'similarity') or 0)
4066 4069 except ValueError:
4067 4070 raise error.Abort(_(b'similarity must be a number'))
4068 4071 if sim < 0 or sim > 100:
4069 4072 raise error.Abort(_(b'similarity must be between 0 and 100'))
4070 4073 if sim and not update:
4071 4074 raise error.Abort(_(b'cannot use --similarity with --bypass'))
4072 4075
4073 4076 base = opts[b"base"]
4074 4077 msgs = []
4075 4078 ret = 0
4076 4079
4077 4080 with repo.wlock():
4078 4081 if update:
4079 4082 cmdutil.checkunfinished(repo)
4080 4083 if exact or not opts.get(b'force'):
4081 4084 cmdutil.bailifchanged(repo)
4082 4085
4083 4086 if not opts.get(b'no_commit'):
4084 4087 lock = repo.lock
4085 4088 tr = lambda: repo.transaction(b'import')
4086 4089 dsguard = util.nullcontextmanager
4087 4090 else:
4088 4091 lock = util.nullcontextmanager
4089 4092 tr = util.nullcontextmanager
4090 4093 dsguard = lambda: dirstateguard.dirstateguard(repo, b'import')
4091 4094 with lock(), tr(), dsguard():
4092 4095 parents = repo[None].parents()
4093 4096 for patchurl in patches:
4094 4097 if patchurl == b'-':
4095 4098 ui.status(_(b'applying patch from stdin\n'))
4096 4099 patchfile = ui.fin
4097 4100 patchurl = b'stdin' # for error message
4098 4101 else:
4099 4102 patchurl = os.path.join(base, patchurl)
4100 4103 ui.status(_(b'applying %s\n') % patchurl)
4101 4104 patchfile = hg.openpath(ui, patchurl, sendaccept=False)
4102 4105
4103 4106 haspatch = False
4104 4107 for hunk in patch.split(patchfile):
4105 4108 with patch.extract(ui, hunk) as patchdata:
4106 4109 msg, node, rej = cmdutil.tryimportone(
4107 4110 ui, repo, patchdata, parents, opts, msgs, hg.clean
4108 4111 )
4109 4112 if msg:
4110 4113 haspatch = True
4111 4114 ui.note(msg + b'\n')
4112 4115 if update or exact:
4113 4116 parents = repo[None].parents()
4114 4117 else:
4115 4118 parents = [repo[node]]
4116 4119 if rej:
4117 4120 ui.write_err(_(b"patch applied partially\n"))
4118 4121 ui.write_err(
4119 4122 _(
4120 4123 b"(fix the .rej files and run "
4121 4124 b"`hg commit --amend`)\n"
4122 4125 )
4123 4126 )
4124 4127 ret = 1
4125 4128 break
4126 4129
4127 4130 if not haspatch:
4128 4131 raise error.Abort(_(b'%s: no diffs found') % patchurl)
4129 4132
4130 4133 if msgs:
4131 4134 repo.savecommitmessage(b'\n* * *\n'.join(msgs))
4132 4135 return ret
4133 4136
4134 4137
4135 4138 @command(
4136 4139 b'incoming|in',
4137 4140 [
4138 4141 (
4139 4142 b'f',
4140 4143 b'force',
4141 4144 None,
4142 4145 _(b'run even if remote repository is unrelated'),
4143 4146 ),
4144 4147 (b'n', b'newest-first', None, _(b'show newest record first')),
4145 4148 (b'', b'bundle', b'', _(b'file to store the bundles into'), _(b'FILE')),
4146 4149 (
4147 4150 b'r',
4148 4151 b'rev',
4149 4152 [],
4150 4153 _(b'a remote changeset intended to be added'),
4151 4154 _(b'REV'),
4152 4155 ),
4153 4156 (b'B', b'bookmarks', False, _(b"compare bookmarks")),
4154 4157 (
4155 4158 b'b',
4156 4159 b'branch',
4157 4160 [],
4158 4161 _(b'a specific branch you would like to pull'),
4159 4162 _(b'BRANCH'),
4160 4163 ),
4161 4164 ]
4162 4165 + logopts
4163 4166 + remoteopts
4164 4167 + subrepoopts,
4165 4168 _(b'[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'),
4166 4169 helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
4167 4170 )
4168 4171 def incoming(ui, repo, source=b"default", **opts):
4169 4172 """show new changesets found in source
4170 4173
4171 4174 Show new changesets found in the specified path/URL or the default
4172 4175 pull location. These are the changesets that would have been pulled
4173 4176 by :hg:`pull` at the time you issued this command.
4174 4177
4175 4178 See pull for valid source format details.
4176 4179
4177 4180 .. container:: verbose
4178 4181
4179 4182 With -B/--bookmarks, the result of bookmark comparison between
4180 4183 local and remote repositories is displayed. With -v/--verbose,
4181 4184 status is also displayed for each bookmark like below::
4182 4185
4183 4186 BM1 01234567890a added
4184 4187 BM2 1234567890ab advanced
4185 4188 BM3 234567890abc diverged
4186 4189 BM4 34567890abcd changed
4187 4190
4188 4191 The action taken locally when pulling depends on the
4189 4192 status of each bookmark:
4190 4193
4191 4194 :``added``: pull will create it
4192 4195 :``advanced``: pull will update it
4193 4196 :``diverged``: pull will create a divergent bookmark
4194 4197 :``changed``: result depends on remote changesets
4195 4198
4196 4199 From the point of view of pulling behavior, bookmarks
4197 4200 existing only in the remote repository are treated as ``added``,
4198 4201 even if they are in fact locally deleted.
4199 4202
4200 4203 .. container:: verbose
4201 4204
4202 4205 For a remote repository, using --bundle avoids downloading the
4203 4206 changesets twice if the incoming is followed by a pull.
4204 4207
4205 4208 Examples:
4206 4209
4207 4210 - show incoming changes with patches and full description::
4208 4211
4209 4212 hg incoming -vp
4210 4213
4211 4214 - show incoming changes excluding merges, store a bundle::
4212 4215
4213 4216 hg in -vpM --bundle incoming.hg
4214 4217 hg pull incoming.hg
4215 4218
4216 4219 - briefly list changes inside a bundle::
4217 4220
4218 4221 hg in changes.hg -T "{desc|firstline}\\n"
4219 4222
4220 4223 Returns 0 if there are incoming changes, 1 otherwise.
4221 4224 """
4222 4225 opts = pycompat.byteskwargs(opts)
4223 4226 if opts.get(b'graph'):
4224 4227 logcmdutil.checkunsupportedgraphflags([], opts)
4225 4228
4226 4229 def display(other, chlist, displayer):
4227 4230 revdag = logcmdutil.graphrevs(other, chlist, opts)
4228 4231 logcmdutil.displaygraph(
4229 4232 ui, repo, revdag, displayer, graphmod.asciiedges
4230 4233 )
4231 4234
4232 4235 hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
4233 4236 return 0
4234 4237
4235 4238 cmdutil.check_incompatible_arguments(opts, b'subrepos', [b'bundle'])
4236 4239
4237 4240 if opts.get(b'bookmarks'):
4238 4241 source, branches = hg.parseurl(
4239 4242 ui.expandpath(source), opts.get(b'branch')
4240 4243 )
4241 4244 other = hg.peer(repo, opts, source)
4242 4245 if b'bookmarks' not in other.listkeys(b'namespaces'):
4243 4246 ui.warn(_(b"remote doesn't support bookmarks\n"))
4244 4247 return 0
4245 4248 ui.pager(b'incoming')
4246 4249 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
4247 4250 return bookmarks.incoming(ui, repo, other)
4248 4251
4249 4252 repo._subtoppath = ui.expandpath(source)
4250 4253 try:
4251 4254 return hg.incoming(ui, repo, source, opts)
4252 4255 finally:
4253 4256 del repo._subtoppath
4254 4257
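# Editorial sketch, not part of commands.py: the bookmark branch of
# `incoming` above amounts to opening a peer for the source path and
# comparing its bookmark namespace with the local one.  It assumes a
# repository in the current directory with a configured "default" path;
# the function name is illustrative only.
def _example_incoming_bookmarks():
    from mercurial import bookmarks, hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    ui = repo.ui
    source, branches = hg.parseurl(ui.expandpath(b'default'))
    other = hg.peer(repo, {}, source)
    if b'bookmarks' not in other.listkeys(b'namespaces'):
        ui.warn(b"remote doesn't support bookmarks\n")
        return 0
    return bookmarks.incoming(ui, repo, other)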
4255 4258
4256 4259 @command(
4257 4260 b'init',
4258 4261 remoteopts,
4259 4262 _(b'[-e CMD] [--remotecmd CMD] [DEST]'),
4260 4263 helpcategory=command.CATEGORY_REPO_CREATION,
4261 4264 helpbasic=True,
4262 4265 norepo=True,
4263 4266 )
4264 4267 def init(ui, dest=b".", **opts):
4265 4268 """create a new repository in the given directory
4266 4269
4267 4270 Initialize a new repository in the given directory. If the given
4268 4271 directory does not exist, it will be created.
4269 4272
4270 4273 If no directory is given, the current directory is used.
4271 4274
4272 4275 It is possible to specify an ``ssh://`` URL as the destination.
4273 4276 See :hg:`help urls` for more information.
4274 4277
4275 4278 Returns 0 on success.
4276 4279 """
4277 4280 opts = pycompat.byteskwargs(opts)
4278 4281 hg.peer(ui, opts, ui.expandpath(dest), create=True)
4279 4282
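# Editorial sketch, not part of commands.py: `init` above is a thin wrapper
# around hg.peer(..., create=True), which also accepts ssh:// URLs.  The
# destination name below is illustrative.
def _example_init(dest=b'newrepo'):
    from mercurial import hg, ui as uimod

    hg.peer(uimod.ui.load(), {}, dest, create=True)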
4280 4283
4281 4284 @command(
4282 4285 b'locate',
4283 4286 [
4284 4287 (
4285 4288 b'r',
4286 4289 b'rev',
4287 4290 b'',
4288 4291 _(b'search the repository as it is in REV'),
4289 4292 _(b'REV'),
4290 4293 ),
4291 4294 (
4292 4295 b'0',
4293 4296 b'print0',
4294 4297 None,
4295 4298 _(b'end filenames with NUL, for use with xargs'),
4296 4299 ),
4297 4300 (
4298 4301 b'f',
4299 4302 b'fullpath',
4300 4303 None,
4301 4304 _(b'print complete paths from the filesystem root'),
4302 4305 ),
4303 4306 ]
4304 4307 + walkopts,
4305 4308 _(b'[OPTION]... [PATTERN]...'),
4306 4309 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
4307 4310 )
4308 4311 def locate(ui, repo, *pats, **opts):
4309 4312 """locate files matching specific patterns (DEPRECATED)
4310 4313
4311 4314 Print files under Mercurial control in the working directory whose
4312 4315 names match the given patterns.
4313 4316
4314 4317 By default, this command searches all directories in the working
4315 4318 directory. To search just the current directory and its
4316 4319 subdirectories, use "--include .".
4317 4320
4318 4321 If no patterns are given to match, this command prints the names
4319 4322 of all files under Mercurial control in the working directory.
4320 4323
4321 4324 If you want to feed the output of this command into the "xargs"
4322 4325 command, use the -0 option to both this command and "xargs". This
4323 4326 will avoid the problem of "xargs" treating single filenames that
4324 4327 contain whitespace as multiple filenames.
4325 4328
4326 4329 See :hg:`help files` for a more versatile command.
4327 4330
4328 4331 Returns 0 if a match is found, 1 otherwise.
4329 4332 """
4330 4333 opts = pycompat.byteskwargs(opts)
4331 4334 if opts.get(b'print0'):
4332 4335 end = b'\0'
4333 4336 else:
4334 4337 end = b'\n'
4335 4338 ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)
4336 4339
4337 4340 ret = 1
4338 4341 m = scmutil.match(
4339 4342 ctx, pats, opts, default=b'relglob', badfn=lambda x, y: False
4340 4343 )
4341 4344
4342 4345 ui.pager(b'locate')
4343 4346 if ctx.rev() is None:
4344 4347 # When run on the working copy, "locate" includes removed files, so
4345 4348 # we get the list of files from the dirstate.
4346 4349 filesgen = sorted(repo.dirstate.matches(m))
4347 4350 else:
4348 4351 filesgen = ctx.matches(m)
4349 4352 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=bool(pats))
4350 4353 for abs in filesgen:
4351 4354 if opts.get(b'fullpath'):
4352 4355 ui.write(repo.wjoin(abs), end)
4353 4356 else:
4354 4357 ui.write(uipathfn(abs), end)
4355 4358 ret = 0
4356 4359
4357 4360 return ret
4358 4361
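# Editorial sketch, not part of commands.py: the core of `locate` above is a
# matcher from scmutil.match() applied either to the dirstate (working copy)
# or to a changectx.  A minimal standalone variant, assuming a repository in
# the current directory; the pattern and revision arguments are illustrative.
def _example_locate(pats=(b'*.py',), rev=b'.'):
    from mercurial import hg, scmutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    ctx = scmutil.revsingle(repo, rev, None)
    m = scmutil.match(ctx, pats, {}, default=b'relglob')
    if ctx.rev() is None:
        # working copy: ask the dirstate, which also knows removed files
        files = sorted(repo.dirstate.matches(m))
    else:
        files = list(ctx.matches(m))
    for f in files:
        repo.ui.write(b'%s\n' % f)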
4359 4362
4360 4363 @command(
4361 4364 b'log|history',
4362 4365 [
4363 4366 (
4364 4367 b'f',
4365 4368 b'follow',
4366 4369 None,
4367 4370 _(
4368 4371 b'follow changeset history, or file history across copies and renames'
4369 4372 ),
4370 4373 ),
4371 4374 (
4372 4375 b'',
4373 4376 b'follow-first',
4374 4377 None,
4375 4378 _(b'only follow the first parent of merge changesets (DEPRECATED)'),
4376 4379 ),
4377 4380 (
4378 4381 b'd',
4379 4382 b'date',
4380 4383 b'',
4381 4384 _(b'show revisions matching date spec'),
4382 4385 _(b'DATE'),
4383 4386 ),
4384 4387 (b'C', b'copies', None, _(b'show copied files')),
4385 4388 (
4386 4389 b'k',
4387 4390 b'keyword',
4388 4391 [],
4389 4392 _(b'do case-insensitive search for a given text'),
4390 4393 _(b'TEXT'),
4391 4394 ),
4392 4395 (
4393 4396 b'r',
4394 4397 b'rev',
4395 4398 [],
4396 4399 _(b'show the specified revision or revset'),
4397 4400 _(b'REV'),
4398 4401 ),
4399 4402 (
4400 4403 b'L',
4401 4404 b'line-range',
4402 4405 [],
4403 4406 _(b'follow line range of specified file (EXPERIMENTAL)'),
4404 4407 _(b'FILE,RANGE'),
4405 4408 ),
4406 4409 (
4407 4410 b'',
4408 4411 b'removed',
4409 4412 None,
4410 4413 _(b'include revisions where files were removed'),
4411 4414 ),
4412 4415 (
4413 4416 b'm',
4414 4417 b'only-merges',
4415 4418 None,
4416 4419 _(b'show only merges (DEPRECATED) (use -r "merge()" instead)'),
4417 4420 ),
4418 4421 (b'u', b'user', [], _(b'revisions committed by user'), _(b'USER')),
4419 4422 (
4420 4423 b'',
4421 4424 b'only-branch',
4422 4425 [],
4423 4426 _(
4424 4427 b'show only changesets within the given named branch (DEPRECATED)'
4425 4428 ),
4426 4429 _(b'BRANCH'),
4427 4430 ),
4428 4431 (
4429 4432 b'b',
4430 4433 b'branch',
4431 4434 [],
4432 4435 _(b'show changesets within the given named branch'),
4433 4436 _(b'BRANCH'),
4434 4437 ),
4435 4438 (
4436 4439 b'P',
4437 4440 b'prune',
4438 4441 [],
4439 4442 _(b'do not display revision or any of its ancestors'),
4440 4443 _(b'REV'),
4441 4444 ),
4442 4445 ]
4443 4446 + logopts
4444 4447 + walkopts,
4445 4448 _(b'[OPTION]... [FILE]'),
4446 4449 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
4447 4450 helpbasic=True,
4448 4451 inferrepo=True,
4449 4452 intents={INTENT_READONLY},
4450 4453 )
4451 4454 def log(ui, repo, *pats, **opts):
4452 4455 """show revision history of entire repository or files
4453 4456
4454 4457 Print the revision history of the specified files or the entire
4455 4458 project.
4456 4459
4457 4460 If no revision range is specified, the default is ``tip:0`` unless
4458 4461 --follow is set, in which case the working directory parent is
4459 4462 used as the starting revision.
4460 4463
4461 4464 File history is shown without following rename or copy history of
4462 4465 files. Use -f/--follow with a filename to follow history across
4463 4466 renames and copies. --follow without a filename will only show
4464 4467 ancestors of the starting revision.
4465 4468
4466 4469 By default this command prints revision number and changeset id,
4467 4470 tags, non-trivial parents, user, date and time, and a summary for
4468 4471 each commit. When the -v/--verbose switch is used, the list of
4469 4472 changed files and full commit message are shown.
4470 4473
4471 4474 With --graph the revisions are shown as an ASCII art DAG with the most
4472 4475 recent changeset at the top.
4473 4476 'o' is a changeset, '@' is a working directory parent, '%' is a changeset
4474 4477 involved in an unresolved merge conflict, '_' closes a branch,
4475 4478 'x' is obsolete, '*' is unstable, and '+' represents a fork where the
4476 4479 changeset from the lines below is a parent of the 'o' merge on the same
4477 4480 line.
4478 4481 Paths in the DAG are represented with '|', '/' and so forth. ':' in place
4479 4482 of a '|' indicates one or more revisions in a path are omitted.
4480 4483
4481 4484 .. container:: verbose
4482 4485
4483 4486 Use -L/--line-range FILE,M:N options to follow the history of lines
4484 4487 from M to N in FILE. With -p/--patch, only diff hunks affecting the
4485 4488 specified line range will be shown. This option requires --follow;
4486 4489 it can be specified multiple times. Currently, this option is not
4487 4490 compatible with --graph. This option is experimental.
4488 4491
4489 4492 .. note::
4490 4493
4491 4494 :hg:`log --patch` may generate unexpected diff output for merge
4492 4495 changesets, as it will only compare the merge changeset against
4493 4496 its first parent. Also, only files different from BOTH parents
4494 4497 will appear in the files: field.
4495 4498
4496 4499 .. note::
4497 4500
4498 4501 For performance reasons, :hg:`log FILE` may omit duplicate changes
4499 4502 made on branches and will not show removals or mode changes. To
4500 4503 see all such changes, use the --removed switch.
4501 4504
4502 4505 .. container:: verbose
4503 4506
4504 4507 .. note::
4505 4508
4506 4509 The history resulting from -L/--line-range options depends on diff
4507 4510 options; for instance, if whitespace changes are ignored, changes that
4508 4511 only touch whitespace in the specified line range will not be listed.
4509 4512
4510 4513 .. container:: verbose
4511 4514
4512 4515 Some examples:
4513 4516
4514 4517 - changesets with full descriptions and file lists::
4515 4518
4516 4519 hg log -v
4517 4520
4518 4521 - changesets ancestral to the working directory::
4519 4522
4520 4523 hg log -f
4521 4524
4522 4525 - last 10 commits on the current branch::
4523 4526
4524 4527 hg log -l 10 -b .
4525 4528
4526 4529 - changesets showing all modifications of a file, including removals::
4527 4530
4528 4531 hg log --removed file.c
4529 4532
4530 4533 - all changesets that touch a directory, with diffs, excluding merges::
4531 4534
4532 4535 hg log -Mp lib/
4533 4536
4534 4537 - all revision numbers that match a keyword::
4535 4538
4536 4539 hg log -k bug --template "{rev}\\n"
4537 4540
4538 4541 - the full hash identifier of the working directory parent::
4539 4542
4540 4543 hg log -r . --template "{node}\\n"
4541 4544
4542 4545 - list available log templates::
4543 4546
4544 4547 hg log -T list
4545 4548
4546 4549 - check if a given changeset is included in a tagged release::
4547 4550
4548 4551 hg log -r "a21ccf and ancestor(1.9)"
4549 4552
4550 4553 - find all changesets by some user in a date range::
4551 4554
4552 4555 hg log -k alice -d "may 2008 to jul 2008"
4553 4556
4554 4557 - summary of all changesets after the last tag::
4555 4558
4556 4559 hg log -r "last(tagged())::" --template "{desc|firstline}\\n"
4557 4560
4558 4561 - changesets touching lines 13 to 23 for file.c::
4559 4562
4560 4563 hg log -L file.c,13:23
4561 4564
4562 4565 - changesets touching lines 13 to 23 for file.c and lines 2 to 6 of
4563 4566 main.c with patch::
4564 4567
4565 4568 hg log -L file.c,13:23 -L main.c,2:6 -p
4566 4569
4567 4570 See :hg:`help dates` for a list of formats valid for -d/--date.
4568 4571
4569 4572 See :hg:`help revisions` for more about specifying and ordering
4570 4573 revisions.
4571 4574
4572 4575 See :hg:`help templates` for more about pre-packaged styles and
4573 4576 specifying custom templates. The default template used by the log
4574 4577 command can be customized via the ``command-templates.log`` configuration
4575 4578 setting.
4576 4579
4577 4580 Returns 0 on success.
4578 4581
4579 4582 """
4580 4583 opts = pycompat.byteskwargs(opts)
4581 4584 linerange = opts.get(b'line_range')
4582 4585
4583 4586 if linerange and not opts.get(b'follow'):
4584 4587 raise error.Abort(_(b'--line-range requires --follow'))
4585 4588
4586 4589 if linerange and pats:
4587 4590 # TODO: take pats as patterns with no line-range filter
4588 4591 raise error.Abort(
4589 4592 _(b'FILE arguments are not compatible with --line-range option')
4590 4593 )
4591 4594
4592 4595 repo = scmutil.unhidehashlikerevs(repo, opts.get(b'rev'), b'nowarn')
4593 4596 revs, differ = logcmdutil.getrevs(
4594 4597 repo, logcmdutil.parseopts(ui, pats, opts)
4595 4598 )
4596 4599 if linerange:
4597 4600 # TODO: should follow file history from logcmdutil._initialrevs(),
4598 4601 # then filter the result by logcmdutil._makerevset() and --limit
4599 4602 revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts)
4600 4603
4601 4604 getcopies = None
4602 4605 if opts.get(b'copies'):
4603 4606 endrev = None
4604 4607 if revs:
4605 4608 endrev = revs.max() + 1
4606 4609 getcopies = scmutil.getcopiesfn(repo, endrev=endrev)
4607 4610
4608 4611 ui.pager(b'log')
4609 4612 displayer = logcmdutil.changesetdisplayer(
4610 4613 ui, repo, opts, differ, buffered=True
4611 4614 )
4612 4615 if opts.get(b'graph'):
4613 4616 displayfn = logcmdutil.displaygraphrevs
4614 4617 else:
4615 4618 displayfn = logcmdutil.displayrevs
4616 4619 displayfn(ui, repo, revs, displayer, getcopies)
4617 4620
4618 4621
4619 4622 @command(
4620 4623 b'manifest',
4621 4624 [
4622 4625 (b'r', b'rev', b'', _(b'revision to display'), _(b'REV')),
4623 4626 (b'', b'all', False, _(b"list files from all revisions")),
4624 4627 ]
4625 4628 + formatteropts,
4626 4629 _(b'[-r REV]'),
4627 4630 helpcategory=command.CATEGORY_MAINTENANCE,
4628 4631 intents={INTENT_READONLY},
4629 4632 )
4630 4633 def manifest(ui, repo, node=None, rev=None, **opts):
4631 4634 """output the current or given revision of the project manifest
4632 4635
4633 4636 Print a list of version controlled files for the given revision.
4634 4637 If no revision is given, the first parent of the working directory
4635 4638 is used, or the null revision if no revision is checked out.
4636 4639
4637 4640 With -v, print file permissions, symlink and executable bits.
4638 4641 With --debug, print file revision hashes.
4639 4642
4640 4643 If option --all is specified, the list of all files from all revisions
4641 4644 is printed. This includes deleted and renamed files.
4642 4645
4643 4646 Returns 0 on success.
4644 4647 """
4645 4648 opts = pycompat.byteskwargs(opts)
4646 4649 fm = ui.formatter(b'manifest', opts)
4647 4650
4648 4651 if opts.get(b'all'):
4649 4652 if rev or node:
4650 4653 raise error.Abort(_(b"can't specify a revision with --all"))
4651 4654
4652 4655 res = set()
4653 4656 for rev in repo:
4654 4657 ctx = repo[rev]
4655 4658 res |= set(ctx.files())
4656 4659
4657 4660 ui.pager(b'manifest')
4658 4661 for f in sorted(res):
4659 4662 fm.startitem()
4660 4663 fm.write(b"path", b'%s\n', f)
4661 4664 fm.end()
4662 4665 return
4663 4666
4664 4667 if rev and node:
4665 4668 raise error.Abort(_(b"please specify just one revision"))
4666 4669
4667 4670 if not node:
4668 4671 node = rev
4669 4672
4670 4673 char = {b'l': b'@', b'x': b'*', b'': b'', b't': b'd'}
4671 4674 mode = {b'l': b'644', b'x': b'755', b'': b'644', b't': b'755'}
4672 4675 if node:
4673 4676 repo = scmutil.unhidehashlikerevs(repo, [node], b'nowarn')
4674 4677 ctx = scmutil.revsingle(repo, node)
4675 4678 mf = ctx.manifest()
4676 4679 ui.pager(b'manifest')
4677 4680 for f in ctx:
4678 4681 fm.startitem()
4679 4682 fm.context(ctx=ctx)
4680 4683 fl = ctx[f].flags()
4681 4684 fm.condwrite(ui.debugflag, b'hash', b'%s ', hex(mf[f]))
4682 4685 fm.condwrite(ui.verbose, b'mode type', b'%s %1s ', mode[fl], char[fl])
4683 4686 fm.write(b'path', b'%s\n', f)
4684 4687 fm.end()
4685 4688
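# Editorial sketch, not part of commands.py: the formatter protocol used by
# `manifest` above (startitem/write/end) is what makes `-T "{path}\n"` style
# templating work.  A pared-down listing of a single revision's files,
# assuming a repository in the current directory:
def _example_manifest(rev=b'.'):
    from mercurial import hg, scmutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    ctx = scmutil.revsingle(repo, rev)
    fm = repo.ui.formatter(b'manifest', {})
    for f in ctx:
        fm.startitem()
        fm.write(b'path', b'%s\n', f)
    fm.end()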
4686 4689
4687 4690 @command(
4688 4691 b'merge',
4689 4692 [
4690 4693 (
4691 4694 b'f',
4692 4695 b'force',
4693 4696 None,
4694 4697 _(b'force a merge including outstanding changes (DEPRECATED)'),
4695 4698 ),
4696 4699 (b'r', b'rev', b'', _(b'revision to merge'), _(b'REV')),
4697 4700 (
4698 4701 b'P',
4699 4702 b'preview',
4700 4703 None,
4701 4704 _(b'review revisions to merge (no merge is performed)'),
4702 4705 ),
4703 4706 (b'', b'abort', None, _(b'abort the ongoing merge')),
4704 4707 ]
4705 4708 + mergetoolopts,
4706 4709 _(b'[-P] [[-r] REV]'),
4707 4710 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
4708 4711 helpbasic=True,
4709 4712 )
4710 4713 def merge(ui, repo, node=None, **opts):
4711 4714 """merge another revision into working directory
4712 4715
4713 4716 The current working directory is updated with all changes made in
4714 4717 the requested revision since the last common predecessor revision.
4715 4718
4716 4719 Files that changed between either parent are marked as changed for
4717 4720 the next commit and a commit must be performed before any further
4718 4721 updates to the repository are allowed. The next commit will have
4719 4722 two parents.
4720 4723
4721 4724 ``--tool`` can be used to specify the merge tool used for file
4722 4725 merges. It overrides the HGMERGE environment variable and your
4723 4726 configuration files. See :hg:`help merge-tools` for options.
4724 4727
4725 4728 If no revision is specified, the working directory's parent is a
4726 4729 head revision, and the current branch contains exactly one other
4727 4730 head, then that other head is merged by default. Otherwise, an
4728 4731 explicit revision with which to merge must be provided.
4729 4732
4730 4733 See :hg:`help resolve` for information on handling file conflicts.
4731 4734
4732 4735 To undo an uncommitted merge, use :hg:`merge --abort` which
4733 4736 will check out a clean copy of the original merge parent, losing
4734 4737 all changes.
4735 4738
4736 4739 Returns 0 on success, 1 if there are unresolved files.
4737 4740 """
4738 4741
4739 4742 opts = pycompat.byteskwargs(opts)
4740 4743 abort = opts.get(b'abort')
4741 4744 if abort and repo.dirstate.p2() == nullid:
4742 4745 cmdutil.wrongtooltocontinue(repo, _(b'merge'))
4743 4746 cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
4744 4747 if abort:
4745 4748 state = cmdutil.getunfinishedstate(repo)
4746 4749 if state and state._opname != b'merge':
4747 4750 raise error.Abort(
4748 4751 _(b'cannot abort merge with %s in progress') % (state._opname),
4749 4752 hint=state.hint(),
4750 4753 )
4751 4754 if node:
4752 4755 raise error.Abort(_(b"cannot specify a node with --abort"))
4753 4756 return hg.abortmerge(repo.ui, repo)
4754 4757
4755 4758 if opts.get(b'rev') and node:
4756 4759 raise error.Abort(_(b"please specify just one revision"))
4757 4760 if not node:
4758 4761 node = opts.get(b'rev')
4759 4762
4760 4763 if node:
4761 4764 ctx = scmutil.revsingle(repo, node)
4762 4765 else:
4763 4766 if ui.configbool(b'commands', b'merge.require-rev'):
4764 4767 raise error.Abort(
4765 4768 _(
4766 4769 b'configuration requires specifying revision to merge '
4767 4770 b'with'
4768 4771 )
4769 4772 )
4770 4773 ctx = repo[destutil.destmerge(repo)]
4771 4774
4772 4775 if ctx.node() is None:
4773 4776 raise error.Abort(_(b'merging with the working copy has no effect'))
4774 4777
4775 4778 if opts.get(b'preview'):
4776 4779 # find nodes that are ancestors of p2 but not of p1
4777 4780 p1 = repo[b'.'].node()
4778 4781 p2 = ctx.node()
4779 4782 nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
4780 4783
4781 4784 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
4782 4785 for node in nodes:
4783 4786 displayer.show(repo[node])
4784 4787 displayer.close()
4785 4788 return 0
4786 4789
4787 4790 # ui.forcemerge is an internal variable, do not document
4788 4791 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
4789 4792 with ui.configoverride(overrides, b'merge'):
4790 4793 force = opts.get(b'force')
4791 4794 labels = [b'working copy', b'merge rev']
4792 4795 return hg.merge(ctx, force=force, labels=labels)
4793 4796
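# Editorial sketch, not part of commands.py: `merge --preview` above is just
# "changesets reachable from the merge target but not from the working copy
# parent", computed with changelog.findmissing().  Assuming a repository in
# the current directory; the target revision is illustrative.
def _example_merge_preview(target=b'default'):
    from mercurial import hg, logcmdutil, scmutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    ctx = scmutil.revsingle(repo, target)
    p1 = repo[b'.'].node()
    nodes = repo.changelog.findmissing(common=[p1], heads=[ctx.node()])
    displayer = logcmdutil.changesetdisplayer(repo.ui, repo, {})
    for node in nodes:
        displayer.show(repo[node])
    displayer.close()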
4794 4797
4795 4798 statemod.addunfinished(
4796 4799 b'merge',
4797 4800 fname=None,
4798 4801 clearable=True,
4799 4802 allowcommit=True,
4800 4803 cmdmsg=_(b'outstanding uncommitted merge'),
4801 4804 abortfunc=hg.abortmerge,
4802 4805 statushint=_(
4803 4806 b'To continue: hg commit\nTo abort: hg merge --abort'
4804 4807 ),
4805 4808 cmdhint=_(b"use 'hg commit' or 'hg merge --abort'"),
4806 4809 )
4807 4810
4808 4811
4809 4812 @command(
4810 4813 b'outgoing|out',
4811 4814 [
4812 4815 (
4813 4816 b'f',
4814 4817 b'force',
4815 4818 None,
4816 4819 _(b'run even when the destination is unrelated'),
4817 4820 ),
4818 4821 (
4819 4822 b'r',
4820 4823 b'rev',
4821 4824 [],
4822 4825 _(b'a changeset intended to be included in the destination'),
4823 4826 _(b'REV'),
4824 4827 ),
4825 4828 (b'n', b'newest-first', None, _(b'show newest record first')),
4826 4829 (b'B', b'bookmarks', False, _(b'compare bookmarks')),
4827 4830 (
4828 4831 b'b',
4829 4832 b'branch',
4830 4833 [],
4831 4834 _(b'a specific branch you would like to push'),
4832 4835 _(b'BRANCH'),
4833 4836 ),
4834 4837 ]
4835 4838 + logopts
4836 4839 + remoteopts
4837 4840 + subrepoopts,
4838 4841 _(b'[-M] [-p] [-n] [-f] [-r REV]... [DEST]'),
4839 4842 helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
4840 4843 )
4841 4844 def outgoing(ui, repo, dest=None, **opts):
4842 4845 """show changesets not found in the destination
4843 4846
4844 4847 Show changesets not found in the specified destination repository
4845 4848 or the default push location. These are the changesets that would
4846 4849 be pushed if a push was requested.
4847 4850
4848 4851 See pull for details of valid destination formats.
4849 4852
4850 4853 .. container:: verbose
4851 4854
4852 4855 With -B/--bookmarks, the result of bookmark comparison between
4853 4856 local and remote repositories is displayed. With -v/--verbose,
4854 4857 status is also displayed for each bookmark like below::
4855 4858
4856 4859 BM1 01234567890a added
4857 4860 BM2 deleted
4858 4861 BM3 234567890abc advanced
4859 4862 BM4 34567890abcd diverged
4860 4863 BM5 4567890abcde changed
4861 4864
4862 4865 The action taken when pushing depends on the
4863 4866 status of each bookmark:
4864 4867
4865 4868 :``added``: push with ``-B`` will create it
4866 4869 :``deleted``: push with ``-B`` will delete it
4867 4870 :``advanced``: push will update it
4868 4871 :``diverged``: push with ``-B`` will update it
4869 4872 :``changed``: push with ``-B`` will update it
4870 4873
4871 4874 From the point of view of pushing behavior, bookmarks
4872 4875 existing only in the remote repository are treated as
4873 4876 ``deleted``, even if they are in fact added remotely.
4874 4877
4875 4878 Returns 0 if there are outgoing changes, 1 otherwise.
4876 4879 """
4877 4880 # hg._outgoing() needs to re-resolve the path in order to handle #branch
4878 4881 # style URLs, so don't overwrite dest.
4879 4882 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
4880 4883 if not path:
4881 4884 raise error.Abort(
4882 4885 _(b'default repository not configured!'),
4883 4886 hint=_(b"see 'hg help config.paths'"),
4884 4887 )
4885 4888
4886 4889 opts = pycompat.byteskwargs(opts)
4887 4890 if opts.get(b'graph'):
4888 4891 logcmdutil.checkunsupportedgraphflags([], opts)
4889 4892 o, other = hg._outgoing(ui, repo, dest, opts)
4890 4893 if not o:
4891 4894 cmdutil.outgoinghooks(ui, repo, other, opts, o)
4892 4895 return
4893 4896
4894 4897 revdag = logcmdutil.graphrevs(repo, o, opts)
4895 4898 ui.pager(b'outgoing')
4896 4899 displayer = logcmdutil.changesetdisplayer(ui, repo, opts, buffered=True)
4897 4900 logcmdutil.displaygraph(
4898 4901 ui, repo, revdag, displayer, graphmod.asciiedges
4899 4902 )
4900 4903 cmdutil.outgoinghooks(ui, repo, other, opts, o)
4901 4904 return 0
4902 4905
4903 4906 if opts.get(b'bookmarks'):
4904 4907 dest = path.pushloc or path.loc
4905 4908 other = hg.peer(repo, opts, dest)
4906 4909 if b'bookmarks' not in other.listkeys(b'namespaces'):
4907 4910 ui.warn(_(b"remote doesn't support bookmarks\n"))
4908 4911 return 0
4909 4912 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
4910 4913 ui.pager(b'outgoing')
4911 4914 return bookmarks.outgoing(ui, repo, other)
4912 4915
4913 4916 repo._subtoppath = path.pushloc or path.loc
4914 4917 try:
4915 4918 return hg.outgoing(ui, repo, dest, opts)
4916 4919 finally:
4917 4920 del repo._subtoppath
4918 4921
4919 4922
4920 4923 @command(
4921 4924 b'parents',
4922 4925 [
4923 4926 (
4924 4927 b'r',
4925 4928 b'rev',
4926 4929 b'',
4927 4930 _(b'show parents of the specified revision'),
4928 4931 _(b'REV'),
4929 4932 ),
4930 4933 ]
4931 4934 + templateopts,
4932 4935 _(b'[-r REV] [FILE]'),
4933 4936 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
4934 4937 inferrepo=True,
4935 4938 )
4936 4939 def parents(ui, repo, file_=None, **opts):
4937 4940 """show the parents of the working directory or revision (DEPRECATED)
4938 4941
4939 4942 Print the working directory's parent revisions. If a revision is
4940 4943 given via -r/--rev, the parent of that revision will be printed.
4941 4944 If a file argument is given, the revision in which the file was
4942 4945 last changed (before the working directory revision or the
4943 4946 argument to --rev if given) is printed.
4944 4947
4945 4948 This command is equivalent to::
4946 4949
4947 4950 hg log -r "p1()+p2()" or
4948 4951 hg log -r "p1(REV)+p2(REV)" or
4949 4952 hg log -r "max(::p1() and file(FILE))+max(::p2() and file(FILE))" or
4950 4953 hg log -r "max(::p1(REV) and file(FILE))+max(::p2(REV) and file(FILE))"
4951 4954
4952 4955 See :hg:`summary` and :hg:`help revsets` for related information.
4953 4956
4954 4957 Returns 0 on success.
4955 4958 """
4956 4959
4957 4960 opts = pycompat.byteskwargs(opts)
4958 4961 rev = opts.get(b'rev')
4959 4962 if rev:
4960 4963 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
4961 4964 ctx = scmutil.revsingle(repo, rev, None)
4962 4965
4963 4966 if file_:
4964 4967 m = scmutil.match(ctx, (file_,), opts)
4965 4968 if m.anypats() or len(m.files()) != 1:
4966 4969 raise error.Abort(_(b'can only specify an explicit filename'))
4967 4970 file_ = m.files()[0]
4968 4971 filenodes = []
4969 4972 for cp in ctx.parents():
4970 4973 if not cp:
4971 4974 continue
4972 4975 try:
4973 4976 filenodes.append(cp.filenode(file_))
4974 4977 except error.LookupError:
4975 4978 pass
4976 4979 if not filenodes:
4977 4980 raise error.Abort(_(b"'%s' not found in manifest!") % file_)
4978 4981 p = []
4979 4982 for fn in filenodes:
4980 4983 fctx = repo.filectx(file_, fileid=fn)
4981 4984 p.append(fctx.node())
4982 4985 else:
4983 4986 p = [cp.node() for cp in ctx.parents()]
4984 4987
4985 4988 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
4986 4989 for n in p:
4987 4990 if n != nullid:
4988 4991 displayer.show(repo[n])
4989 4992 displayer.close()
4990 4993
4991 4994
4992 4995 @command(
4993 4996 b'paths',
4994 4997 formatteropts,
4995 4998 _(b'[NAME]'),
4996 4999 helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
4997 5000 optionalrepo=True,
4998 5001 intents={INTENT_READONLY},
4999 5002 )
5000 5003 def paths(ui, repo, search=None, **opts):
5001 5004 """show aliases for remote repositories
5002 5005
5003 5006 Show definition of symbolic path name NAME. If no name is given,
5004 5007 show definition of all available names.
5005 5008
5006 5009 Option -q/--quiet suppresses all output when searching for NAME
5007 5010 and shows only the path names when listing all definitions.
5008 5011
5009 5012 Path names are defined in the [paths] section of your
5010 5013 configuration file and in ``/etc/mercurial/hgrc``. If run inside a
5011 5014 repository, ``.hg/hgrc`` is used, too.
5012 5015
5013 5016 The path names ``default`` and ``default-push`` have a special
5014 5017 meaning. When performing a push or pull operation, they are used
5015 5018 as fallbacks if no location is specified on the command-line.
5016 5019 When ``default-push`` is set, it will be used for push and
5017 5020 ``default`` will be used for pull; otherwise ``default`` is used
5018 5021 as the fallback for both. When cloning a repository, the clone
5019 5022 source is written as ``default`` in ``.hg/hgrc``.
5020 5023
5021 5024 .. note::
5022 5025
5023 5026 ``default`` and ``default-push`` apply to all inbound (e.g.
5024 5027 :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email`
5025 5028 and :hg:`bundle`) operations.
5026 5029
5027 5030 See :hg:`help urls` for more information.
5028 5031
5029 5032 .. container:: verbose
5030 5033
5031 5034 Template:
5032 5035
5033 5036 The following keywords are supported. See also :hg:`help templates`.
5034 5037
5035 5038 :name: String. Symbolic name of the path alias.
5036 5039 :pushurl: String. URL for push operations.
5037 5040 :url: String. URL or directory path for the other operations.
5038 5041
5039 5042 Returns 0 on success.
5040 5043 """
5041 5044
5042 5045 opts = pycompat.byteskwargs(opts)
5043 5046 ui.pager(b'paths')
5044 5047 if search:
5045 5048 pathitems = [
5046 5049 (name, path)
5047 5050 for name, path in pycompat.iteritems(ui.paths)
5048 5051 if name == search
5049 5052 ]
5050 5053 else:
5051 5054 pathitems = sorted(pycompat.iteritems(ui.paths))
5052 5055
5053 5056 fm = ui.formatter(b'paths', opts)
5054 5057 if fm.isplain():
5055 5058 hidepassword = util.hidepassword
5056 5059 else:
5057 5060 hidepassword = bytes
5058 5061 if ui.quiet:
5059 5062 namefmt = b'%s\n'
5060 5063 else:
5061 5064 namefmt = b'%s = '
5062 5065 showsubopts = not search and not ui.quiet
5063 5066
5064 5067 for name, path in pathitems:
5065 5068 fm.startitem()
5066 5069 fm.condwrite(not search, b'name', namefmt, name)
5067 5070 fm.condwrite(not ui.quiet, b'url', b'%s\n', hidepassword(path.rawloc))
5068 5071 for subopt, value in sorted(path.suboptions.items()):
5069 5072 assert subopt not in (b'name', b'url')
5070 5073 if showsubopts:
5071 5074 fm.plain(b'%s:%s = ' % (name, subopt))
5072 5075 fm.condwrite(showsubopts, subopt, b'%s\n', value)
5073 5076
5074 5077 fm.end()
5075 5078
5076 5079 if search and not pathitems:
5077 5080 if not ui.quiet:
5078 5081 ui.warn(_(b"not found!\n"))
5079 5082 return 1
5080 5083 else:
5081 5084 return 0
5082 5085
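# Editorial sketch, not part of commands.py: path aliases live on ui.paths,
# which is all the command above iterates.  A minimal dump of the configured
# aliases, assuming a repository in the current directory:
def _example_paths():
    from mercurial import hg, pycompat, ui as uimod, util

    repo = hg.repository(uimod.ui.load(), b'.')
    for name, path in sorted(pycompat.iteritems(repo.ui.paths)):
        repo.ui.write(b'%s = %s\n' % (name, util.hidepassword(path.rawloc)))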
5083 5086
5084 5087 @command(
5085 5088 b'phase',
5086 5089 [
5087 5090 (b'p', b'public', False, _(b'set changeset phase to public')),
5088 5091 (b'd', b'draft', False, _(b'set changeset phase to draft')),
5089 5092 (b's', b'secret', False, _(b'set changeset phase to secret')),
5090 5093 (b'f', b'force', False, _(b'allow moving the phase boundary backward')),
5091 5094 (b'r', b'rev', [], _(b'target revision'), _(b'REV')),
5092 5095 ],
5093 5096 _(b'[-p|-d|-s] [-f] [-r] [REV...]'),
5094 5097 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
5095 5098 )
5096 5099 def phase(ui, repo, *revs, **opts):
5097 5100 """set or show the current phase name
5098 5101
5099 5102 With no argument, show the phase name of the current revision(s).
5100 5103
5101 5104 With one of -p/--public, -d/--draft or -s/--secret, change the
5102 5105 phase value of the specified revisions.
5103 5106
5104 5107 Unless -f/--force is specified, :hg:`phase` won't move changesets from a
5105 5108 lower phase to a higher phase. Phases are ordered as follows::
5106 5109
5107 5110 public < draft < secret
5108 5111
5109 5112 Returns 0 on success, 1 if some phases could not be changed.
5110 5113
5111 5114 (For more information about the phases concept, see :hg:`help phases`.)
5112 5115 """
5113 5116 opts = pycompat.byteskwargs(opts)
5114 5117 # search for a unique phase argument
5115 5118 targetphase = None
5116 5119 for idx, name in enumerate(phases.cmdphasenames):
5117 5120 if opts[name]:
5118 5121 if targetphase is not None:
5119 5122 raise error.Abort(_(b'only one phase can be specified'))
5120 5123 targetphase = idx
5121 5124
5122 5125 # look for specified revision
5123 5126 revs = list(revs)
5124 5127 revs.extend(opts[b'rev'])
5125 5128 if not revs:
5126 5129 # display both parents as the second parent phase can influence
5127 5130 # the phase of a merge commit
5128 5131 revs = [c.rev() for c in repo[None].parents()]
5129 5132
5130 5133 revs = scmutil.revrange(repo, revs)
5131 5134
5132 5135 ret = 0
5133 5136 if targetphase is None:
5134 5137 # display
5135 5138 for r in revs:
5136 5139 ctx = repo[r]
5137 5140 ui.write(b'%i: %s\n' % (ctx.rev(), ctx.phasestr()))
5138 5141 else:
5139 5142 with repo.lock(), repo.transaction(b"phase") as tr:
5140 5143 # set phase
5141 5144 if not revs:
5142 5145 raise error.Abort(_(b'empty revision set'))
5143 5146 nodes = [repo[r].node() for r in revs]
5144 5147 # moving revisions from public to draft may hide them
5145 5148 # We have to check result on an unfiltered repository
5146 5149 unfi = repo.unfiltered()
5147 5150 getphase = unfi._phasecache.phase
5148 5151 olddata = [getphase(unfi, r) for r in unfi]
5149 5152 phases.advanceboundary(repo, tr, targetphase, nodes)
5150 5153 if opts[b'force']:
5151 5154 phases.retractboundary(repo, tr, targetphase, nodes)
5152 5155 getphase = unfi._phasecache.phase
5153 5156 newdata = [getphase(unfi, r) for r in unfi]
5154 5157 changes = sum(newdata[r] != olddata[r] for r in unfi)
5155 5158 cl = unfi.changelog
5156 5159 rejected = [n for n in nodes if newdata[cl.rev(n)] < targetphase]
5157 5160 if rejected:
5158 5161 ui.warn(
5159 5162 _(
5160 5163 b'cannot move %i changesets to a higher '
5161 5164 b'phase, use --force\n'
5162 5165 )
5163 5166 % len(rejected)
5164 5167 )
5165 5168 ret = 1
5166 5169 if changes:
5167 5170 msg = _(b'phase changed for %i changesets\n') % changes
5168 5171 if ret:
5169 5172 ui.status(msg)
5170 5173 else:
5171 5174 ui.note(msg)
5172 5175 else:
5173 5176 ui.warn(_(b'no phases changed\n'))
5174 5177 return ret
5175 5178
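# Editorial sketch, not part of commands.py: with no -p/-d/-s flag the command
# above only reads phases, printing rev and phasestr() for each selected
# revision.  The same read-only query, assuming a repository in the current
# directory; the revset is illustrative.
def _example_show_phases(spec=b'parents()'):
    from mercurial import hg, scmutil, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    for r in scmutil.revrange(repo, [spec]):
        ctx = repo[r]
        repo.ui.write(b'%i: %s\n' % (ctx.rev(), ctx.phasestr()))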
5176 5179
5177 5180 def postincoming(ui, repo, modheads, optupdate, checkout, brev):
5178 5181 """Run after a changegroup has been added via pull/unbundle
5179 5182
5180 5183 This takes the arguments below:
5181 5184
5182 5185 :modheads: change of heads by pull/unbundle
5183 5186 :optupdate: whether updating the working directory is needed
5184 5187 :checkout: update destination revision (or None to default destination)
5185 5188 :brev: a name, which might be a bookmark to be activated after updating
5186 5189 """
5187 5190 if modheads == 0:
5188 5191 return
5189 5192 if optupdate:
5190 5193 try:
5191 5194 return hg.updatetotally(ui, repo, checkout, brev)
5192 5195 except error.UpdateAbort as inst:
5193 5196 msg = _(b"not updating: %s") % stringutil.forcebytestr(inst)
5194 5197 hint = inst.hint
5195 5198 raise error.UpdateAbort(msg, hint=hint)
5196 5199 if modheads is not None and modheads > 1:
5197 5200 currentbranchheads = len(repo.branchheads())
5198 5201 if currentbranchheads == modheads:
5199 5202 ui.status(
5200 5203 _(b"(run 'hg heads' to see heads, 'hg merge' to merge)\n")
5201 5204 )
5202 5205 elif currentbranchheads > 1:
5203 5206 ui.status(
5204 5207 _(b"(run 'hg heads .' to see heads, 'hg merge' to merge)\n")
5205 5208 )
5206 5209 else:
5207 5210 ui.status(_(b"(run 'hg heads' to see heads)\n"))
5208 5211 elif not ui.configbool(b'commands', b'update.requiredest'):
5209 5212 ui.status(_(b"(run 'hg update' to get a working copy)\n"))
5210 5213
5211 5214
5212 5215 @command(
5213 5216 b'pull',
5214 5217 [
5215 5218 (
5216 5219 b'u',
5217 5220 b'update',
5218 5221 None,
5219 5222 _(b'update to new branch head if new descendants were pulled'),
5220 5223 ),
5221 5224 (
5222 5225 b'f',
5223 5226 b'force',
5224 5227 None,
5225 5228 _(b'run even when remote repository is unrelated'),
5226 5229 ),
5227 5230 (b'', b'confirm', None, _(b'confirm pull before applying changes'),),
5228 5231 (
5229 5232 b'r',
5230 5233 b'rev',
5231 5234 [],
5232 5235 _(b'a remote changeset intended to be added'),
5233 5236 _(b'REV'),
5234 5237 ),
5235 5238 (b'B', b'bookmark', [], _(b"bookmark to pull"), _(b'BOOKMARK')),
5236 5239 (
5237 5240 b'b',
5238 5241 b'branch',
5239 5242 [],
5240 5243 _(b'a specific branch you would like to pull'),
5241 5244 _(b'BRANCH'),
5242 5245 ),
5243 5246 ]
5244 5247 + remoteopts,
5245 5248 _(b'[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'),
5246 5249 helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
5247 5250 helpbasic=True,
5248 5251 )
5249 5252 def pull(ui, repo, source=b"default", **opts):
5250 5253 """pull changes from the specified source
5251 5254
5252 5255 Pull changes from a remote repository to a local one.
5253 5256
5254 5257 This finds all changes from the repository at the specified path
5255 5258 or URL and adds them to a local repository (the current one unless
5256 5259 -R is specified). By default, this does not update the copy of the
5257 5260 project in the working directory.
5258 5261
5259 5262 When cloning from servers that support it, Mercurial may fetch
5260 5263 pre-generated data. When this is done, hooks operating on incoming
5261 5264 changesets and changegroups may fire more than once, once for each
5262 5265 pre-generated bundle as well as for any additional remaining
5263 5266 data. See :hg:`help -e clonebundles` for more.
5264 5267
5265 5268 Use :hg:`incoming` if you want to see what would have been added
5266 5269 by a pull at the time you issued this command. If you then decide
5267 5270 to add those changes to the repository, you should use :hg:`pull
5268 5271 -r X` where ``X`` is the last changeset listed by :hg:`incoming`.
5269 5272
5270 5273 If SOURCE is omitted, the 'default' path will be used.
5271 5274 See :hg:`help urls` for more information.
5272 5275
5273 5276 Specifying a bookmark as ``.`` is equivalent to specifying the active
5274 5277 bookmark's name.
5275 5278
5276 5279 Returns 0 on success, 1 if an update had unresolved files.
5277 5280 """
5278 5281
5279 5282 opts = pycompat.byteskwargs(opts)
5280 5283 if ui.configbool(b'commands', b'update.requiredest') and opts.get(
5281 5284 b'update'
5282 5285 ):
5283 5286 msg = _(b'update destination required by configuration')
5284 5287 hint = _(b'use hg pull followed by hg update DEST')
5285 5288 raise error.Abort(msg, hint=hint)
5286 5289
5287 5290 source, branches = hg.parseurl(ui.expandpath(source), opts.get(b'branch'))
5288 5291 ui.status(_(b'pulling from %s\n') % util.hidepassword(source))
5289 5292 other = hg.peer(repo, opts, source)
5290 5293 try:
5291 5294 revs, checkout = hg.addbranchrevs(
5292 5295 repo, other, branches, opts.get(b'rev')
5293 5296 )
5294 5297
5295 5298 pullopargs = {}
5296 5299
5297 5300 nodes = None
5298 5301 if opts.get(b'bookmark') or revs:
5299 5302 # The list of bookmarks used here is the same one used to actually
5300 5303 # update the bookmark names, to avoid the race from issue 4689; we do
5301 5304 # all lookup and bookmark queries in one go so they see the same
5302 5305 # version of the server state (issue 4700).
5303 5306 nodes = []
5304 5307 fnodes = []
5305 5308 revs = revs or []
5306 5309 if revs and not other.capable(b'lookup'):
5307 5310 err = _(
5308 5311 b"other repository doesn't support revision lookup, "
5309 5312 b"so a rev cannot be specified."
5310 5313 )
5311 5314 raise error.Abort(err)
5312 5315 with other.commandexecutor() as e:
5313 5316 fremotebookmarks = e.callcommand(
5314 5317 b'listkeys', {b'namespace': b'bookmarks'}
5315 5318 )
5316 5319 for r in revs:
5317 5320 fnodes.append(e.callcommand(b'lookup', {b'key': r}))
5318 5321 remotebookmarks = fremotebookmarks.result()
5319 5322 remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
5320 5323 pullopargs[b'remotebookmarks'] = remotebookmarks
5321 5324 for b in opts.get(b'bookmark', []):
5322 5325 b = repo._bookmarks.expandname(b)
5323 5326 if b not in remotebookmarks:
5324 5327 raise error.Abort(_(b'remote bookmark %s not found!') % b)
5325 5328 nodes.append(remotebookmarks[b])
5326 5329 for i, rev in enumerate(revs):
5327 5330 node = fnodes[i].result()
5328 5331 nodes.append(node)
5329 5332 if rev == checkout:
5330 5333 checkout = node
5331 5334
5332 5335 wlock = util.nullcontextmanager()
5333 5336 if opts.get(b'update'):
5334 5337 wlock = repo.wlock()
5335 5338 with wlock:
5336 5339 pullopargs.update(opts.get(b'opargs', {}))
5337 5340 modheads = exchange.pull(
5338 5341 repo,
5339 5342 other,
5340 5343 heads=nodes,
5341 5344 force=opts.get(b'force'),
5342 5345 bookmarks=opts.get(b'bookmark', ()),
5343 5346 opargs=pullopargs,
5344 5347 confirm=opts.get(b'confirm'),
5345 5348 ).cgresult
5346 5349
5347 5350 # brev is a name, which might be a bookmark to be activated at
5348 5351 # the end of the update. In other words, it is an explicit
5349 5352 # destination of the update
5350 5353 brev = None
5351 5354
5352 5355 if checkout:
5353 5356 checkout = repo.unfiltered().changelog.rev(checkout)
5354 5357
5355 5358 # order below depends on implementation of
5356 5359 # hg.addbranchrevs(). opts['bookmark'] is ignored,
5357 5360 # because 'checkout' is determined without it.
5358 5361 if opts.get(b'rev'):
5359 5362 brev = opts[b'rev'][0]
5360 5363 elif opts.get(b'branch'):
5361 5364 brev = opts[b'branch'][0]
5362 5365 else:
5363 5366 brev = branches[0]
5364 5367 repo._subtoppath = source
5365 5368 try:
5366 5369 ret = postincoming(
5367 5370 ui, repo, modheads, opts.get(b'update'), checkout, brev
5368 5371 )
5369 5372 except error.FilteredRepoLookupError as exc:
5370 5373 msg = _(b'cannot update to target: %s') % exc.args[0]
5371 5374 exc.args = (msg,) + exc.args[1:]
5372 5375 raise
5373 5376 finally:
5374 5377 del repo._subtoppath
5375 5378
5376 5379 finally:
5377 5380 other.close()
5378 5381 return ret
5379 5382
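# Editorial sketch, not part of commands.py: stripped of bookmark handling,
# --update and --confirm, the heart of `pull` above is a single
# exchange.pull() call against a peer.  Assuming a repository in the current
# directory with a configured "default" path:
def _example_pull():
    from mercurial import exchange, hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    source, branches = hg.parseurl(repo.ui.expandpath(b'default'))
    other = hg.peer(repo, {}, source)
    try:
        return exchange.pull(repo, other, heads=None, force=False).cgresult
    finally:
        other.close()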
5380 5383
5381 5384 @command(
5382 5385 b'push',
5383 5386 [
5384 5387 (b'f', b'force', None, _(b'force push')),
5385 5388 (
5386 5389 b'r',
5387 5390 b'rev',
5388 5391 [],
5389 5392 _(b'a changeset intended to be included in the destination'),
5390 5393 _(b'REV'),
5391 5394 ),
5392 5395 (b'B', b'bookmark', [], _(b"bookmark to push"), _(b'BOOKMARK')),
5393 5396 (
5394 5397 b'b',
5395 5398 b'branch',
5396 5399 [],
5397 5400 _(b'a specific branch you would like to push'),
5398 5401 _(b'BRANCH'),
5399 5402 ),
5400 5403 (b'', b'new-branch', False, _(b'allow pushing a new branch')),
5401 5404 (
5402 5405 b'',
5403 5406 b'pushvars',
5404 5407 [],
5405 5408 _(b'variables that can be sent to server (ADVANCED)'),
5406 5409 ),
5407 5410 (
5408 5411 b'',
5409 5412 b'publish',
5410 5413 False,
5411 5414 _(b'push the changeset as public (EXPERIMENTAL)'),
5412 5415 ),
5413 5416 ]
5414 5417 + remoteopts,
5415 5418 _(b'[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'),
5416 5419 helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
5417 5420 helpbasic=True,
5418 5421 )
5419 5422 def push(ui, repo, dest=None, **opts):
5420 5423 """push changes to the specified destination
5421 5424
5422 5425 Push changesets from the local repository to the specified
5423 5426 destination.
5424 5427
5425 5428 This operation is symmetrical to pull: it is identical to a pull
5426 5429 in the destination repository from the current one.
5427 5430
5428 5431 By default, push will not allow creation of new heads at the
5429 5432 destination, since multiple heads would make it unclear which head
5430 5433 to use. In this situation, it is recommended to pull and merge
5431 5434 before pushing.
5432 5435
5433 5436 Use --new-branch if you want to allow push to create a new named
5434 5437 branch that is not present at the destination. This allows you to
5435 5438 only create a new branch without forcing other changes.
5436 5439
5437 5440 .. note::
5438 5441
5439 5442 Extra care should be taken with the -f/--force option,
5440 5443 which will push all new heads on all branches, an action which will
5441 5444 almost always cause confusion for collaborators.
5442 5445
5443 5446 If -r/--rev is used, the specified revision and all its ancestors
5444 5447 will be pushed to the remote repository.
5445 5448
5446 5449 If -B/--bookmark is used, the specified bookmarked revision, its
5447 5450 ancestors, and the bookmark will be pushed to the remote
5448 5451 repository. Specifying ``.`` is equivalent to specifying the active
5449 5452 bookmark's name.
5450 5453
5451 5454 Please see :hg:`help urls` for important details about ``ssh://``
5452 5455 URLs. If DESTINATION is omitted, a default path will be used.
5453 5456
5454 5457 .. container:: verbose
5455 5458
5456 5459 The --pushvars option sends strings to the server that become
5457 5460 environment variables prepended with ``HG_USERVAR_``. For example,
5458 5461 ``--pushvars ENABLE_FEATURE=true`` provides the server-side hooks with
5459 5462 ``HG_USERVAR_ENABLE_FEATURE=true`` as part of their environment.
5460 5463
5461 5464 pushvars can provide for user-overridable hooks as well as set debug
5462 5465 levels. One example is a hook that blocks commits containing
5463 5466 conflict markers but lets the user override it when a file uses
5464 5467 conflict markers for testing purposes or when the file format has
5465 5468 strings that look like conflict markers.
5466 5469
5467 5470 By default, servers will ignore `--pushvars`. To enable it add the
5468 5471 following to your configuration file::
5469 5472
5470 5473 [push]
5471 5474 pushvars.server = true
5472 5475
5473 5476 Returns 0 if push was successful, 1 if nothing to push.
5474 5477 """
5475 5478
5476 5479 opts = pycompat.byteskwargs(opts)
5477 5480 if opts.get(b'bookmark'):
5478 5481 ui.setconfig(b'bookmarks', b'pushing', opts[b'bookmark'], b'push')
5479 5482 for b in opts[b'bookmark']:
5480 5483 # translate -B options to -r so changesets get pushed
5481 5484 b = repo._bookmarks.expandname(b)
5482 5485 if b in repo._bookmarks:
5483 5486 opts.setdefault(b'rev', []).append(b)
5484 5487 else:
5485 5488 # if we try to push a deleted bookmark, translate it to null
5486 5489 # this lets simultaneous -r, -b options continue working
5487 5490 opts.setdefault(b'rev', []).append(b"null")
5488 5491
5489 5492 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
5490 5493 if not path:
5491 5494 raise error.Abort(
5492 5495 _(b'default repository not configured!'),
5493 5496 hint=_(b"see 'hg help config.paths'"),
5494 5497 )
5495 5498 dest = path.pushloc or path.loc
5496 5499 branches = (path.branch, opts.get(b'branch') or [])
5497 5500 ui.status(_(b'pushing to %s\n') % util.hidepassword(dest))
5498 5501 revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get(b'rev'))
5499 5502 other = hg.peer(repo, opts, dest)
5500 5503
5501 5504 if revs:
5502 5505 revs = [repo[r].node() for r in scmutil.revrange(repo, revs)]
5503 5506 if not revs:
5504 5507 raise error.Abort(
5505 5508 _(b"specified revisions evaluate to an empty set"),
5506 5509 hint=_(b"use different revision arguments"),
5507 5510 )
5508 5511 elif path.pushrev:
5509 5512 # It doesn't make any sense to specify ancestor revisions. So limit
5510 5513 # to DAG heads to make discovery simpler.
5511 5514 expr = revsetlang.formatspec(b'heads(%r)', path.pushrev)
5512 5515 revs = scmutil.revrange(repo, [expr])
5513 5516 revs = [repo[rev].node() for rev in revs]
5514 5517 if not revs:
5515 5518 raise error.Abort(
5516 5519 _(b'default push revset for path evaluates to an empty set')
5517 5520 )
5518 5521 elif ui.configbool(b'commands', b'push.require-revs'):
5519 5522 raise error.Abort(
5520 5523 _(b'no revisions specified to push'),
5521 5524 hint=_(b'did you mean "hg push -r ."?'),
5522 5525 )
5523 5526
5524 5527 repo._subtoppath = dest
5525 5528 try:
5526 5529 # push subrepos depth-first for coherent ordering
5527 5530 c = repo[b'.']
5528 5531 subs = c.substate # only repos that are committed
5529 5532 for s in sorted(subs):
5530 5533 result = c.sub(s).push(opts)
5531 5534 if result == 0:
5532 5535 return not result
5533 5536 finally:
5534 5537 del repo._subtoppath
5535 5538
5536 5539 opargs = dict(opts.get(b'opargs', {})) # copy opargs since we may mutate it
5537 5540 opargs.setdefault(b'pushvars', []).extend(opts.get(b'pushvars', []))
5538 5541
5539 5542 pushop = exchange.push(
5540 5543 repo,
5541 5544 other,
5542 5545 opts.get(b'force'),
5543 5546 revs=revs,
5544 5547 newbranch=opts.get(b'new_branch'),
5545 5548 bookmarks=opts.get(b'bookmark', ()),
5546 5549 publish=opts.get(b'publish'),
5547 5550 opargs=opargs,
5548 5551 )
5549 5552
5550 5553 result = not pushop.cgresult
5551 5554
5552 5555 if pushop.bkresult is not None:
5553 5556 if pushop.bkresult == 2:
5554 5557 result = 2
5555 5558 elif not result and pushop.bkresult:
5556 5559 result = 2
5557 5560
5558 5561 return result
5559 5562
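# Editorial sketch, not part of commands.py: symmetrically, once the
# destination path and revisions are resolved, `push` above reduces to a
# single exchange.push() call.  Assuming a configured "default-push" or
# "default" path:
def _example_push():
    from mercurial import exchange, hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    path = repo.ui.paths.getpath(None, default=(b'default-push', b'default'))
    other = hg.peer(repo, {}, path.pushloc or path.loc)
    pushop = exchange.push(repo, other, False, revs=None, newbranch=False)
    return not pushop.cgresult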
5560 5563
5561 5564 @command(
5562 5565 b'recover',
5563 5566 [(b'', b'verify', False, b"run `hg verify` after successful recover"),],
5564 5567 helpcategory=command.CATEGORY_MAINTENANCE,
5565 5568 )
5566 5569 def recover(ui, repo, **opts):
5567 5570 """roll back an interrupted transaction
5568 5571
5569 5572 Recover from an interrupted commit or pull.
5570 5573
5571 5574 This command tries to fix the repository status after an
5572 5575 interrupted operation. It should only be necessary when Mercurial
5573 5576 suggests it.
5574 5577
5575 5578 Returns 0 if successful, 1 if nothing to recover or verify fails.
5576 5579 """
5577 5580 ret = repo.recover()
5578 5581 if ret:
5579 5582 if opts['verify']:
5580 5583 return hg.verify(repo)
5581 5584 else:
5582 5585 msg = _(
5583 5586 b"(verify step skipped, run `hg verify` to check your "
5584 5587 b"repository content)\n"
5585 5588 )
5586 5589 ui.warn(msg)
5587 5590 return 0
5588 5591 return 1
5589 5592
5590 5593
5591 5594 @command(
5592 5595 b'remove|rm',
5593 5596 [
5594 5597 (b'A', b'after', None, _(b'record delete for missing files')),
5595 5598 (b'f', b'force', None, _(b'forget added files, delete modified files')),
5596 5599 ]
5597 5600 + subrepoopts
5598 5601 + walkopts
5599 5602 + dryrunopts,
5600 5603 _(b'[OPTION]... FILE...'),
5601 5604 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
5602 5605 helpbasic=True,
5603 5606 inferrepo=True,
5604 5607 )
5605 5608 def remove(ui, repo, *pats, **opts):
5606 5609 """remove the specified files on the next commit
5607 5610
5608 5611 Schedule the indicated files for removal from the current branch.
5609 5612
5610 5613 This command schedules the files to be removed at the next commit.
5611 5614 To undo a remove before that, see :hg:`revert`. To undo added
5612 5615 files, see :hg:`forget`.
5613 5616
5614 5617 .. container:: verbose
5615 5618
5616 5619 -A/--after can be used to remove only files that have already
5617 5620 been deleted, -f/--force can be used to force deletion, and -Af
5618 5621 can be used to remove files from the next revision without
5619 5622 deleting them from the working directory.
5620 5623
5621 5624 The following table details the behavior of remove for different
5622 5625 file states (columns) and option combinations (rows). The file
5623 5626 states are Added [A], Clean [C], Modified [M] and Missing [!]
5624 5627 (as reported by :hg:`status`). The actions are Warn, Remove
5625 5628 (from branch) and Delete (from disk):
5626 5629
5627 5630 ========= == == == ==
5628 5631 opt/state A C M !
5629 5632 ========= == == == ==
5630 5633 none W RD W R
5631 5634 -f R RD RD R
5632 5635 -A W W W R
5633 5636 -Af R R R R
5634 5637 ========= == == == ==
5635 5638
5636 5639 .. note::
5637 5640
5638 5641 :hg:`remove` never deletes files in Added [A] state from the
5639 5642 working directory, not even if ``--force`` is specified.
5640 5643
5641 5644 Returns 0 on success, 1 if any warnings encountered.
5642 5645 """
5643 5646
5644 5647 opts = pycompat.byteskwargs(opts)
5645 5648 after, force = opts.get(b'after'), opts.get(b'force')
5646 5649 dryrun = opts.get(b'dry_run')
5647 5650 if not pats and not after:
5648 5651 raise error.Abort(_(b'no files specified'))
5649 5652
5650 5653 m = scmutil.match(repo[None], pats, opts)
5651 5654 subrepos = opts.get(b'subrepos')
5652 5655 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
5653 5656 return cmdutil.remove(
5654 5657 ui, repo, m, b"", uipathfn, after, force, subrepos, dryrun=dryrun
5655 5658 )
5656 5659
5657 5660
5658 5661 @command(
5659 5662 b'rename|move|mv',
5660 5663 [
5661 5664 (b'A', b'after', None, _(b'record a rename that has already occurred')),
5662 5665 (
5663 5666 b'',
5664 5667 b'at-rev',
5665 5668 b'',
5666 5669 _(b'(un)mark renames in the given revision (EXPERIMENTAL)'),
5667 5670 _(b'REV'),
5668 5671 ),
5669 5672 (
5670 5673 b'f',
5671 5674 b'force',
5672 5675 None,
5673 5676 _(b'forcibly move over an existing managed file'),
5674 5677 ),
5675 5678 ]
5676 5679 + walkopts
5677 5680 + dryrunopts,
5678 5681 _(b'[OPTION]... SOURCE... DEST'),
5679 5682 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
5680 5683 )
5681 5684 def rename(ui, repo, *pats, **opts):
5682 5685 """rename files; equivalent of copy + remove
5683 5686
5684 5687 Mark dest as copies of sources; mark sources for deletion. If dest
5685 5688 is a directory, copies are put in that directory. If dest is a
5686 5689 file, there can only be one source.
5687 5690
5688 5691 By default, this command copies the contents of files as they
5689 5692 exist in the working directory. If invoked with -A/--after, the
5690 5693 operation is recorded, but no copying is performed.
5691 5694
5692 5695 This command takes effect at the next commit. To undo a rename
5693 5696 before that, see :hg:`revert`.
5694 5697
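    For example (the paths are illustrative), to record a move that has
    already been performed outside of Mercurial::

      hg rename --after old/util.py new/util.py
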
5695 5698 Returns 0 on success, 1 if errors are encountered.
5696 5699 """
5697 5700 opts = pycompat.byteskwargs(opts)
5698 5701 with repo.wlock():
5699 5702 return cmdutil.copy(ui, repo, pats, opts, rename=True)
5700 5703
5701 5704
5702 5705 @command(
5703 5706 b'resolve',
5704 5707 [
5705 5708 (b'a', b'all', None, _(b'select all unresolved files')),
5706 5709 (b'l', b'list', None, _(b'list state of files needing merge')),
5707 5710 (b'm', b'mark', None, _(b'mark files as resolved')),
5708 5711 (b'u', b'unmark', None, _(b'mark files as unresolved')),
5709 5712 (b'n', b'no-status', None, _(b'hide status prefix')),
5710 5713 (b'', b're-merge', None, _(b're-merge files')),
5711 5714 ]
5712 5715 + mergetoolopts
5713 5716 + walkopts
5714 5717 + formatteropts,
5715 5718 _(b'[OPTION]... [FILE]...'),
5716 5719 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
5717 5720 inferrepo=True,
5718 5721 )
5719 5722 def resolve(ui, repo, *pats, **opts):
5720 5723 """redo merges or set/view the merge status of files
5721 5724
5722 5725 Merges with unresolved conflicts are often the result of
5723 5726 non-interactive merging using the ``internal:merge`` configuration
5724 5727 setting, or a command-line merge tool like ``diff3``. The resolve
5725 5728 command is used to manage the files involved in a merge, after
5726 5729 :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
5727 5730 working directory must have two parents). See :hg:`help
5728 5731 merge-tools` for information on configuring merge tools.
5729 5732
5730 5733 The resolve command can be used in the following ways:
5731 5734
5732 5735 - :hg:`resolve [--re-merge] [--tool TOOL] FILE...`: attempt to re-merge
5733 5736 the specified files, discarding any previous merge attempts. Re-merging
5734 5737 is not performed for files already marked as resolved. Use ``--all/-a``
5735 5738 to select all unresolved files. ``--tool`` can be used to specify
5736 5739 the merge tool used for the given files. It overrides the HGMERGE
5737 5740 environment variable and your configuration files. Previous file
5738 5741 contents are saved with a ``.orig`` suffix.
5739 5742
5740 5743 - :hg:`resolve -m [FILE]`: mark a file as having been resolved
5741 5744 (e.g. after having manually fixed-up the files). The default is
5742 5745 to mark all unresolved files.
5743 5746
5744 5747 - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
5745 5748 default is to mark all resolved files.
5746 5749
5747 5750 - :hg:`resolve -l`: list files which had or still have conflicts.
5748 5751 In the printed list, ``U`` = unresolved and ``R`` = resolved.
5749 5752 You can use ``set:unresolved()`` or ``set:resolved()`` to filter
5750 5753 the list. See :hg:`help filesets` for details.
5751 5754
5752 5755 .. note::
5753 5756
5754 5757 Mercurial will not let you commit files with unresolved merge
5755 5758 conflicts. You must use :hg:`resolve -m ...` before you can
5756 5759 commit after a conflicting merge.
5757 5760
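    For example (``src/main.c`` is an illustrative path), a typical sequence
    might be to re-merge a file, fix any remaining conflicts by hand, and
    then mark it resolved before committing::

      hg resolve --re-merge src/main.c
      hg resolve --mark src/main.c
      hg commit
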
5758 5761 .. container:: verbose
5759 5762
5760 5763 Template:
5761 5764
5762 5765 The following keywords are supported in addition to the common template
5763 5766 keywords and functions. See also :hg:`help templates`.
5764 5767
5765 5768 :mergestatus: String. Character denoting merge conflicts, ``U`` or ``R``.
5766 5769 :path: String. Repository-absolute path of the file.
5767 5770
5768 5771 Returns 0 on success, 1 if any files fail a resolve attempt.
5769 5772 """
5770 5773
5771 5774 opts = pycompat.byteskwargs(opts)
5772 5775 confirm = ui.configbool(b'commands', b'resolve.confirm')
5773 5776 flaglist = b'all mark unmark list no_status re_merge'.split()
5774 5777 all, mark, unmark, show, nostatus, remerge = [opts.get(o) for o in flaglist]
5775 5778
5776 5779 actioncount = len(list(filter(None, [show, mark, unmark, remerge])))
5777 5780 if actioncount > 1:
5778 5781 raise error.Abort(_(b"too many actions specified"))
5779 5782 elif actioncount == 0 and ui.configbool(
5780 5783 b'commands', b'resolve.explicit-re-merge'
5781 5784 ):
5782 5785 hint = _(b'use --mark, --unmark, --list or --re-merge')
5783 5786 raise error.Abort(_(b'no action specified'), hint=hint)
5784 5787 if pats and all:
5785 5788 raise error.Abort(_(b"can't specify --all and patterns"))
5786 5789 if not (all or pats or show or mark or unmark):
5787 5790 raise error.Abort(
5788 5791 _(b'no files or directories specified'),
5789 5792 hint=b'use --all to re-merge all unresolved files',
5790 5793 )
5791 5794
5792 5795 if confirm:
5793 5796 if all:
5794 5797 if ui.promptchoice(
5795 5798 _(b're-merge all unresolved files (yn)?$$ &Yes $$ &No')
5796 5799 ):
5797 5800 raise error.Abort(_(b'user quit'))
5798 5801 if mark and not pats:
5799 5802 if ui.promptchoice(
5800 5803 _(
5801 5804 b'mark all unresolved files as resolved (yn)?'
5802 5805 b'$$ &Yes $$ &No'
5803 5806 )
5804 5807 ):
5805 5808 raise error.Abort(_(b'user quit'))
5806 5809 if unmark and not pats:
5807 5810 if ui.promptchoice(
5808 5811 _(
5809 5812 b'mark all resolved files as unresolved (yn)?'
5810 5813 b'$$ &Yes $$ &No'
5811 5814 )
5812 5815 ):
5813 5816 raise error.Abort(_(b'user quit'))
5814 5817
5815 5818 uipathfn = scmutil.getuipathfn(repo)
5816 5819
5817 5820 if show:
5818 5821 ui.pager(b'resolve')
5819 5822 fm = ui.formatter(b'resolve', opts)
5820 5823 ms = mergestatemod.mergestate.read(repo)
5821 5824 wctx = repo[None]
5822 5825 m = scmutil.match(wctx, pats, opts)
5823 5826
5824 5827 # Labels and keys based on merge state. Unresolved path conflicts show
5825 5828 # as 'P'. Resolved path conflicts show as 'R', the same as normal
5826 5829 # resolved conflicts.
5827 5830 mergestateinfo = {
5828 5831 mergestatemod.MERGE_RECORD_UNRESOLVED: (
5829 5832 b'resolve.unresolved',
5830 5833 b'U',
5831 5834 ),
5832 5835 mergestatemod.MERGE_RECORD_RESOLVED: (b'resolve.resolved', b'R'),
5833 5836 mergestatemod.MERGE_RECORD_UNRESOLVED_PATH: (
5834 5837 b'resolve.unresolved',
5835 5838 b'P',
5836 5839 ),
5837 5840 mergestatemod.MERGE_RECORD_RESOLVED_PATH: (
5838 5841 b'resolve.resolved',
5839 5842 b'R',
5840 5843 ),
5841 5844 }
5842 5845
5843 5846 for f in ms:
5844 5847 if not m(f):
5845 5848 continue
5846 5849
5847 5850 label, key = mergestateinfo[ms[f]]
5848 5851 fm.startitem()
5849 5852 fm.context(ctx=wctx)
5850 5853 fm.condwrite(not nostatus, b'mergestatus', b'%s ', key, label=label)
5851 5854 fm.data(path=f)
5852 5855 fm.plain(b'%s\n' % uipathfn(f), label=label)
5853 5856 fm.end()
5854 5857 return 0
5855 5858
5856 5859 with repo.wlock():
5857 5860 ms = mergestatemod.mergestate.read(repo)
5858 5861
5859 5862 if not (ms.active() or repo.dirstate.p2() != nullid):
5860 5863 raise error.Abort(
5861 5864 _(b'resolve command not applicable when not merging')
5862 5865 )
5863 5866
5864 5867 wctx = repo[None]
5865 5868 m = scmutil.match(wctx, pats, opts)
5866 5869 ret = 0
5867 5870 didwork = False
5868 5871
5869 5872 tocomplete = []
5870 5873 hasconflictmarkers = []
5871 5874 if mark:
5872 5875 markcheck = ui.config(b'commands', b'resolve.mark-check')
5873 5876 if markcheck not in [b'warn', b'abort']:
5874 5877 # Treat all invalid / unrecognized values as 'none'.
5875 5878 markcheck = False
5876 5879 for f in ms:
5877 5880 if not m(f):
5878 5881 continue
5879 5882
5880 5883 didwork = True
5881 5884
5882 5885 # path conflicts must be resolved manually
5883 5886 if ms[f] in (
5884 5887 mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
5885 5888 mergestatemod.MERGE_RECORD_RESOLVED_PATH,
5886 5889 ):
5887 5890 if mark:
5888 5891 ms.mark(f, mergestatemod.MERGE_RECORD_RESOLVED_PATH)
5889 5892 elif unmark:
5890 5893 ms.mark(f, mergestatemod.MERGE_RECORD_UNRESOLVED_PATH)
5891 5894 elif ms[f] == mergestatemod.MERGE_RECORD_UNRESOLVED_PATH:
5892 5895 ui.warn(
5893 5896 _(b'%s: path conflict must be resolved manually\n')
5894 5897 % uipathfn(f)
5895 5898 )
5896 5899 continue
5897 5900
5898 5901 if mark:
5899 5902 if markcheck:
5900 5903 fdata = repo.wvfs.tryread(f)
5901 5904 if (
5902 5905 filemerge.hasconflictmarkers(fdata)
5903 5906 and ms[f] != mergestatemod.MERGE_RECORD_RESOLVED
5904 5907 ):
5905 5908 hasconflictmarkers.append(f)
5906 5909 ms.mark(f, mergestatemod.MERGE_RECORD_RESOLVED)
5907 5910 elif unmark:
5908 5911 ms.mark(f, mergestatemod.MERGE_RECORD_UNRESOLVED)
5909 5912 else:
5910 5913 # backup pre-resolve (merge uses .orig for its own purposes)
5911 5914 a = repo.wjoin(f)
5912 5915 try:
5913 5916 util.copyfile(a, a + b".resolve")
5914 5917 except (IOError, OSError) as inst:
5915 5918 if inst.errno != errno.ENOENT:
5916 5919 raise
5917 5920
5918 5921 try:
5919 5922 # preresolve file
5920 5923 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
5921 5924 with ui.configoverride(overrides, b'resolve'):
5922 5925 complete, r = ms.preresolve(f, wctx)
5923 5926 if not complete:
5924 5927 tocomplete.append(f)
5925 5928 elif r:
5926 5929 ret = 1
5927 5930 finally:
5928 5931 ms.commit()
5929 5932
5930 5933 # replace filemerge's .orig file with our resolve file, but only
5931 5934 # for merges that are complete
5932 5935 if complete:
5933 5936 try:
5934 5937 util.rename(
5935 5938 a + b".resolve", scmutil.backuppath(ui, repo, f)
5936 5939 )
5937 5940 except OSError as inst:
5938 5941 if inst.errno != errno.ENOENT:
5939 5942 raise
5940 5943
5941 5944 if hasconflictmarkers:
5942 5945 ui.warn(
5943 5946 _(
5944 5947 b'warning: the following files still have conflict '
5945 5948 b'markers:\n'
5946 5949 )
5947 5950 + b''.join(
5948 5951 b' ' + uipathfn(f) + b'\n' for f in hasconflictmarkers
5949 5952 )
5950 5953 )
5951 5954 if markcheck == b'abort' and not all and not pats:
5952 5955 raise error.Abort(
5953 5956 _(b'conflict markers detected'),
5954 5957 hint=_(b'use --all to mark anyway'),
5955 5958 )
5956 5959
5957 5960 for f in tocomplete:
5958 5961 try:
5959 5962 # resolve file
5960 5963 overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
5961 5964 with ui.configoverride(overrides, b'resolve'):
5962 5965 r = ms.resolve(f, wctx)
5963 5966 if r:
5964 5967 ret = 1
5965 5968 finally:
5966 5969 ms.commit()
5967 5970
5968 5971 # replace filemerge's .orig file with our resolve file
5969 5972 a = repo.wjoin(f)
5970 5973 try:
5971 5974 util.rename(a + b".resolve", scmutil.backuppath(ui, repo, f))
5972 5975 except OSError as inst:
5973 5976 if inst.errno != errno.ENOENT:
5974 5977 raise
5975 5978
5976 5979 ms.commit()
5977 5980 branchmerge = repo.dirstate.p2() != nullid
5978 5981 mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
5979 5982
5980 5983 if not didwork and pats:
5981 5984 hint = None
5982 5985 if not any([p for p in pats if p.find(b':') >= 0]):
5983 5986 pats = [b'path:%s' % p for p in pats]
5984 5987 m = scmutil.match(wctx, pats, opts)
5985 5988 for f in ms:
5986 5989 if not m(f):
5987 5990 continue
5988 5991
5989 5992 def flag(o):
5990 5993 if o == b're_merge':
5991 5994 return b'--re-merge '
5992 5995 return b'-%s ' % o[0:1]
5993 5996
5994 5997 flags = b''.join([flag(o) for o in flaglist if opts.get(o)])
5995 5998 hint = _(b"(try: hg resolve %s%s)\n") % (
5996 5999 flags,
5997 6000 b' '.join(pats),
5998 6001 )
5999 6002 break
6000 6003 ui.warn(_(b"arguments do not match paths that need resolving\n"))
6001 6004 if hint:
6002 6005 ui.warn(hint)
6003 6006
6004 6007 unresolvedf = list(ms.unresolved())
6005 6008 if not unresolvedf:
6006 6009 ui.status(_(b'(no more unresolved files)\n'))
6007 6010 cmdutil.checkafterresolved(repo)
6008 6011
6009 6012 return ret
6010 6013
6011 6014
6012 6015 @command(
6013 6016 b'revert',
6014 6017 [
6015 6018 (b'a', b'all', None, _(b'revert all changes when no arguments given')),
6016 6019 (b'd', b'date', b'', _(b'tipmost revision matching date'), _(b'DATE')),
6017 6020 (b'r', b'rev', b'', _(b'revert to the specified revision'), _(b'REV')),
6018 6021 (b'C', b'no-backup', None, _(b'do not save backup copies of files')),
6019 6022 (b'i', b'interactive', None, _(b'interactively select the changes')),
6020 6023 ]
6021 6024 + walkopts
6022 6025 + dryrunopts,
6023 6026 _(b'[OPTION]... [-r REV] [NAME]...'),
6024 6027 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
6025 6028 )
6026 6029 def revert(ui, repo, *pats, **opts):
6027 6030 """restore files to their checkout state
6028 6031
6029 6032 .. note::
6030 6033
6031 6034 To check out earlier revisions, you should use :hg:`update REV`.
6032 6035 To cancel an uncommitted merge (and lose your changes),
6033 6036 use :hg:`merge --abort`.
6034 6037
6035 6038 With no revision specified, revert the specified files or directories
6036 6039 to the contents they had in the parent of the working directory.
6037 6040 This restores the contents of files to an unmodified
6038 6041 state and unschedules adds, removes, copies, and renames. If the
6039 6042 working directory has two parents, you must explicitly specify a
6040 6043 revision.
6041 6044
6042 6045 Using the -r/--rev or -d/--date options, revert the given files or
6043 6046 directories to their states as of a specific revision. Because
6044 6047 revert does not change the working directory parents, this will
6045 6048 cause these files to appear modified. This can be helpful to "back
6046 6049 out" some or all of an earlier change. See :hg:`backout` for a
6047 6050 related method.
6048 6051
6049 6052 Modified files are saved with a .orig suffix before reverting.
6050 6053 To disable these backups, use --no-backup. It is possible to store
6051 6054 the backup files in a custom directory relative to the root of the
6052 6055 repository by setting the ``ui.origbackuppath`` configuration
6053 6056 option.
6054 6057
6055 6058 See :hg:`help dates` for a list of formats valid for -d/--date.
6056 6059
6057 6060 See :hg:`help backout` for a way to reverse the effect of an
6058 6061 earlier changeset.
6059 6062
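    For example (the file name and revision are illustrative), to restore a
    single file to its contents as of an earlier revision without keeping a
    backup::

      hg revert --no-backup -r 1.4 somefile.c
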
6060 6063 Returns 0 on success.
6061 6064 """
6062 6065
6063 6066 opts = pycompat.byteskwargs(opts)
6064 6067 if opts.get(b"date"):
6065 6068 cmdutil.check_incompatible_arguments(opts, b'date', [b'rev'])
6066 6069 opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
6067 6070
6068 6071 parent, p2 = repo.dirstate.parents()
6069 6072 if not opts.get(b'rev') and p2 != nullid:
6070 6073 # revert after merge is a trap for new users (issue2915)
6071 6074 raise error.Abort(
6072 6075 _(b'uncommitted merge with no revision specified'),
6073 6076 hint=_(b"use 'hg update' or see 'hg help revert'"),
6074 6077 )
6075 6078
6076 6079 rev = opts.get(b'rev')
6077 6080 if rev:
6078 6081 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
6079 6082 ctx = scmutil.revsingle(repo, rev)
6080 6083
6081 6084 if not (
6082 6085 pats
6083 6086 or opts.get(b'include')
6084 6087 or opts.get(b'exclude')
6085 6088 or opts.get(b'all')
6086 6089 or opts.get(b'interactive')
6087 6090 ):
6088 6091 msg = _(b"no files or directories specified")
6089 6092 if p2 != nullid:
6090 6093 hint = _(
6091 6094 b"uncommitted merge, use --all to discard all changes,"
6092 6095 b" or 'hg update -C .' to abort the merge"
6093 6096 )
6094 6097 raise error.Abort(msg, hint=hint)
6095 6098 dirty = any(repo.status())
6096 6099 node = ctx.node()
6097 6100 if node != parent:
6098 6101 if dirty:
6099 6102 hint = (
6100 6103 _(
6101 6104 b"uncommitted changes, use --all to discard all"
6102 6105 b" changes, or 'hg update %d' to update"
6103 6106 )
6104 6107 % ctx.rev()
6105 6108 )
6106 6109 else:
6107 6110 hint = (
6108 6111 _(
6109 6112 b"use --all to revert all files,"
6110 6113 b" or 'hg update %d' to update"
6111 6114 )
6112 6115 % ctx.rev()
6113 6116 )
6114 6117 elif dirty:
6115 6118 hint = _(b"uncommitted changes, use --all to discard all changes")
6116 6119 else:
6117 6120 hint = _(b"use --all to revert all files")
6118 6121 raise error.Abort(msg, hint=hint)
6119 6122
6120 6123 return cmdutil.revert(ui, repo, ctx, *pats, **pycompat.strkwargs(opts))
6121 6124
6122 6125
6123 6126 @command(
6124 6127 b'rollback',
6125 6128 dryrunopts + [(b'f', b'force', False, _(b'ignore safety measures'))],
6126 6129 helpcategory=command.CATEGORY_MAINTENANCE,
6127 6130 )
6128 6131 def rollback(ui, repo, **opts):
6129 6132 """roll back the last transaction (DANGEROUS) (DEPRECATED)
6130 6133
6131 6134 Please use :hg:`commit --amend` instead of rollback to correct
6132 6135 mistakes in the last commit.
6133 6136
6134 6137 This command should be used with care. There is only one level of
6135 6138 rollback, and there is no way to undo a rollback. It will also
6136 6139 restore the dirstate at the time of the last transaction, losing
6137 6140 any dirstate changes since that time. This command does not alter
6138 6141 the working directory.
6139 6142
6140 6143 Transactions are used to encapsulate the effects of all commands
6141 6144 that create new changesets or propagate existing changesets into a
6142 6145 repository.
6143 6146
6144 6147 .. container:: verbose
6145 6148
6146 6149 For example, the following commands are transactional, and their
6147 6150 effects can be rolled back:
6148 6151
6149 6152 - commit
6150 6153 - import
6151 6154 - pull
6152 6155 - push (with this repository as the destination)
6153 6156 - unbundle
6154 6157
6155 6158     To avoid permanent data loss, rollback will refuse to roll back a
6156 6159     commit transaction if the resulting commit is not checked out. Use --force to
6157 6160 override this protection.
6158 6161
6159 6162 The rollback command can be entirely disabled by setting the
6160 6163 ``ui.rollback`` configuration setting to false. If you're here
6161 6164 because you want to use rollback and it's disabled, you can
6162 6165 re-enable the command by setting ``ui.rollback`` to true.
6163 6166
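      For example, a configuration file that disables this command entirely
      might contain::

        [ui]
        rollback = false
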
6164 6167 This command is not intended for use on public repositories. Once
6165 6168 changes are visible for pull by other users, rolling a transaction
6166 6169 back locally is ineffective (someone else may already have pulled
6167 6170 the changes). Furthermore, a race is possible with readers of the
6168 6171 repository; for example an in-progress pull from the repository
6169 6172 may fail if a rollback is performed.
6170 6173
6171 6174 Returns 0 on success, 1 if no rollback data is available.
6172 6175 """
6173 6176 if not ui.configbool(b'ui', b'rollback'):
6174 6177 raise error.Abort(
6175 6178 _(b'rollback is disabled because it is unsafe'),
6176 6179 hint=b'see `hg help -v rollback` for information',
6177 6180 )
6178 6181 return repo.rollback(dryrun=opts.get('dry_run'), force=opts.get('force'))
6179 6182
6180 6183
6181 6184 @command(
6182 6185 b'root',
6183 6186 [] + formatteropts,
6184 6187 intents={INTENT_READONLY},
6185 6188 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
6186 6189 )
6187 6190 def root(ui, repo, **opts):
6188 6191 """print the root (top) of the current working directory
6189 6192
6190 6193 Print the root directory of the current repository.
6191 6194
6192 6195 .. container:: verbose
6193 6196
6194 6197 Template:
6195 6198
6196 6199 The following keywords are supported in addition to the common template
6197 6200 keywords and functions. See also :hg:`help templates`.
6198 6201
6199 6202 :hgpath: String. Path to the .hg directory.
6200 6203 :storepath: String. Path to the directory holding versioned data.
6201 6204
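      For example, to print the path of the ``.hg`` directory using the
      ``hgpath`` keyword described above::

        hg root -T "{hgpath}\n"
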
6202 6205 Returns 0 on success.
6203 6206 """
6204 6207 opts = pycompat.byteskwargs(opts)
6205 6208 with ui.formatter(b'root', opts) as fm:
6206 6209 fm.startitem()
6207 6210 fm.write(b'reporoot', b'%s\n', repo.root)
6208 6211 fm.data(hgpath=repo.path, storepath=repo.spath)
6209 6212
6210 6213
6211 6214 @command(
6212 6215 b'serve',
6213 6216 [
6214 6217 (
6215 6218 b'A',
6216 6219 b'accesslog',
6217 6220 b'',
6218 6221 _(b'name of access log file to write to'),
6219 6222 _(b'FILE'),
6220 6223 ),
6221 6224 (b'd', b'daemon', None, _(b'run server in background')),
6222 6225 (b'', b'daemon-postexec', [], _(b'used internally by daemon mode')),
6223 6226 (
6224 6227 b'E',
6225 6228 b'errorlog',
6226 6229 b'',
6227 6230 _(b'name of error log file to write to'),
6228 6231 _(b'FILE'),
6229 6232 ),
6230 6233         # use a string type so we can check whether something was passed
6231 6234 (
6232 6235 b'p',
6233 6236 b'port',
6234 6237 b'',
6235 6238 _(b'port to listen on (default: 8000)'),
6236 6239 _(b'PORT'),
6237 6240 ),
6238 6241 (
6239 6242 b'a',
6240 6243 b'address',
6241 6244 b'',
6242 6245 _(b'address to listen on (default: all interfaces)'),
6243 6246 _(b'ADDR'),
6244 6247 ),
6245 6248 (
6246 6249 b'',
6247 6250 b'prefix',
6248 6251 b'',
6249 6252 _(b'prefix path to serve from (default: server root)'),
6250 6253 _(b'PREFIX'),
6251 6254 ),
6252 6255 (
6253 6256 b'n',
6254 6257 b'name',
6255 6258 b'',
6256 6259 _(b'name to show in web pages (default: working directory)'),
6257 6260 _(b'NAME'),
6258 6261 ),
6259 6262 (
6260 6263 b'',
6261 6264 b'web-conf',
6262 6265 b'',
6263 6266 _(b"name of the hgweb config file (see 'hg help hgweb')"),
6264 6267 _(b'FILE'),
6265 6268 ),
6266 6269 (
6267 6270 b'',
6268 6271 b'webdir-conf',
6269 6272 b'',
6270 6273 _(b'name of the hgweb config file (DEPRECATED)'),
6271 6274 _(b'FILE'),
6272 6275 ),
6273 6276 (
6274 6277 b'',
6275 6278 b'pid-file',
6276 6279 b'',
6277 6280 _(b'name of file to write process ID to'),
6278 6281 _(b'FILE'),
6279 6282 ),
6280 6283 (b'', b'stdio', None, _(b'for remote clients (ADVANCED)')),
6281 6284 (
6282 6285 b'',
6283 6286 b'cmdserver',
6284 6287 b'',
6285 6288 _(b'for remote clients (ADVANCED)'),
6286 6289 _(b'MODE'),
6287 6290 ),
6288 6291 (b't', b'templates', b'', _(b'web templates to use'), _(b'TEMPLATE')),
6289 6292 (b'', b'style', b'', _(b'template style to use'), _(b'STYLE')),
6290 6293 (b'6', b'ipv6', None, _(b'use IPv6 in addition to IPv4')),
6291 6294 (b'', b'certificate', b'', _(b'SSL certificate file'), _(b'FILE')),
6292 6295 (b'', b'print-url', None, _(b'start and print only the URL')),
6293 6296 ]
6294 6297 + subrepoopts,
6295 6298 _(b'[OPTION]...'),
6296 6299 helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
6297 6300 helpbasic=True,
6298 6301 optionalrepo=True,
6299 6302 )
6300 6303 def serve(ui, repo, **opts):
6301 6304 """start stand-alone webserver
6302 6305
6303 6306 Start a local HTTP repository browser and pull server. You can use
6304 6307 this for ad-hoc sharing and browsing of repositories. It is
6305 6308 recommended to use a real web server to serve a repository for
6306 6309 longer periods of time.
6307 6310
6308 6311 Please note that the server does not implement access control.
6309 6312 This means that, by default, anybody can read from the server and
6310 6313     nobody can write to it. Set the ``web.allow-push``
6311 6314 option to ``*`` to allow everybody to push to the server. You
6312 6315 should use a real web server if you need to authenticate users.
6313 6316
6314 6317 By default, the server logs accesses to stdout and errors to
6315 6318 stderr. Use the -A/--accesslog and -E/--errorlog options to log to
6316 6319 files.
6317 6320
6318 6321 To have the server choose a free port number to listen on, specify
6319 6322 a port number of 0; in this case, the server will print the port
6320 6323 number it uses.
6321 6324
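    For example (the log and pid file names are illustrative), to serve the
    repository in the background on port 8000 with separate log files::

      hg serve -d -p 8000 -A access.log -E error.log --pid-file hg.pid
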
6322 6325 Returns 0 on success.
6323 6326 """
6324 6327
6325 6328 cmdutil.check_incompatible_arguments(opts, 'stdio', ['cmdserver'])
6326 6329 opts = pycompat.byteskwargs(opts)
6327 6330 if opts[b"print_url"] and ui.verbose:
6328 6331 raise error.Abort(_(b"cannot use --print-url with --verbose"))
6329 6332
6330 6333 if opts[b"stdio"]:
6331 6334 if repo is None:
6332 6335 raise error.RepoError(
6333 6336 _(b"there is no Mercurial repository here (.hg not found)")
6334 6337 )
6335 6338 s = wireprotoserver.sshserver(ui, repo)
6336 6339 s.serve_forever()
6337 6340
6338 6341 service = server.createservice(ui, repo, opts)
6339 6342 return server.runservice(opts, initfn=service.init, runfn=service.run)
6340 6343
6341 6344
6342 6345 @command(
6343 6346 b'shelve',
6344 6347 [
6345 6348 (
6346 6349 b'A',
6347 6350 b'addremove',
6348 6351 None,
6349 6352 _(b'mark new/missing files as added/removed before shelving'),
6350 6353 ),
6351 6354 (b'u', b'unknown', None, _(b'store unknown files in the shelve')),
6352 6355 (b'', b'cleanup', None, _(b'delete all shelved changes')),
6353 6356 (
6354 6357 b'',
6355 6358 b'date',
6356 6359 b'',
6357 6360 _(b'shelve with the specified commit date'),
6358 6361 _(b'DATE'),
6359 6362 ),
6360 6363 (b'd', b'delete', None, _(b'delete the named shelved change(s)')),
6361 6364 (b'e', b'edit', False, _(b'invoke editor on commit messages')),
6362 6365 (
6363 6366 b'k',
6364 6367 b'keep',
6365 6368 False,
6366 6369 _(b'shelve, but keep changes in the working directory'),
6367 6370 ),
6368 6371 (b'l', b'list', None, _(b'list current shelves')),
6369 6372 (b'm', b'message', b'', _(b'use text as shelve message'), _(b'TEXT')),
6370 6373 (
6371 6374 b'n',
6372 6375 b'name',
6373 6376 b'',
6374 6377 _(b'use the given name for the shelved commit'),
6375 6378 _(b'NAME'),
6376 6379 ),
6377 6380 (
6378 6381 b'p',
6379 6382 b'patch',
6380 6383 None,
6381 6384 _(
6382 6385 b'output patches for changes (provide the names of the shelved '
6383 6386 b'changes as positional arguments)'
6384 6387 ),
6385 6388 ),
6386 6389 (b'i', b'interactive', None, _(b'interactive mode')),
6387 6390 (
6388 6391 b'',
6389 6392 b'stat',
6390 6393 None,
6391 6394 _(
6392 6395 b'output diffstat-style summary of changes (provide the names of '
6393 6396 b'the shelved changes as positional arguments)'
6394 6397 ),
6395 6398 ),
6396 6399 ]
6397 6400 + cmdutil.walkopts,
6398 6401 _(b'hg shelve [OPTION]... [FILE]...'),
6399 6402 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
6400 6403 )
6401 6404 def shelve(ui, repo, *pats, **opts):
6402 6405 '''save and set aside changes from the working directory
6403 6406
6404 6407 Shelving takes files that "hg status" reports as not clean, saves
6405 6408 the modifications to a bundle (a shelved change), and reverts the
6406 6409 files so that their state in the working directory becomes clean.
6407 6410
6408 6411     To restore these changes to the working directory, use "hg
6409 6412 unshelve"; this will work even if you switch to a different
6410 6413 commit.
6411 6414
6412 6415 When no files are specified, "hg shelve" saves all not-clean
6413 6416 files. If specific files or directories are named, only changes to
6414 6417 those files are shelved.
6415 6418
6416 6419     In a bare shelve (when no files are specified and the --interactive,
6417 6420     --include and --exclude options are not used), shelving records whether
6418 6421     the working directory was on a newly created branch, in other words on
6419 6422     a different branch than its first parent. In this situation, unshelving
6420 6423     restores that branch information to the working directory.
6421 6424
6422 6425 Each shelved change has a name that makes it easier to find later.
6423 6426 The name of a shelved change defaults to being based on the active
6424 6427 bookmark, or if there is no active bookmark, the current named
6425 6428 branch. To specify a different name, use ``--name``.
6426 6429
6427 6430 To see a list of existing shelved changes, use the ``--list``
6428 6431 option. For each shelved change, this will print its name, age,
6429 6432 and description; use ``--patch`` or ``--stat`` for more details.
6430 6433
6431 6434 To delete specific shelved changes, use ``--delete``. To delete
6432 6435 all shelved changes, use ``--cleanup``.
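
    For example (``wip-parser`` is an illustrative name), to set aside the
    current changes under a chosen name, inspect them later, and finally
    restore them::

      hg shelve --name wip-parser
      hg shelve --list --patch
      hg unshelve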
6433 6436 '''
6434 6437 opts = pycompat.byteskwargs(opts)
6435 6438 allowables = [
6436 6439 (b'addremove', {b'create'}), # 'create' is pseudo action
6437 6440 (b'unknown', {b'create'}),
6438 6441 (b'cleanup', {b'cleanup'}),
6439 6442 # ('date', {'create'}), # ignored for passing '--date "0 0"' in tests
6440 6443 (b'delete', {b'delete'}),
6441 6444 (b'edit', {b'create'}),
6442 6445 (b'keep', {b'create'}),
6443 6446 (b'list', {b'list'}),
6444 6447 (b'message', {b'create'}),
6445 6448 (b'name', {b'create'}),
6446 6449 (b'patch', {b'patch', b'list'}),
6447 6450 (b'stat', {b'stat', b'list'}),
6448 6451 ]
6449 6452
6450 6453 def checkopt(opt):
6451 6454 if opts.get(opt):
6452 6455 for i, allowable in allowables:
6453 6456 if opts[i] and opt not in allowable:
6454 6457 raise error.Abort(
6455 6458 _(
6456 6459 b"options '--%s' and '--%s' may not be "
6457 6460 b"used together"
6458 6461 )
6459 6462 % (opt, i)
6460 6463 )
6461 6464 return True
6462 6465
6463 6466 if checkopt(b'cleanup'):
6464 6467 if pats:
6465 6468 raise error.Abort(_(b"cannot specify names when using '--cleanup'"))
6466 6469 return shelvemod.cleanupcmd(ui, repo)
6467 6470 elif checkopt(b'delete'):
6468 6471 return shelvemod.deletecmd(ui, repo, pats)
6469 6472 elif checkopt(b'list'):
6470 6473 return shelvemod.listcmd(ui, repo, pats, opts)
6471 6474 elif checkopt(b'patch') or checkopt(b'stat'):
6472 6475 return shelvemod.patchcmds(ui, repo, pats, opts)
6473 6476 else:
6474 6477 return shelvemod.createcmd(ui, repo, pats, opts)
6475 6478
6476 6479
6477 6480 _NOTTERSE = b'nothing'
6478 6481
6479 6482
6480 6483 @command(
6481 6484 b'status|st',
6482 6485 [
6483 6486 (b'A', b'all', None, _(b'show status of all files')),
6484 6487 (b'm', b'modified', None, _(b'show only modified files')),
6485 6488 (b'a', b'added', None, _(b'show only added files')),
6486 6489 (b'r', b'removed', None, _(b'show only removed files')),
6487 6490 (b'd', b'deleted', None, _(b'show only missing files')),
6488 6491 (b'c', b'clean', None, _(b'show only files without changes')),
6489 6492 (b'u', b'unknown', None, _(b'show only unknown (not tracked) files')),
6490 6493 (b'i', b'ignored', None, _(b'show only ignored files')),
6491 6494 (b'n', b'no-status', None, _(b'hide status prefix')),
6492 6495 (b't', b'terse', _NOTTERSE, _(b'show the terse output (EXPERIMENTAL)')),
6493 6496 (
6494 6497 b'C',
6495 6498 b'copies',
6496 6499 None,
6497 6500 _(b'show source of copied files (DEFAULT: ui.statuscopies)'),
6498 6501 ),
6499 6502 (
6500 6503 b'0',
6501 6504 b'print0',
6502 6505 None,
6503 6506 _(b'end filenames with NUL, for use with xargs'),
6504 6507 ),
6505 6508 (b'', b'rev', [], _(b'show difference from revision'), _(b'REV')),
6506 6509 (
6507 6510 b'',
6508 6511 b'change',
6509 6512 b'',
6510 6513 _(b'list the changed files of a revision'),
6511 6514 _(b'REV'),
6512 6515 ),
6513 6516 ]
6514 6517 + walkopts
6515 6518 + subrepoopts
6516 6519 + formatteropts,
6517 6520 _(b'[OPTION]... [FILE]...'),
6518 6521 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
6519 6522 helpbasic=True,
6520 6523 inferrepo=True,
6521 6524 intents={INTENT_READONLY},
6522 6525 )
6523 6526 def status(ui, repo, *pats, **opts):
6524 6527 """show changed files in the working directory
6525 6528
6526 6529 Show status of files in the repository. If names are given, only
6527 6530     files that match are shown. Files that are clean, ignored, or
6528 6531     the source of a copy/move operation are not listed unless
6529 6532 -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
6530 6533 Unless options described with "show only ..." are given, the
6531 6534 options -mardu are used.
6532 6535
6533 6536 Option -q/--quiet hides untracked (unknown and ignored) files
6534 6537 unless explicitly requested with -u/--unknown or -i/--ignored.
6535 6538
6536 6539 .. note::
6537 6540
6538 6541 :hg:`status` may appear to disagree with diff if permissions have
6539 6542 changed or a merge has occurred. The standard diff format does
6540 6543 not report permission changes and diff only reports changes
6541 6544 relative to one merge parent.
6542 6545
6543 6546 If one revision is given, it is used as the base revision.
6544 6547 If two revisions are given, the differences between them are
6545 6548 shown. The --change option can also be used as a shortcut to list
6546 6549 the changed files of a revision from its first parent.
6547 6550
6548 6551 The codes used to show the status of files are::
6549 6552
6550 6553 M = modified
6551 6554 A = added
6552 6555 R = removed
6553 6556 C = clean
6554 6557 ! = missing (deleted by non-hg command, but still tracked)
6555 6558 ? = not tracked
6556 6559 I = ignored
6557 6560 = origin of the previous file (with --copies)
6558 6561
6559 6562 .. container:: verbose
6560 6563
6561 6564 The -t/--terse option abbreviates the output by showing only the directory
6562 6565 name if all the files in it share the same status. The option takes an
6563 6566 argument indicating the statuses to abbreviate: 'm' for 'modified', 'a'
6564 6567 for 'added', 'r' for 'removed', 'd' for 'deleted', 'u' for 'unknown', 'i'
6565 6568     for 'ignored' and 'c' for 'clean'.
6566 6569
6567 6570 It abbreviates only those statuses which are passed. Note that clean and
6568 6571 ignored files are not displayed with '--terse ic' unless the -c/--clean
6569 6572 and -i/--ignored options are also used.
6570 6573
6571 6574     The -v/--verbose option shows information when the repository is in an
6572 6575     unfinished state such as a merge, shelve, or rebase. You can turn this
6573 6576     behavior on by default by enabling the ``commands.status.verbose`` option.
6574 6577
6575 6578 You can skip displaying some of these states by setting
6576 6579 ``commands.status.skipstates`` to one or more of: 'bisect', 'graft',
6577 6580 'histedit', 'merge', 'rebase', or 'unshelve'.
6578 6581
6579 6582 Template:
6580 6583
6581 6584 The following keywords are supported in addition to the common template
6582 6585 keywords and functions. See also :hg:`help templates`.
6583 6586
6584 6587 :path: String. Repository-absolute path of the file.
6585 6588 :source: String. Repository-absolute path of the file originated from.
6586 6589 Available if ``--copies`` is specified.
6587 6590 :status: String. Character denoting file's status.
6588 6591
6589 6592 Examples:
6590 6593
6591 6594 - show changes in the working directory relative to a
6592 6595 changeset::
6593 6596
6594 6597 hg status --rev 9353
6595 6598
6596 6599 - show changes in the working directory relative to the
6597 6600 current directory (see :hg:`help patterns` for more information)::
6598 6601
6599 6602 hg status re:
6600 6603
6601 6604 - show all changes including copies in an existing changeset::
6602 6605
6603 6606 hg status --copies --change 9353
6604 6607
6605 6608 - get a NUL separated list of added files, suitable for xargs::
6606 6609
6607 6610 hg status -an0
6608 6611
6609 6612 - show more information about the repository status, abbreviating
6610 6613 added, removed, modified, deleted, and untracked paths::
6611 6614
6612 6615 hg status -v -t mardu
6613 6616
6614 6617 Returns 0 on success.
6615 6618
6616 6619 """
6617 6620
6618 6621 cmdutil.check_at_most_one_arg(opts, 'rev', 'change')
6619 6622 opts = pycompat.byteskwargs(opts)
6620 6623 revs = opts.get(b'rev')
6621 6624 change = opts.get(b'change')
6622 6625 terse = opts.get(b'terse')
6623 6626 if terse is _NOTTERSE:
6624 6627 if revs:
6625 6628 terse = b''
6626 6629 else:
6627 6630 terse = ui.config(b'commands', b'status.terse')
6628 6631
6629 6632 if revs and terse:
6630 6633 msg = _(b'cannot use --terse with --rev')
6631 6634 raise error.Abort(msg)
6632 6635 elif change:
6633 6636 repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
6634 6637 ctx2 = scmutil.revsingle(repo, change, None)
6635 6638 ctx1 = ctx2.p1()
6636 6639 else:
6637 6640 repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
6638 6641 ctx1, ctx2 = scmutil.revpair(repo, revs)
6639 6642
6640 6643 forcerelativevalue = None
6641 6644 if ui.hasconfig(b'commands', b'status.relative'):
6642 6645 forcerelativevalue = ui.configbool(b'commands', b'status.relative')
6643 6646 uipathfn = scmutil.getuipathfn(
6644 6647 repo,
6645 6648 legacyrelativevalue=bool(pats),
6646 6649 forcerelativevalue=forcerelativevalue,
6647 6650 )
6648 6651
6649 6652 if opts.get(b'print0'):
6650 6653 end = b'\0'
6651 6654 else:
6652 6655 end = b'\n'
6653 6656 states = b'modified added removed deleted unknown ignored clean'.split()
6654 6657 show = [k for k in states if opts.get(k)]
6655 6658 if opts.get(b'all'):
6656 6659 show += ui.quiet and (states[:4] + [b'clean']) or states
6657 6660
6658 6661 if not show:
6659 6662 if ui.quiet:
6660 6663 show = states[:4]
6661 6664 else:
6662 6665 show = states[:5]
6663 6666
6664 6667 m = scmutil.match(ctx2, pats, opts)
6665 6668 if terse:
6666 6669         # we need to compute clean and unknown files for terse output
6667 6670 stat = repo.status(
6668 6671 ctx1.node(),
6669 6672 ctx2.node(),
6670 6673 m,
6671 6674 b'ignored' in show or b'i' in terse,
6672 6675 clean=True,
6673 6676 unknown=True,
6674 6677 listsubrepos=opts.get(b'subrepos'),
6675 6678 )
6676 6679
6677 6680 stat = cmdutil.tersedir(stat, terse)
6678 6681 else:
6679 6682 stat = repo.status(
6680 6683 ctx1.node(),
6681 6684 ctx2.node(),
6682 6685 m,
6683 6686 b'ignored' in show,
6684 6687 b'clean' in show,
6685 6688 b'unknown' in show,
6686 6689 opts.get(b'subrepos'),
6687 6690 )
6688 6691
6689 6692 changestates = zip(
6690 6693 states,
6691 6694 pycompat.iterbytestr(b'MAR!?IC'),
6692 6695 [getattr(stat, s.decode('utf8')) for s in states],
6693 6696 )
6694 6697
6695 6698 copy = {}
6696 6699 if (
6697 6700 opts.get(b'all')
6698 6701 or opts.get(b'copies')
6699 6702 or ui.configbool(b'ui', b'statuscopies')
6700 6703 ) and not opts.get(b'no_status'):
6701 6704 copy = copies.pathcopies(ctx1, ctx2, m)
6702 6705
6703 6706 morestatus = None
6704 6707 if (
6705 6708 ui.verbose or ui.configbool(b'commands', b'status.verbose')
6706 6709 ) and not ui.plain():
6707 6710 morestatus = cmdutil.readmorestatus(repo)
6708 6711
6709 6712 ui.pager(b'status')
6710 6713 fm = ui.formatter(b'status', opts)
6711 6714 fmt = b'%s' + end
6712 6715 showchar = not opts.get(b'no_status')
6713 6716
6714 6717 for state, char, files in changestates:
6715 6718 if state in show:
6716 6719 label = b'status.' + state
6717 6720 for f in files:
6718 6721 fm.startitem()
6719 6722 fm.context(ctx=ctx2)
6720 6723 fm.data(itemtype=b'file', path=f)
6721 6724 fm.condwrite(showchar, b'status', b'%s ', char, label=label)
6722 6725 fm.plain(fmt % uipathfn(f), label=label)
6723 6726 if f in copy:
6724 6727 fm.data(source=copy[f])
6725 6728 fm.plain(
6726 6729 (b' %s' + end) % uipathfn(copy[f]),
6727 6730 label=b'status.copied',
6728 6731 )
6729 6732 if morestatus:
6730 6733 morestatus.formatfile(f, fm)
6731 6734
6732 6735 if morestatus:
6733 6736 morestatus.formatfooter(fm)
6734 6737 fm.end()
6735 6738
6736 6739
6737 6740 @command(
6738 6741 b'summary|sum',
6739 6742 [(b'', b'remote', None, _(b'check for push and pull'))],
6740 6743 b'[--remote]',
6741 6744 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
6742 6745 helpbasic=True,
6743 6746 intents={INTENT_READONLY},
6744 6747 )
6745 6748 def summary(ui, repo, **opts):
6746 6749 """summarize working directory state
6747 6750
6748 6751 This generates a brief summary of the working directory state,
6749 6752 including parents, branch, commit status, phase and available updates.
6750 6753
6751 6754 With the --remote option, this will check the default paths for
6752 6755 incoming and outgoing changes. This can be time-consuming.
6753 6756
6754 6757 Returns 0 on success.
6755 6758 """
6756 6759
6757 6760 opts = pycompat.byteskwargs(opts)
6758 6761 ui.pager(b'summary')
6759 6762 ctx = repo[None]
6760 6763 parents = ctx.parents()
6761 6764 pnode = parents[0].node()
6762 6765 marks = []
6763 6766
6764 6767 try:
6765 6768 ms = mergestatemod.mergestate.read(repo)
6766 6769 except error.UnsupportedMergeRecords as e:
6767 6770 s = b' '.join(e.recordtypes)
6768 6771 ui.warn(
6769 6772 _(b'warning: merge state has unsupported record types: %s\n') % s
6770 6773 )
6771 6774 unresolved = []
6772 6775 else:
6773 6776 unresolved = list(ms.unresolved())
6774 6777
6775 6778 for p in parents:
6776 6779 # label with log.changeset (instead of log.parent) since this
6777 6780 # shows a working directory parent *changeset*:
6778 6781 # i18n: column positioning for "hg summary"
6779 6782 ui.write(
6780 6783 _(b'parent: %d:%s ') % (p.rev(), p),
6781 6784 label=logcmdutil.changesetlabels(p),
6782 6785 )
6783 6786 ui.write(b' '.join(p.tags()), label=b'log.tag')
6784 6787 if p.bookmarks():
6785 6788 marks.extend(p.bookmarks())
6786 6789 if p.rev() == -1:
6787 6790 if not len(repo):
6788 6791 ui.write(_(b' (empty repository)'))
6789 6792 else:
6790 6793 ui.write(_(b' (no revision checked out)'))
6791 6794 if p.obsolete():
6792 6795 ui.write(_(b' (obsolete)'))
6793 6796 if p.isunstable():
6794 6797 instabilities = (
6795 6798 ui.label(instability, b'trouble.%s' % instability)
6796 6799 for instability in p.instabilities()
6797 6800 )
6798 6801 ui.write(b' (' + b', '.join(instabilities) + b')')
6799 6802 ui.write(b'\n')
6800 6803 if p.description():
6801 6804 ui.status(
6802 6805 b' ' + p.description().splitlines()[0].strip() + b'\n',
6803 6806 label=b'log.summary',
6804 6807 )
6805 6808
6806 6809 branch = ctx.branch()
6807 6810 bheads = repo.branchheads(branch)
6808 6811 # i18n: column positioning for "hg summary"
6809 6812 m = _(b'branch: %s\n') % branch
6810 6813 if branch != b'default':
6811 6814 ui.write(m, label=b'log.branch')
6812 6815 else:
6813 6816 ui.status(m, label=b'log.branch')
6814 6817
6815 6818 if marks:
6816 6819 active = repo._activebookmark
6817 6820 # i18n: column positioning for "hg summary"
6818 6821 ui.write(_(b'bookmarks:'), label=b'log.bookmark')
6819 6822 if active is not None:
6820 6823 if active in marks:
6821 6824 ui.write(b' *' + active, label=bookmarks.activebookmarklabel)
6822 6825 marks.remove(active)
6823 6826 else:
6824 6827 ui.write(b' [%s]' % active, label=bookmarks.activebookmarklabel)
6825 6828 for m in marks:
6826 6829 ui.write(b' ' + m, label=b'log.bookmark')
6827 6830 ui.write(b'\n', label=b'log.bookmark')
6828 6831
6829 6832 status = repo.status(unknown=True)
6830 6833
6831 6834 c = repo.dirstate.copies()
6832 6835 copied, renamed = [], []
6833 6836 for d, s in pycompat.iteritems(c):
6834 6837 if s in status.removed:
6835 6838 status.removed.remove(s)
6836 6839 renamed.append(d)
6837 6840 else:
6838 6841 copied.append(d)
6839 6842 if d in status.added:
6840 6843 status.added.remove(d)
6841 6844
6842 6845 subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
6843 6846
6844 6847 labels = [
6845 6848 (ui.label(_(b'%d modified'), b'status.modified'), status.modified),
6846 6849 (ui.label(_(b'%d added'), b'status.added'), status.added),
6847 6850 (ui.label(_(b'%d removed'), b'status.removed'), status.removed),
6848 6851 (ui.label(_(b'%d renamed'), b'status.copied'), renamed),
6849 6852 (ui.label(_(b'%d copied'), b'status.copied'), copied),
6850 6853 (ui.label(_(b'%d deleted'), b'status.deleted'), status.deleted),
6851 6854 (ui.label(_(b'%d unknown'), b'status.unknown'), status.unknown),
6852 6855 (ui.label(_(b'%d unresolved'), b'resolve.unresolved'), unresolved),
6853 6856 (ui.label(_(b'%d subrepos'), b'status.modified'), subs),
6854 6857 ]
6855 6858 t = []
6856 6859 for l, s in labels:
6857 6860 if s:
6858 6861 t.append(l % len(s))
6859 6862
6860 6863 t = b', '.join(t)
6861 6864 cleanworkdir = False
6862 6865
6863 6866 if repo.vfs.exists(b'graftstate'):
6864 6867 t += _(b' (graft in progress)')
6865 6868 if repo.vfs.exists(b'updatestate'):
6866 6869 t += _(b' (interrupted update)')
6867 6870 elif len(parents) > 1:
6868 6871 t += _(b' (merge)')
6869 6872 elif branch != parents[0].branch():
6870 6873 t += _(b' (new branch)')
6871 6874 elif parents[0].closesbranch() and pnode in repo.branchheads(
6872 6875 branch, closed=True
6873 6876 ):
6874 6877 t += _(b' (head closed)')
6875 6878 elif not (
6876 6879 status.modified
6877 6880 or status.added
6878 6881 or status.removed
6879 6882 or renamed
6880 6883 or copied
6881 6884 or subs
6882 6885 ):
6883 6886 t += _(b' (clean)')
6884 6887 cleanworkdir = True
6885 6888 elif pnode not in bheads:
6886 6889 t += _(b' (new branch head)')
6887 6890
6888 6891 if parents:
6889 6892 pendingphase = max(p.phase() for p in parents)
6890 6893 else:
6891 6894 pendingphase = phases.public
6892 6895
6893 6896 if pendingphase > phases.newcommitphase(ui):
6894 6897 t += b' (%s)' % phases.phasenames[pendingphase]
6895 6898
6896 6899 if cleanworkdir:
6897 6900 # i18n: column positioning for "hg summary"
6898 6901 ui.status(_(b'commit: %s\n') % t.strip())
6899 6902 else:
6900 6903 # i18n: column positioning for "hg summary"
6901 6904 ui.write(_(b'commit: %s\n') % t.strip())
6902 6905
6903 6906 # all ancestors of branch heads - all ancestors of parent = new csets
6904 6907 new = len(
6905 6908 repo.changelog.findmissing([pctx.node() for pctx in parents], bheads)
6906 6909 )
6907 6910
6908 6911 if new == 0:
6909 6912 # i18n: column positioning for "hg summary"
6910 6913 ui.status(_(b'update: (current)\n'))
6911 6914 elif pnode not in bheads:
6912 6915 # i18n: column positioning for "hg summary"
6913 6916 ui.write(_(b'update: %d new changesets (update)\n') % new)
6914 6917 else:
6915 6918 # i18n: column positioning for "hg summary"
6916 6919 ui.write(
6917 6920 _(b'update: %d new changesets, %d branch heads (merge)\n')
6918 6921 % (new, len(bheads))
6919 6922 )
6920 6923
6921 6924 t = []
6922 6925 draft = len(repo.revs(b'draft()'))
6923 6926 if draft:
6924 6927 t.append(_(b'%d draft') % draft)
6925 6928 secret = len(repo.revs(b'secret()'))
6926 6929 if secret:
6927 6930 t.append(_(b'%d secret') % secret)
6928 6931
6929 6932 if draft or secret:
6930 6933 ui.status(_(b'phases: %s\n') % b', '.join(t))
6931 6934
6932 6935 if obsolete.isenabled(repo, obsolete.createmarkersopt):
6933 6936 for trouble in (b"orphan", b"contentdivergent", b"phasedivergent"):
6934 6937 numtrouble = len(repo.revs(trouble + b"()"))
6935 6938 # We write all the possibilities to ease translation
6936 6939 troublemsg = {
6937 6940 b"orphan": _(b"orphan: %d changesets"),
6938 6941 b"contentdivergent": _(b"content-divergent: %d changesets"),
6939 6942 b"phasedivergent": _(b"phase-divergent: %d changesets"),
6940 6943 }
6941 6944 if numtrouble > 0:
6942 6945 ui.status(troublemsg[trouble] % numtrouble + b"\n")
6943 6946
6944 6947 cmdutil.summaryhooks(ui, repo)
6945 6948
6946 6949 if opts.get(b'remote'):
6947 6950 needsincoming, needsoutgoing = True, True
6948 6951 else:
6949 6952 needsincoming, needsoutgoing = False, False
6950 6953 for i, o in cmdutil.summaryremotehooks(ui, repo, opts, None):
6951 6954 if i:
6952 6955 needsincoming = True
6953 6956 if o:
6954 6957 needsoutgoing = True
6955 6958 if not needsincoming and not needsoutgoing:
6956 6959 return
6957 6960
6958 6961 def getincoming():
6959 6962 source, branches = hg.parseurl(ui.expandpath(b'default'))
6960 6963 sbranch = branches[0]
6961 6964 try:
6962 6965 other = hg.peer(repo, {}, source)
6963 6966 except error.RepoError:
6964 6967 if opts.get(b'remote'):
6965 6968 raise
6966 6969 return source, sbranch, None, None, None
6967 6970 revs, checkout = hg.addbranchrevs(repo, other, branches, None)
6968 6971 if revs:
6969 6972 revs = [other.lookup(rev) for rev in revs]
6970 6973 ui.debug(b'comparing with %s\n' % util.hidepassword(source))
6971 6974 repo.ui.pushbuffer()
6972 6975 commoninc = discovery.findcommonincoming(repo, other, heads=revs)
6973 6976 repo.ui.popbuffer()
6974 6977 return source, sbranch, other, commoninc, commoninc[1]
6975 6978
6976 6979 if needsincoming:
6977 6980 source, sbranch, sother, commoninc, incoming = getincoming()
6978 6981 else:
6979 6982 source = sbranch = sother = commoninc = incoming = None
6980 6983
6981 6984 def getoutgoing():
6982 6985 dest, branches = hg.parseurl(ui.expandpath(b'default-push', b'default'))
6983 6986 dbranch = branches[0]
6984 6987 revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
6985 6988 if source != dest:
6986 6989 try:
6987 6990 dother = hg.peer(repo, {}, dest)
6988 6991 except error.RepoError:
6989 6992 if opts.get(b'remote'):
6990 6993 raise
6991 6994 return dest, dbranch, None, None
6992 6995 ui.debug(b'comparing with %s\n' % util.hidepassword(dest))
6993 6996 elif sother is None:
6994 6997 # there is no explicit destination peer, but source one is invalid
6995 6998 return dest, dbranch, None, None
6996 6999 else:
6997 7000 dother = sother
6998 7001 if source != dest or (sbranch is not None and sbranch != dbranch):
6999 7002 common = None
7000 7003 else:
7001 7004 common = commoninc
7002 7005 if revs:
7003 7006 revs = [repo.lookup(rev) for rev in revs]
7004 7007 repo.ui.pushbuffer()
7005 7008 outgoing = discovery.findcommonoutgoing(
7006 7009 repo, dother, onlyheads=revs, commoninc=common
7007 7010 )
7008 7011 repo.ui.popbuffer()
7009 7012 return dest, dbranch, dother, outgoing
7010 7013
7011 7014 if needsoutgoing:
7012 7015 dest, dbranch, dother, outgoing = getoutgoing()
7013 7016 else:
7014 7017 dest = dbranch = dother = outgoing = None
7015 7018
7016 7019 if opts.get(b'remote'):
7017 7020 t = []
7018 7021 if incoming:
7019 7022 t.append(_(b'1 or more incoming'))
7020 7023 o = outgoing.missing
7021 7024 if o:
7022 7025 t.append(_(b'%d outgoing') % len(o))
7023 7026 other = dother or sother
7024 7027 if b'bookmarks' in other.listkeys(b'namespaces'):
7025 7028 counts = bookmarks.summary(repo, other)
7026 7029 if counts[0] > 0:
7027 7030 t.append(_(b'%d incoming bookmarks') % counts[0])
7028 7031 if counts[1] > 0:
7029 7032 t.append(_(b'%d outgoing bookmarks') % counts[1])
7030 7033
7031 7034 if t:
7032 7035 # i18n: column positioning for "hg summary"
7033 7036 ui.write(_(b'remote: %s\n') % (b', '.join(t)))
7034 7037 else:
7035 7038 # i18n: column positioning for "hg summary"
7036 7039 ui.status(_(b'remote: (synced)\n'))
7037 7040
7038 7041 cmdutil.summaryremotehooks(
7039 7042 ui,
7040 7043 repo,
7041 7044 opts,
7042 7045 (
7043 7046 (source, sbranch, sother, commoninc),
7044 7047 (dest, dbranch, dother, outgoing),
7045 7048 ),
7046 7049 )
7047 7050
7048 7051
7049 7052 @command(
7050 7053 b'tag',
7051 7054 [
7052 7055 (b'f', b'force', None, _(b'force tag')),
7053 7056 (b'l', b'local', None, _(b'make the tag local')),
7054 7057 (b'r', b'rev', b'', _(b'revision to tag'), _(b'REV')),
7055 7058 (b'', b'remove', None, _(b'remove a tag')),
7056 7059 # -l/--local is already there, commitopts cannot be used
7057 7060 (b'e', b'edit', None, _(b'invoke editor on commit messages')),
7058 7061 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
7059 7062 ]
7060 7063 + commitopts2,
7061 7064 _(b'[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'),
7062 7065 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
7063 7066 )
7064 7067 def tag(ui, repo, name1, *names, **opts):
7065 7068 """add one or more tags for the current or given revision
7066 7069
7067 7070 Name a particular revision using <name>.
7068 7071
7069 7072     Tags are used to name particular revisions of the repository and are
7070 7073     very useful for comparing different revisions, going back to significant
7071 7074     earlier versions, or marking branch points as releases. Changing
7072 7075 an existing tag is normally disallowed; use -f/--force to override.
7073 7076
7074 7077 If no revision is given, the parent of the working directory is
7075 7078 used.
7076 7079
7077 7080 To facilitate version control, distribution, and merging of tags,
7078 7081 they are stored as a file named ".hgtags" which is managed similarly
7079 7082 to other project files and can be hand-edited if necessary. This
7080 7083 also means that tagging creates a new commit. The file
7081 7084 ".hg/localtags" is used for local tags (not shared among
7082 7085 repositories).
7083 7086
7084 7087 Tag commits are usually made at the head of a branch. If the parent
7085 7088 of the working directory is not a branch head, :hg:`tag` aborts; use
7086 7089 -f/--force to force the tag commit to be based on a non-head
7087 7090 changeset.
7088 7091
7089 7092 See :hg:`help dates` for a list of formats valid for -d/--date.
7090 7093
7091 7094 Since tag names have priority over branch names during revision
7092 7095 lookup, using an existing branch name as a tag name is discouraged.
7093 7096
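    For example (the tag names and revision are illustrative), to tag a
    specific changeset globally, or to add a tag that stays local to this
    repository::

      hg tag -r 1a2b3c4d v1.0
      hg tag --local wip-testing
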
7094 7097 Returns 0 on success.
7095 7098 """
7096 7099 cmdutil.check_incompatible_arguments(opts, 'remove', ['rev'])
7097 7100 opts = pycompat.byteskwargs(opts)
7098 7101 with repo.wlock(), repo.lock():
7099 7102 rev_ = b"."
7100 7103 names = [t.strip() for t in (name1,) + names]
7101 7104 if len(names) != len(set(names)):
7102 7105 raise error.Abort(_(b'tag names must be unique'))
7103 7106 for n in names:
7104 7107 scmutil.checknewlabel(repo, n, b'tag')
7105 7108 if not n:
7106 7109 raise error.Abort(
7107 7110 _(b'tag names cannot consist entirely of whitespace')
7108 7111 )
7109 7112 if opts.get(b'rev'):
7110 7113 rev_ = opts[b'rev']
7111 7114 message = opts.get(b'message')
7112 7115 if opts.get(b'remove'):
7113 7116 if opts.get(b'local'):
7114 7117 expectedtype = b'local'
7115 7118 else:
7116 7119 expectedtype = b'global'
7117 7120
7118 7121 for n in names:
7119 7122 if repo.tagtype(n) == b'global':
7120 7123 alltags = tagsmod.findglobaltags(ui, repo)
7121 7124 if alltags[n][0] == nullid:
7122 7125 raise error.Abort(_(b"tag '%s' is already removed") % n)
7123 7126 if not repo.tagtype(n):
7124 7127 raise error.Abort(_(b"tag '%s' does not exist") % n)
7125 7128 if repo.tagtype(n) != expectedtype:
7126 7129 if expectedtype == b'global':
7127 7130 raise error.Abort(
7128 7131 _(b"tag '%s' is not a global tag") % n
7129 7132 )
7130 7133 else:
7131 7134 raise error.Abort(_(b"tag '%s' is not a local tag") % n)
7132 7135 rev_ = b'null'
7133 7136 if not message:
7134 7137 # we don't translate commit messages
7135 7138 message = b'Removed tag %s' % b', '.join(names)
7136 7139 elif not opts.get(b'force'):
7137 7140 for n in names:
7138 7141 if n in repo.tags():
7139 7142 raise error.Abort(
7140 7143 _(b"tag '%s' already exists (use -f to force)") % n
7141 7144 )
7142 7145 if not opts.get(b'local'):
7143 7146 p1, p2 = repo.dirstate.parents()
7144 7147 if p2 != nullid:
7145 7148 raise error.Abort(_(b'uncommitted merge'))
7146 7149 bheads = repo.branchheads()
7147 7150 if not opts.get(b'force') and bheads and p1 not in bheads:
7148 7151 raise error.Abort(
7149 7152 _(
7150 7153 b'working directory is not at a branch head '
7151 7154 b'(use -f to force)'
7152 7155 )
7153 7156 )
7154 7157 node = scmutil.revsingle(repo, rev_).node()
7155 7158
7156 7159 if not message:
7157 7160 # we don't translate commit messages
7158 7161 message = b'Added tag %s for changeset %s' % (
7159 7162 b', '.join(names),
7160 7163 short(node),
7161 7164 )
7162 7165
7163 7166 date = opts.get(b'date')
7164 7167 if date:
7165 7168 date = dateutil.parsedate(date)
7166 7169
7167 7170 if opts.get(b'remove'):
7168 7171 editform = b'tag.remove'
7169 7172 else:
7170 7173 editform = b'tag.add'
7171 7174 editor = cmdutil.getcommiteditor(
7172 7175 editform=editform, **pycompat.strkwargs(opts)
7173 7176 )
7174 7177
7175 7178 # don't allow tagging the null rev
7176 7179 if (
7177 7180 not opts.get(b'remove')
7178 7181 and scmutil.revsingle(repo, rev_).rev() == nullrev
7179 7182 ):
7180 7183 raise error.Abort(_(b"cannot tag null revision"))
7181 7184
7182 7185 tagsmod.tag(
7183 7186 repo,
7184 7187 names,
7185 7188 node,
7186 7189 message,
7187 7190 opts.get(b'local'),
7188 7191 opts.get(b'user'),
7189 7192 date,
7190 7193 editor=editor,
7191 7194 )
7192 7195
7193 7196
7194 7197 @command(
7195 7198 b'tags',
7196 7199 formatteropts,
7197 7200 b'',
7198 7201 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
7199 7202 intents={INTENT_READONLY},
7200 7203 )
7201 7204 def tags(ui, repo, **opts):
7202 7205 """list repository tags
7203 7206
7204 7207 This lists both regular and local tags. When the -v/--verbose
7205 7208 switch is used, a third column "local" is printed for local tags.
7206 7209 When the -q/--quiet switch is used, only the tag name is printed.
7207 7210
7208 7211 .. container:: verbose
7209 7212
7210 7213 Template:
7211 7214
7212 7215 The following keywords are supported in addition to the common template
7213 7216 keywords and functions such as ``{tag}``. See also
7214 7217 :hg:`help templates`.
7215 7218
7216 7219 :type: String. ``local`` for local tags.
7217 7220
7218 7221 Returns 0 on success.
7219 7222 """
7220 7223
7221 7224 opts = pycompat.byteskwargs(opts)
7222 7225 ui.pager(b'tags')
7223 7226 fm = ui.formatter(b'tags', opts)
7224 7227 hexfunc = fm.hexfunc
7225 7228
7226 7229 for t, n in reversed(repo.tagslist()):
7227 7230 hn = hexfunc(n)
7228 7231 label = b'tags.normal'
7229 7232 tagtype = b''
7230 7233 if repo.tagtype(t) == b'local':
7231 7234 label = b'tags.local'
7232 7235 tagtype = b'local'
7233 7236
7234 7237 fm.startitem()
7235 7238 fm.context(repo=repo)
7236 7239 fm.write(b'tag', b'%s', t, label=label)
7237 7240 fmt = b" " * (30 - encoding.colwidth(t)) + b' %5d:%s'
7238 7241 fm.condwrite(
7239 7242 not ui.quiet,
7240 7243 b'rev node',
7241 7244 fmt,
7242 7245 repo.changelog.rev(n),
7243 7246 hn,
7244 7247 label=label,
7245 7248 )
7246 7249 fm.condwrite(
7247 7250 ui.verbose and tagtype, b'type', b' %s', tagtype, label=label
7248 7251 )
7249 7252 fm.plain(b'\n')
7250 7253 fm.end()
7251 7254
7252 7255
7253 7256 @command(
7254 7257 b'tip',
7255 7258 [
7256 7259 (b'p', b'patch', None, _(b'show patch')),
7257 7260 (b'g', b'git', None, _(b'use git extended diff format')),
7258 7261 ]
7259 7262 + templateopts,
7260 7263 _(b'[-p] [-g]'),
7261 7264 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
7262 7265 )
7263 7266 def tip(ui, repo, **opts):
7264 7267 """show the tip revision (DEPRECATED)
7265 7268
7266 7269 The tip revision (usually just called the tip) is the changeset
7267 7270 most recently added to the repository (and therefore the most
7268 7271 recently changed head).
7269 7272
7270 7273 If you have just made a commit, that commit will be the tip. If
7271 7274 you have just pulled changes from another repository, the tip of
7272 7275 that repository becomes the current tip. The "tip" tag is special
7273 7276 and cannot be renamed or assigned to a different changeset.
7274 7277
7275 7278 This command is deprecated, please use :hg:`heads` instead.
7276 7279
7277 7280 Returns 0 on success.
7278 7281 """
7279 7282 opts = pycompat.byteskwargs(opts)
7280 7283 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
7281 7284 displayer.show(repo[b'tip'])
7282 7285 displayer.close()
7283 7286
7284 7287
7285 7288 @command(
7286 7289 b'unbundle',
7287 7290 [
7288 7291 (
7289 7292 b'u',
7290 7293 b'update',
7291 7294 None,
7292 7295 _(b'update to new branch head if changesets were unbundled'),
7293 7296 )
7294 7297 ],
7295 7298 _(b'[-u] FILE...'),
7296 7299 helpcategory=command.CATEGORY_IMPORT_EXPORT,
7297 7300 )
7298 7301 def unbundle(ui, repo, fname1, *fnames, **opts):
7299 7302 """apply one or more bundle files
7300 7303
7301 7304 Apply one or more bundle files generated by :hg:`bundle`.
7302 7305
7303 7306 Returns 0 on success, 1 if an update has unresolved files.
7304 7307 """
7305 7308 fnames = (fname1,) + fnames
7306 7309
7307 7310 with repo.lock():
7308 7311 for fname in fnames:
7309 7312 f = hg.openpath(ui, fname)
7310 7313 gen = exchange.readbundle(ui, f, fname)
7311 7314 if isinstance(gen, streamclone.streamcloneapplier):
7312 7315 raise error.Abort(
7313 7316 _(
7314 7317 b'packed bundles cannot be applied with '
7315 7318 b'"hg unbundle"'
7316 7319 ),
7317 7320 hint=_(b'use "hg debugapplystreamclonebundle"'),
7318 7321 )
7319 7322 url = b'bundle:' + fname
7320 7323 try:
7321 7324 txnname = b'unbundle'
7322 7325 if not isinstance(gen, bundle2.unbundle20):
7323 7326 txnname = b'unbundle\n%s' % util.hidepassword(url)
7324 7327 with repo.transaction(txnname) as tr:
7325 7328 op = bundle2.applybundle(
7326 7329 repo, gen, tr, source=b'unbundle', url=url
7327 7330 )
7328 7331 except error.BundleUnknownFeatureError as exc:
7329 7332 raise error.Abort(
7330 7333 _(b'%s: unknown bundle feature, %s') % (fname, exc),
7331 7334 hint=_(
7332 7335 b"see https://mercurial-scm.org/"
7333 7336 b"wiki/BundleFeature for more "
7334 7337 b"information"
7335 7338 ),
7336 7339 )
7337 7340 modheads = bundle2.combinechangegroupresults(op)
7338 7341
7339 7342 return postincoming(ui, repo, modheads, opts.get('update'), None, None)
7340 7343
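# A minimal sketch (not part of this changeset) of how the stream-clone check
# in unbundle() above could be reused on its own. The helper name
# `_is_stream_clone_bundle` is hypothetical; `hg.openpath`,
# `exchange.readbundle` and `streamclone.streamcloneapplier` are the same
# objects the command itself uses.
from mercurial import exchange, hg, streamclone

def _is_stream_clone_bundle(ui, fname):
    # `hg unbundle` refuses such bundles and points at
    # `hg debugapplystreamclonebundle` instead (see the Abort above).
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    return isinstance(gen, streamclone.streamcloneapplier)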
7341 7344
7342 7345 @command(
7343 7346 b'unshelve',
7344 7347 [
7345 7348 (b'a', b'abort', None, _(b'abort an incomplete unshelve operation')),
7346 7349 (
7347 7350 b'c',
7348 7351 b'continue',
7349 7352 None,
7350 7353 _(b'continue an incomplete unshelve operation'),
7351 7354 ),
7352 7355 (b'i', b'interactive', None, _(b'use interactive mode (EXPERIMENTAL)')),
7353 7356 (b'k', b'keep', None, _(b'keep shelve after unshelving')),
7354 7357 (
7355 7358 b'n',
7356 7359 b'name',
7357 7360 b'',
7358 7361 _(b'restore shelved change with given name'),
7359 7362 _(b'NAME'),
7360 7363 ),
7361 7364 (b't', b'tool', b'', _(b'specify merge tool')),
7362 7365 (
7363 7366 b'',
7364 7367 b'date',
7365 7368 b'',
7366 7369 _(b'set date for temporary commits (DEPRECATED)'),
7367 7370 _(b'DATE'),
7368 7371 ),
7369 7372 ],
7370 7373 _(b'hg unshelve [OPTION]... [[-n] SHELVED]'),
7371 7374 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
7372 7375 )
7373 7376 def unshelve(ui, repo, *shelved, **opts):
7374 7377 """restore a shelved change to the working directory
7375 7378
7376 7379 This command accepts an optional name of a shelved change to
7377 7380 restore. If none is given, the most recent shelved change is used.
7378 7381
7379 7382 If a shelved change is applied successfully, the bundle that
7380 7383 contains the shelved changes is moved to a backup location
7381 7384 (.hg/shelve-backup).
7382 7385
7383 7386 Since you can restore a shelved change on top of an arbitrary
7384 7387 commit, it is possible that unshelving will result in a conflict
7385 7388 between your changes and the commits you are unshelving onto. If
7386 7389 this occurs, you must resolve the conflict, then use
7387 7390 ``--continue`` to complete the unshelve operation. (The bundle
7388 7391 will not be moved until you successfully complete the unshelve.)
7389 7392
7390 7393 (Alternatively, you can use ``--abort`` to abandon an unshelve
7391 7394 that causes a conflict. This reverts the unshelved changes, and
7392 7395 leaves the bundle in place.)
7393 7396
7394 7397 If a bare shelved change (one made without the interactive, include,
7395 7398 or exclude options) was created on a newly created branch, unshelving
7396 7399 restores that branch information to the working directory.
7397 7400
7398 7401 After a successful unshelve, the shelved changes are stored in a
7399 7402 backup directory. Only the N most recent backups are kept. N
7400 7403 defaults to 10 but can be overridden using the ``shelve.maxbackups``
7401 7404 configuration option.
7402 7405
7403 7406 .. container:: verbose
7404 7407
7405 7408 Timestamps in seconds decide the order of backups. For safety, more
7406 7409 than ``maxbackups`` backups may be kept when identical timestamps
7407 7410 make their exact order ambiguous.
7408 7411
7409 7412 Selected changes can be unshelved with ``--interactive`` flag.
7410 7413 The working directory is updated with the selected changes, and
7411 7414 only the unselected changes remain shelved.
7412 7415 Note: The whole shelve is applied to the working directory before
7413 7416 running interactively, so all conflicts between the working directory
7414 7417 and the shelve are surfaced, irrespective of which changes will be
7415 7418 unshelved.
7416 7419 """
7417 7420 with repo.wlock():
7418 7421 return shelvemod.unshelvecmd(ui, repo, *shelved, **opts)
7419 7422
7420 7423
7421 7424 statemod.addunfinished(
7422 7425 b'unshelve',
7423 7426 fname=b'shelvedstate',
7424 7427 continueflag=True,
7425 7428 abortfunc=shelvemod.hgabortunshelve,
7426 7429 continuefunc=shelvemod.hgcontinueunshelve,
7427 7430 cmdmsg=_(b'unshelve already in progress'),
7428 7431 )
7429 7432
7430 7433
7431 7434 @command(
7432 7435 b'update|up|checkout|co',
7433 7436 [
7434 7437 (b'C', b'clean', None, _(b'discard uncommitted changes (no backup)')),
7435 7438 (b'c', b'check', None, _(b'require clean working directory')),
7436 7439 (b'm', b'merge', None, _(b'merge uncommitted changes')),
7437 7440 (b'd', b'date', b'', _(b'tipmost revision matching date'), _(b'DATE')),
7438 7441 (b'r', b'rev', b'', _(b'revision'), _(b'REV')),
7439 7442 ]
7440 7443 + mergetoolopts,
7441 7444 _(b'[-C|-c|-m] [-d DATE] [[-r] REV]'),
7442 7445 helpcategory=command.CATEGORY_WORKING_DIRECTORY,
7443 7446 helpbasic=True,
7444 7447 )
7445 7448 def update(ui, repo, node=None, **opts):
7446 7449 """update working directory (or switch revisions)
7447 7450
7448 7451 Update the repository's working directory to the specified
7449 7452 changeset. If no changeset is specified, update to the tip of the
7450 7453 current named branch and move the active bookmark (see :hg:`help
7451 7454 bookmarks`).
7452 7455
7453 7456 Update sets the working directory's parent revision to the specified
7454 7457 changeset (see :hg:`help parents`).
7455 7458
7456 7459 If the changeset is not a descendant or ancestor of the working
7457 7460 directory's parent and there are uncommitted changes, the update is
7458 7461 aborted. With the -c/--check option, the working directory is checked
7459 7462 for uncommitted changes; if none are found, the working directory is
7460 7463 updated to the specified changeset.
7461 7464
7462 7465 .. container:: verbose
7463 7466
7464 7467 The -C/--clean, -c/--check, and -m/--merge options control what
7465 7468 happens if the working directory contains uncommitted changes.
7466 7469 At most one of them can be specified.
7467 7470
7468 7471 1. If no option is specified, and if
7469 7472 the requested changeset is an ancestor or descendant of
7470 7473 the working directory's parent, the uncommitted changes
7471 7474 are merged into the requested changeset and the merged
7472 7475 result is left uncommitted. If the requested changeset is
7473 7476 not an ancestor or descendant (that is, it is on another
7474 7477 branch), the update is aborted and the uncommitted changes
7475 7478 are preserved.
7476 7479
7477 7480 2. With the -m/--merge option, the update is allowed even if the
7478 7481 requested changeset is not an ancestor or descendant of
7479 7482 the working directory's parent.
7480 7483
7481 7484 3. With the -c/--check option, the update is aborted and the
7482 7485 uncommitted changes are preserved.
7483 7486
7484 7487 4. With the -C/--clean option, uncommitted changes are discarded and
7485 7488 the working directory is updated to the requested changeset.
7486 7489
7487 7490 To cancel an uncommitted merge (and lose your changes), use
7488 7491 :hg:`merge --abort`.
7489 7492
7490 7493 Use null as the changeset to remove the working directory (like
7491 7494 :hg:`clone -U`).
7492 7495
7493 7496 If you want to revert just one file to an older revision, use
7494 7497 :hg:`revert [-r REV] NAME`.
7495 7498
7496 7499 See :hg:`help dates` for a list of formats valid for -d/--date.
7497 7500
7498 7501 Returns 0 on success, 1 if there are unresolved files.
7499 7502 """
7500 7503 cmdutil.check_at_most_one_arg(opts, 'clean', 'check', 'merge')
7501 7504 rev = opts.get('rev')
7502 7505 date = opts.get('date')
7503 7506 clean = opts.get('clean')
7504 7507 check = opts.get('check')
7505 7508 merge = opts.get('merge')
7506 7509 if rev and node:
7507 7510 raise error.Abort(_(b"please specify just one revision"))
7508 7511
7509 7512 if ui.configbool(b'commands', b'update.requiredest'):
7510 7513 if not node and not rev and not date:
7511 7514 raise error.Abort(
7512 7515 _(b'you must specify a destination'),
7513 7516 hint=_(b'for example: hg update ".::"'),
7514 7517 )
7515 7518
7516 7519 if rev is None or rev == b'':
7517 7520 rev = node
7518 7521
7519 7522 if date and rev is not None:
7520 7523 raise error.Abort(_(b"you can't specify a revision and a date"))
7521 7524
7522 7525 updatecheck = None
7523 7526 if check:
7524 7527 updatecheck = b'abort'
7525 7528 elif merge:
7526 7529 updatecheck = b'none'
7527 7530
7528 7531 with repo.wlock():
7529 7532 cmdutil.clearunfinished(repo)
7530 7533 if date:
7531 7534 rev = cmdutil.finddate(ui, repo, date)
7532 7535
7533 7536 # if we defined a bookmark, we have to remember the original name
7534 7537 brev = rev
7535 7538 if rev:
7536 7539 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
7537 7540 ctx = scmutil.revsingle(repo, rev, default=None)
7538 7541 rev = ctx.rev()
7539 7542 hidden = ctx.hidden()
7540 7543 overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
7541 7544 with ui.configoverride(overrides, b'update'):
7542 7545 ret = hg.updatetotally(
7543 7546 ui, repo, rev, brev, clean=clean, updatecheck=updatecheck
7544 7547 )
7545 7548 if hidden:
7546 7549 ctxstr = ctx.hex()[:12]
7547 7550 ui.warn(_(b"updated to hidden changeset %s\n") % ctxstr)
7548 7551
7549 7552 if ctx.obsolete():
7550 7553 obsfatemsg = obsutil._getfilteredreason(repo, ctxstr, ctx)
7551 7554 ui.warn(b"(%s)\n" % obsfatemsg)
7552 7555 return ret
7553 7556
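# A minimal sketch (hypothetical helper, not in the diff) isolating the
# flag-to-updatecheck mapping used by update() above: --check aborts on
# uncommitted changes, --merge allows the update even across branches, and
# None lets hg.updatetotally() fall back to its default behaviour.
def _updatecheck_from_flags(check, merge):
    if check:
        return b'abort'
    if merge:
        return b'none'
    return None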
7554 7557
7555 7558 @command(
7556 7559 b'verify',
7557 7560 [(b'', b'full', False, b'perform more checks (EXPERIMENTAL)')],
7558 7561 helpcategory=command.CATEGORY_MAINTENANCE,
7559 7562 )
7560 7563 def verify(ui, repo, **opts):
7561 7564 """verify the integrity of the repository
7562 7565
7563 7566 Verify the integrity of the current repository.
7564 7567
7565 7568 This will perform an extensive check of the repository's
7566 7569 integrity, validating the hashes and checksums of each entry in
7567 7570 the changelog, manifest, and tracked files, as well as the
7568 7571 integrity of their crosslinks and indices.
7569 7572
7570 7573 Please see https://mercurial-scm.org/wiki/RepositoryCorruption
7571 7574 for more information about recovery from corruption of the
7572 7575 repository.
7573 7576
7574 7577 Returns 0 on success, 1 if errors are encountered.
7575 7578 """
7576 7579 opts = pycompat.byteskwargs(opts)
7577 7580
7578 7581 level = None
7579 7582 if opts[b'full']:
7580 7583 level = verifymod.VERIFY_FULL
7581 7584 return hg.verify(repo, level)
7582 7585
7583 7586
7584 7587 @command(
7585 7588 b'version',
7586 7589 [] + formatteropts,
7587 7590 helpcategory=command.CATEGORY_HELP,
7588 7591 norepo=True,
7589 7592 intents={INTENT_READONLY},
7590 7593 )
7591 7594 def version_(ui, **opts):
7592 7595 """output version and copyright information
7593 7596
7594 7597 .. container:: verbose
7595 7598
7596 7599 Template:
7597 7600
7598 7601 The following keywords are supported. See also :hg:`help templates`.
7599 7602
7600 7603 :extensions: List of extensions.
7601 7604 :ver: String. Version number.
7602 7605
7603 7606 And each entry of ``{extensions}`` provides the following sub-keywords
7604 7607 in addition to ``{ver}``.
7605 7608
7606 7609 :bundled: Boolean. True if included in the release.
7607 7610 :name: String. Extension name.
7608 7611 """
7609 7612 opts = pycompat.byteskwargs(opts)
7610 7613 if ui.verbose:
7611 7614 ui.pager(b'version')
7612 7615 fm = ui.formatter(b"version", opts)
7613 7616 fm.startitem()
7614 7617 fm.write(
7615 7618 b"ver", _(b"Mercurial Distributed SCM (version %s)\n"), util.version()
7616 7619 )
7617 7620 license = _(
7618 7621 b"(see https://mercurial-scm.org for more information)\n"
7619 7622 b"\nCopyright (C) 2005-2020 Matt Mackall and others\n"
7620 7623 b"This is free software; see the source for copying conditions. "
7621 7624 b"There is NO\nwarranty; "
7622 7625 b"not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
7623 7626 )
7624 7627 if not ui.quiet:
7625 7628 fm.plain(license)
7626 7629
7627 7630 if ui.verbose:
7628 7631 fm.plain(_(b"\nEnabled extensions:\n\n"))
7629 7632 # format names and versions into columns
7630 7633 names = []
7631 7634 vers = []
7632 7635 isinternals = []
7633 7636 for name, module in sorted(extensions.extensions()):
7634 7637 names.append(name)
7635 7638 vers.append(extensions.moduleversion(module) or None)
7636 7639 isinternals.append(extensions.ismoduleinternal(module))
7637 7640 fn = fm.nested(b"extensions", tmpl=b'{name}\n')
7638 7641 if names:
7639 7642 namefmt = b" %%-%ds " % max(len(n) for n in names)
7640 7643 places = [_(b"external"), _(b"internal")]
7641 7644 for n, v, p in zip(names, vers, isinternals):
7642 7645 fn.startitem()
7643 7646 fn.condwrite(ui.verbose, b"name", namefmt, n)
7644 7647 if ui.verbose:
7645 7648 fn.plain(b"%s " % places[p])
7646 7649 fn.data(bundled=p)
7647 7650 fn.condwrite(ui.verbose and v, b"ver", b"%s", v)
7648 7651 if ui.verbose:
7649 7652 fn.plain(b"\n")
7650 7653 fn.end()
7651 7654 fm.end()
7652 7655
7653 7656
7654 7657 def loadcmdtable(ui, name, cmdtable):
7655 7658 """Load command functions from specified cmdtable
7656 7659 """
7657 7660 overrides = [cmd for cmd in cmdtable if cmd in table]
7658 7661 if overrides:
7659 7662 ui.warn(
7660 7663 _(b"extension '%s' overrides commands: %s\n")
7661 7664 % (name, b" ".join(overrides))
7662 7665 )
7663 7666 table.update(cmdtable)
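# A minimal sketch of the extension-side counterpart to loadcmdtable(): an
# extension module exposing a `cmdtable` that gets merged into `table` above.
# The command name b'hello' is purely illustrative.
from mercurial import registrar
from mercurial.i18n import _

cmdtable = {}
command = registrar.command(cmdtable)

@command(b'hello', [], _(b'hg hello'), norepo=True)
def hello(ui, **opts):
    """print a greeting (illustrative example only)"""
    ui.write(b'hello from an example extension\n')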
@@ -1,3157 +1,2752 b''
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import weakref
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 nullrev,
18 18 )
19 from .thirdparty import attr
20 19 from . import (
21 20 bookmarks as bookmod,
22 21 bundle2,
22 bundlecaches,
23 23 changegroup,
24 24 discovery,
25 25 error,
26 26 exchangev2,
27 27 lock as lockmod,
28 28 logexchange,
29 29 narrowspec,
30 30 obsolete,
31 31 obsutil,
32 32 phases,
33 33 pushkey,
34 34 pycompat,
35 35 requirements,
36 36 scmutil,
37 sslutil,
38 37 streamclone,
39 38 url as urlmod,
40 39 util,
41 40 wireprototypes,
42 41 )
43 42 from .utils import (
44 43 hashutil,
45 44 stringutil,
46 45 )
47 46
48 47 urlerr = util.urlerr
49 48 urlreq = util.urlreq
50 49
51 50 _NARROWACL_SECTION = b'narrowacl'
52 51
53 # Maps bundle version human names to changegroup versions.
54 _bundlespeccgversions = {
55 b'v1': b'01',
56 b'v2': b'02',
57 b'packed1': b's1',
58 b'bundle2': b'02', # legacy
59 }
60
61 # Maps bundle version with content opts to choose which part to bundle
62 _bundlespeccontentopts = {
63 b'v1': {
64 b'changegroup': True,
65 b'cg.version': b'01',
66 b'obsolescence': False,
67 b'phases': False,
68 b'tagsfnodescache': False,
69 b'revbranchcache': False,
70 },
71 b'v2': {
72 b'changegroup': True,
73 b'cg.version': b'02',
74 b'obsolescence': False,
75 b'phases': False,
76 b'tagsfnodescache': True,
77 b'revbranchcache': True,
78 },
79 b'packed1': {b'cg.version': b's1'},
80 }
81 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
82
83 _bundlespecvariants = {
84 b"streamv2": {
85 b"changegroup": False,
86 b"streamv2": True,
87 b"tagsfnodescache": False,
88 b"revbranchcache": False,
89 }
90 }
91
92 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
93 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
94
95
96 @attr.s
97 class bundlespec(object):
98 compression = attr.ib()
99 wirecompression = attr.ib()
100 version = attr.ib()
101 wireversion = attr.ib()
102 params = attr.ib()
103 contentopts = attr.ib()
104
105
106 def parsebundlespec(repo, spec, strict=True):
107 """Parse a bundle string specification into parts.
108
109 Bundle specifications denote a well-defined bundle/exchange format.
110 The content of a given specification should not change over time in
111 order to ensure that bundles produced by a newer version of Mercurial are
112 readable from an older version.
113
114 The string currently has the form:
115
116 <compression>-<type>[;<parameter0>[;<parameter1>]]
117
118 Where <compression> is one of the supported compression formats
119 and <type> is (currently) a version string. A ";" can follow the type and
120 all text afterwards is interpreted as URI encoded, ";" delimited key=value
121 pairs.
122
123 If ``strict`` is True (the default) <compression> is required. Otherwise,
124 it is optional.
125
126 Returns a bundlespec object of (compression, version, parameters).
127 Compression will be ``None`` if not in strict mode and a compression isn't
128 defined.
129
130 An ``InvalidBundleSpecification`` is raised when the specification is
131 not syntactically well formed.
132
133 An ``UnsupportedBundleSpecification`` is raised when the compression or
134 bundle type/version is not recognized.
135
136 Note: this function will likely eventually return a more complex data
137 structure, including bundle2 part information.
138 """
139
140 def parseparams(s):
141 if b';' not in s:
142 return s, {}
143
144 params = {}
145 version, paramstr = s.split(b';', 1)
146
147 for p in paramstr.split(b';'):
148 if b'=' not in p:
149 raise error.InvalidBundleSpecification(
150 _(
151 b'invalid bundle specification: '
152 b'missing "=" in parameter: %s'
153 )
154 % p
155 )
156
157 key, value = p.split(b'=', 1)
158 key = urlreq.unquote(key)
159 value = urlreq.unquote(value)
160 params[key] = value
161
162 return version, params
163
164 if strict and b'-' not in spec:
165 raise error.InvalidBundleSpecification(
166 _(
167 b'invalid bundle specification; '
168 b'must be prefixed with compression: %s'
169 )
170 % spec
171 )
172
173 if b'-' in spec:
174 compression, version = spec.split(b'-', 1)
175
176 if compression not in util.compengines.supportedbundlenames:
177 raise error.UnsupportedBundleSpecification(
178 _(b'%s compression is not supported') % compression
179 )
180
181 version, params = parseparams(version)
182
183 if version not in _bundlespeccgversions:
184 raise error.UnsupportedBundleSpecification(
185 _(b'%s is not a recognized bundle version') % version
186 )
187 else:
188 # Value could be just the compression or just the version, in which
189 # case some defaults are assumed (but only when not in strict mode).
190 assert not strict
191
192 spec, params = parseparams(spec)
193
194 if spec in util.compengines.supportedbundlenames:
195 compression = spec
196 version = b'v1'
197 # Generaldelta repos require v2.
198 if b'generaldelta' in repo.requirements:
199 version = b'v2'
200 # Modern compression engines require v2.
201 if compression not in _bundlespecv1compengines:
202 version = b'v2'
203 elif spec in _bundlespeccgversions:
204 if spec == b'packed1':
205 compression = b'none'
206 else:
207 compression = b'bzip2'
208 version = spec
209 else:
210 raise error.UnsupportedBundleSpecification(
211 _(b'%s is not a recognized bundle specification') % spec
212 )
213
214 # Bundle version 1 only supports a known set of compression engines.
215 if version == b'v1' and compression not in _bundlespecv1compengines:
216 raise error.UnsupportedBundleSpecification(
217 _(b'compression engine %s is not supported on v1 bundles')
218 % compression
219 )
220
221 # The specification for packed1 can optionally declare the data formats
222 # required to apply it. If we see this metadata, compare against what the
223 # repo supports and error if the bundle isn't compatible.
224 if version == b'packed1' and b'requirements' in params:
225 requirements = set(params[b'requirements'].split(b','))
226 missingreqs = requirements - repo.supportedformats
227 if missingreqs:
228 raise error.UnsupportedBundleSpecification(
229 _(b'missing support for repository features: %s')
230 % b', '.join(sorted(missingreqs))
231 )
232
233 # Compute contentopts based on the version
234 contentopts = _bundlespeccontentopts.get(version, {}).copy()
235
236 # Process the variants
237 if b"stream" in params and params[b"stream"] == b"v2":
238 variant = _bundlespecvariants[b"streamv2"]
239 contentopts.update(variant)
240
241 engine = util.compengines.forbundlename(compression)
242 compression, wirecompression = engine.bundletype()
243 wireversion = _bundlespeccgversions[version]
244
245 return bundlespec(
246 compression, wirecompression, version, wireversion, params, contentopts
247 )
248
249 52
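# A rough sketch (not part of the diff) of what the parsing removed here
# produces; after this change the helper lives in the `bundlecaches` module
# added to the imports above. The spec string is just an example.
from mercurial import bundlecaches

def _describe_spec(repo):
    bs = bundlecaches.parsebundlespec(repo, b'gzip-v2;obsolescence=true')
    # expected, per the parsing logic that used to live here:
    #   bs.compression == b'gzip', bs.version == b'v2'
    #   bs.params == {b'obsolescence': b'true'}
    return bs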
250 53 def readbundle(ui, fh, fname, vfs=None):
251 54 header = changegroup.readexactly(fh, 4)
252 55
253 56 alg = None
254 57 if not fname:
255 58 fname = b"stream"
256 59 if not header.startswith(b'HG') and header.startswith(b'\0'):
257 60 fh = changegroup.headerlessfixup(fh, header)
258 61 header = b"HG10"
259 62 alg = b'UN'
260 63 elif vfs:
261 64 fname = vfs.join(fname)
262 65
263 66 magic, version = header[0:2], header[2:4]
264 67
265 68 if magic != b'HG':
266 69 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
267 70 if version == b'10':
268 71 if alg is None:
269 72 alg = changegroup.readexactly(fh, 2)
270 73 return changegroup.cg1unpacker(fh, alg)
271 74 elif version.startswith(b'2'):
272 75 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
273 76 elif version == b'S1':
274 77 return streamclone.streamcloneapplier(fh)
275 78 else:
276 79 raise error.Abort(
277 80 _(b'%s: unknown bundle version %s') % (fname, version)
278 81 )
279 82
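# A minimal sketch (hypothetical helper) of the header-to-unpacker mapping
# implemented by readbundle() above, expressed as a simple classifier over
# the 4-byte magic/version prefix.
def _classify_bundle_header(header):
    magic, version = header[0:2], header[2:4]
    if magic != b'HG':
        return b'not-a-bundle'    # readbundle() aborts in this case
    if version == b'10':
        return b'changegroup-v1'  # handled by changegroup.cg1unpacker
    if version.startswith(b'2'):
        return b'bundle2'         # handled by bundle2.getunbundler
    if version == b'S1':
        return b'stream-clone'    # handled by streamclone.streamcloneapplier
    return b'unknown'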
280 83
281 84 def getbundlespec(ui, fh):
282 85 """Infer the bundlespec from a bundle file handle.
283 86
284 87 The input file handle is seeked and the original seek position is not
285 88 restored.
286 89 """
287 90
288 91 def speccompression(alg):
289 92 try:
290 93 return util.compengines.forbundletype(alg).bundletype()[0]
291 94 except KeyError:
292 95 return None
293 96
294 97 b = readbundle(ui, fh, None)
295 98 if isinstance(b, changegroup.cg1unpacker):
296 99 alg = b._type
297 100 if alg == b'_truncatedBZ':
298 101 alg = b'BZ'
299 102 comp = speccompression(alg)
300 103 if not comp:
301 104 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
302 105 return b'%s-v1' % comp
303 106 elif isinstance(b, bundle2.unbundle20):
304 107 if b'Compression' in b.params:
305 108 comp = speccompression(b.params[b'Compression'])
306 109 if not comp:
307 110 raise error.Abort(
308 111 _(b'unknown compression algorithm: %s') % comp
309 112 )
310 113 else:
311 114 comp = b'none'
312 115
313 116 version = None
314 117 for part in b.iterparts():
315 118 if part.type == b'changegroup':
316 119 version = part.params[b'version']
317 120 if version in (b'01', b'02'):
318 121 version = b'v2'
319 122 else:
320 123 raise error.Abort(
321 124 _(
322 125 b'changegroup version %s does not have '
323 126 b'a known bundlespec'
324 127 )
325 128 % version,
326 129 hint=_(b'try upgrading your Mercurial client'),
327 130 )
328 131 elif part.type == b'stream2' and version is None:
329 132 # A stream2 part requires to be part of a v2 bundle
330 133 requirements = urlreq.unquote(part.params[b'requirements'])
331 134 splitted = requirements.split()
332 135 params = bundle2._formatrequirementsparams(splitted)
333 136 return b'none-v2;stream=v2;%s' % params
334 137
335 138 if not version:
336 139 raise error.Abort(
337 140 _(b'could not identify changegroup version in bundle')
338 141 )
339 142
340 143 return b'%s-%s' % (comp, version)
341 144 elif isinstance(b, streamclone.streamcloneapplier):
342 145 requirements = streamclone.readbundle1header(fh)[2]
343 146 formatted = bundle2._formatrequirementsparams(requirements)
344 147 return b'none-packed1;%s' % formatted
345 148 else:
346 149 raise error.Abort(_(b'unknown bundle type: %s') % b)
347 150
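# A minimal sketch (hypothetical helper) of the intended use of
# getbundlespec(): hand it an open file object positioned at the start of a
# bundle and get back a spec string such as b'gzip-v1'. As documented above,
# the handle is consumed and its seek position is not restored.
def _bundlespec_of_file(ui, path):
    with open(path, 'rb') as fh:
        return getbundlespec(ui, fh)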
348 151
349 152 def _computeoutgoing(repo, heads, common):
350 153 """Computes which revs are outgoing given a set of common
351 154 and a set of heads.
352 155
353 156 This is a separate function so extensions can have access to
354 157 the logic.
355 158
356 159 Returns a discovery.outgoing object.
357 160 """
358 161 cl = repo.changelog
359 162 if common:
360 163 hasnode = cl.hasnode
361 164 common = [n for n in common if hasnode(n)]
362 165 else:
363 166 common = [nullid]
364 167 if not heads:
365 168 heads = cl.heads()
366 169 return discovery.outgoing(repo, common, heads)
367 170
368 171
369 172 def _checkpublish(pushop):
370 173 repo = pushop.repo
371 174 ui = repo.ui
372 175 behavior = ui.config(b'experimental', b'auto-publish')
373 176 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
374 177 return
375 178 remotephases = listkeys(pushop.remote, b'phases')
376 179 if not remotephases.get(b'publishing', False):
377 180 return
378 181
379 182 if pushop.revs is None:
380 183 published = repo.filtered(b'served').revs(b'not public()')
381 184 else:
382 185 published = repo.revs(b'::%ln - public()', pushop.revs)
383 186 if published:
384 187 if behavior == b'warn':
385 188 ui.warn(
386 189 _(b'%i changesets about to be published\n') % len(published)
387 190 )
388 191 elif behavior == b'confirm':
389 192 if ui.promptchoice(
390 193 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
391 194 % len(published)
392 195 ):
393 196 raise error.Abort(_(b'user quit'))
394 197 elif behavior == b'abort':
395 198 msg = _(b'push would publish %i changesets') % len(published)
396 199 hint = _(
397 200 b"use --publish or adjust 'experimental.auto-publish'"
398 201 b" config"
399 202 )
400 203 raise error.Abort(msg, hint=hint)
401 204
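# A minimal sketch (hypothetical helper) of the configuration lookup done at
# the top of _checkpublish(): only b'warn', b'confirm' and b'abort' enable
# the check; any other value disables it.
def _auto_publish_behavior(ui):
    behavior = ui.config(b'experimental', b'auto-publish')
    if behavior not in (b'warn', b'confirm', b'abort'):
        return None
    return behavior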
402 205
403 206 def _forcebundle1(op):
404 207 """return true if a pull/push must use bundle1
405 208
406 209 This function is used to allow testing of the older bundle version"""
407 210 ui = op.repo.ui
408 211 # The goal of this config is to allow developers to choose the bundle
409 212 # version used during exchange. This is especially handy during tests.
410 213 # The value is a list of bundle versions to pick from; the highest
411 214 # version should be used.
412 215 #
413 216 # developer config: devel.legacy.exchange
414 217 exchange = ui.configlist(b'devel', b'legacy.exchange')
415 218 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
416 219 return forcebundle1 or not op.remote.capable(b'bundle2')
417 220
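# A minimal sketch (hypothetical helper) of the decision made in
# _forcebundle1(): with `legacy.exchange = bundle1` in the [devel] section,
# bundle1 is forced even against bundle2-capable peers.
def _wants_bundle1(ui):
    exchange_versions = ui.configlist(b'devel', b'legacy.exchange')
    return b'bundle2' not in exchange_versions and b'bundle1' in exchange_versions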
418 221
419 222 class pushoperation(object):
420 223 """A object that represent a single push operation
421 224
422 225 Its purpose is to carry push related state and very common operations.
423 226
424 227 A new pushoperation should be created at the beginning of each push and
425 228 discarded afterward.
426 229 """
427 230
428 231 def __init__(
429 232 self,
430 233 repo,
431 234 remote,
432 235 force=False,
433 236 revs=None,
434 237 newbranch=False,
435 238 bookmarks=(),
436 239 publish=False,
437 240 pushvars=None,
438 241 ):
439 242 # repo we push from
440 243 self.repo = repo
441 244 self.ui = repo.ui
442 245 # repo we push to
443 246 self.remote = remote
444 247 # force option provided
445 248 self.force = force
446 249 # revs to be pushed (None is "all")
447 250 self.revs = revs
448 251 # bookmark explicitly pushed
449 252 self.bookmarks = bookmarks
450 253 # allow push of new branch
451 254 self.newbranch = newbranch
452 255 # step already performed
453 256 # (used to check what steps have been already performed through bundle2)
454 257 self.stepsdone = set()
455 258 # Integer version of the changegroup push result
456 259 # - None means nothing to push
457 260 # - 0 means HTTP error
458 261 # - 1 means we pushed and remote head count is unchanged *or*
459 262 # we have outgoing changesets but refused to push
460 263 # - other values as described by addchangegroup()
461 264 self.cgresult = None
462 265 # Boolean value for the bookmark push
463 266 self.bkresult = None
464 267 # discover.outgoing object (contains common and outgoing data)
465 268 self.outgoing = None
466 269 # all remote topological heads before the push
467 270 self.remoteheads = None
468 271 # Details of the remote branch pre and post push
469 272 #
470 273 # mapping: {'branch': ([remoteheads],
471 274 # [newheads],
472 275 # [unsyncedheads],
473 276 # [discardedheads])}
474 277 # - branch: the branch name
475 278 # - remoteheads: the list of remote heads known locally
476 279 # None if the branch is new
477 280 # - newheads: the new remote heads (known locally) with outgoing pushed
478 281 # - unsyncedheads: the list of remote heads unknown locally.
479 282 # - discardedheads: the list of remote heads made obsolete by the push
480 283 self.pushbranchmap = None
481 284 # testable as a boolean indicating if any nodes are missing locally.
482 285 self.incoming = None
483 286 # summary of the remote phase situation
484 287 self.remotephases = None
485 288 # phases changes that must be pushed along side the changesets
486 289 self.outdatedphases = None
487 290 # phases changes that must be pushed if changeset push fails
488 291 self.fallbackoutdatedphases = None
489 292 # outgoing obsmarkers
490 293 self.outobsmarkers = set()
491 294 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
492 295 self.outbookmarks = []
493 296 # transaction manager
494 297 self.trmanager = None
495 298 # map { pushkey partid -> callback handling failure}
496 299 # used to handle exception from mandatory pushkey part failure
497 300 self.pkfailcb = {}
498 301 # an iterable of pushvars or None
499 302 self.pushvars = pushvars
500 303 # publish pushed changesets
501 304 self.publish = publish
502 305
503 306 @util.propertycache
504 307 def futureheads(self):
505 308 """future remote heads if the changeset push succeeds"""
506 309 return self.outgoing.ancestorsof
507 310
508 311 @util.propertycache
509 312 def fallbackheads(self):
510 313 """future remote heads if the changeset push fails"""
511 314 if self.revs is None:
512 315 # no targets to push; all common heads are relevant
513 316 return self.outgoing.commonheads
514 317 unfi = self.repo.unfiltered()
515 318 # I want cheads = heads(::ancestorsof and ::commonheads)
516 319 # (ancestorsof is revs with secret changeset filtered out)
517 320 #
518 321 # This can be expressed as:
519 322 # cheads = ( (ancestorsof and ::commonheads)
520 323 # + (commonheads and ::ancestorsof))"
521 324 # )
522 325 #
523 326 # while trying to push we already computed the following:
524 327 # common = (::commonheads)
525 328 # missing = ((commonheads::ancestorsof) - commonheads)
526 329 #
527 330 # We can pick:
528 331 # * ancestorsof part of common (::commonheads)
529 332 common = self.outgoing.common
530 333 rev = self.repo.changelog.index.rev
531 334 cheads = [node for node in self.revs if rev(node) in common]
532 335 # and
533 336 # * commonheads parents on missing
534 337 revset = unfi.set(
535 338 b'%ln and parents(roots(%ln))',
536 339 self.outgoing.commonheads,
537 340 self.outgoing.missing,
538 341 )
539 342 cheads.extend(c.node() for c in revset)
540 343 return cheads
541 344
542 345 @property
543 346 def commonheads(self):
544 347 """set of all common heads after changeset bundle push"""
545 348 if self.cgresult:
546 349 return self.futureheads
547 350 else:
548 351 return self.fallbackheads
549 352
550 353
551 354 # mapping of message used when pushing bookmark
552 355 bookmsgmap = {
553 356 b'update': (
554 357 _(b"updating bookmark %s\n"),
555 358 _(b'updating bookmark %s failed!\n'),
556 359 ),
557 360 b'export': (
558 361 _(b"exporting bookmark %s\n"),
559 362 _(b'exporting bookmark %s failed!\n'),
560 363 ),
561 364 b'delete': (
562 365 _(b"deleting remote bookmark %s\n"),
563 366 _(b'deleting remote bookmark %s failed!\n'),
564 367 ),
565 368 }
566 369
567 370
568 371 def push(
569 372 repo,
570 373 remote,
571 374 force=False,
572 375 revs=None,
573 376 newbranch=False,
574 377 bookmarks=(),
575 378 publish=False,
576 379 opargs=None,
577 380 ):
578 381 '''Push outgoing changesets (limited by revs) from a local
579 382 repository to remote. Return an integer:
580 383 - None means nothing to push
581 384 - 0 means HTTP error
582 385 - 1 means we pushed and remote head count is unchanged *or*
583 386 we have outgoing changesets but refused to push
584 387 - other values as described by addchangegroup()
585 388 '''
586 389 if opargs is None:
587 390 opargs = {}
588 391 pushop = pushoperation(
589 392 repo,
590 393 remote,
591 394 force,
592 395 revs,
593 396 newbranch,
594 397 bookmarks,
595 398 publish,
596 399 **pycompat.strkwargs(opargs)
597 400 )
598 401 if pushop.remote.local():
599 402 missing = (
600 403 set(pushop.repo.requirements) - pushop.remote.local().supported
601 404 )
602 405 if missing:
603 406 msg = _(
604 407 b"required features are not"
605 408 b" supported in the destination:"
606 409 b" %s"
607 410 ) % (b', '.join(sorted(missing)))
608 411 raise error.Abort(msg)
609 412
610 413 if not pushop.remote.canpush():
611 414 raise error.Abort(_(b"destination does not support push"))
612 415
613 416 if not pushop.remote.capable(b'unbundle'):
614 417 raise error.Abort(
615 418 _(
616 419 b'cannot push: destination does not support the '
617 420 b'unbundle wire protocol command'
618 421 )
619 422 )
620 423
621 424 # get lock as we might write phase data
622 425 wlock = lock = None
623 426 try:
624 427 # bundle2 push may receive a reply bundle touching bookmarks
625 428 # requiring the wlock. Take it now to ensure proper ordering.
626 429 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
627 430 if (
628 431 (not _forcebundle1(pushop))
629 432 and maypushback
630 433 and not bookmod.bookmarksinstore(repo)
631 434 ):
632 435 wlock = pushop.repo.wlock()
633 436 lock = pushop.repo.lock()
634 437 pushop.trmanager = transactionmanager(
635 438 pushop.repo, b'push-response', pushop.remote.url()
636 439 )
637 440 except error.LockUnavailable as err:
638 441 # source repo cannot be locked.
639 442 # We do not abort the push, but just disable the local phase
640 443 # synchronisation.
641 444 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
642 445 err
643 446 )
644 447 pushop.ui.debug(msg)
645 448
646 449 with wlock or util.nullcontextmanager():
647 450 with lock or util.nullcontextmanager():
648 451 with pushop.trmanager or util.nullcontextmanager():
649 452 pushop.repo.checkpush(pushop)
650 453 _checkpublish(pushop)
651 454 _pushdiscovery(pushop)
652 455 if not pushop.force:
653 456 _checksubrepostate(pushop)
654 457 if not _forcebundle1(pushop):
655 458 _pushbundle2(pushop)
656 459 _pushchangeset(pushop)
657 460 _pushsyncphase(pushop)
658 461 _pushobsolete(pushop)
659 462 _pushbookmark(pushop)
660 463
661 464 if repo.ui.configbool(b'experimental', b'remotenames'):
662 465 logexchange.pullremotenames(repo, remote)
663 466
664 467 return pushop
665 468
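# A minimal sketch (hypothetical caller, not in the diff) reading the integer
# result described in the docstring above from the returned push operation.
def _push_and_report(ui, repo, remote):
    pushop = push(repo, remote)
    if pushop.cgresult is None:
        ui.status(b'nothing to push\n')
    return pushop.cgresult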
666 469
667 470 # list of steps to perform discovery before push
668 471 pushdiscoveryorder = []
669 472
670 473 # Mapping between step name and function
671 474 #
672 475 # This exists to help extensions wrap steps if necessary
673 476 pushdiscoverymapping = {}
674 477
675 478
676 479 def pushdiscovery(stepname):
677 480 """decorator for function performing discovery before push
678 481
679 482 The function is added to the step -> function mapping and appended to the
680 483 list of steps. Beware that decorated functions will be added in order (this
681 484 may matter).
682 485
683 486 You can only use this decorator for a new step; if you want to wrap a step
684 487 from an extension, change the pushdiscoverymapping dictionary directly."""
685 488
686 489 def dec(func):
687 490 assert stepname not in pushdiscoverymapping
688 491 pushdiscoverymapping[stepname] = func
689 492 pushdiscoveryorder.append(stepname)
690 493 return func
691 494
692 495 return dec
693 496
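# A minimal sketch (hypothetical step, not in the diff) of registering an
# additional discovery step with the decorator above; steps run in
# registration order when _pushdiscovery() walks pushdiscoveryorder.
@pushdiscovery(b'example-audit')
def _pushdiscoveryexampleaudit(pushop):
    pushop.ui.debug(b'example-audit discovery step ran\n')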
694 497
695 498 def _pushdiscovery(pushop):
696 499 """Run all discovery steps"""
697 500 for stepname in pushdiscoveryorder:
698 501 step = pushdiscoverymapping[stepname]
699 502 step(pushop)
700 503
701 504
702 505 def _checksubrepostate(pushop):
703 506 """Ensure all outgoing referenced subrepo revisions are present locally"""
704 507 for n in pushop.outgoing.missing:
705 508 ctx = pushop.repo[n]
706 509
707 510 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
708 511 for subpath in sorted(ctx.substate):
709 512 sub = ctx.sub(subpath)
710 513 sub.verify(onpush=True)
711 514
712 515
713 516 @pushdiscovery(b'changeset')
714 517 def _pushdiscoverychangeset(pushop):
715 518 """discover the changesets that need to be pushed"""
716 519 fci = discovery.findcommonincoming
717 520 if pushop.revs:
718 521 commoninc = fci(
719 522 pushop.repo,
720 523 pushop.remote,
721 524 force=pushop.force,
722 525 ancestorsof=pushop.revs,
723 526 )
724 527 else:
725 528 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
726 529 common, inc, remoteheads = commoninc
727 530 fco = discovery.findcommonoutgoing
728 531 outgoing = fco(
729 532 pushop.repo,
730 533 pushop.remote,
731 534 onlyheads=pushop.revs,
732 535 commoninc=commoninc,
733 536 force=pushop.force,
734 537 )
735 538 pushop.outgoing = outgoing
736 539 pushop.remoteheads = remoteheads
737 540 pushop.incoming = inc
738 541
739 542
740 543 @pushdiscovery(b'phase')
741 544 def _pushdiscoveryphase(pushop):
742 545 """discover the phases that need to be pushed
743 546
744 547 (computed for both the success and failure cases of the changeset push)"""
745 548 outgoing = pushop.outgoing
746 549 unfi = pushop.repo.unfiltered()
747 550 remotephases = listkeys(pushop.remote, b'phases')
748 551
749 552 if (
750 553 pushop.ui.configbool(b'ui', b'_usedassubrepo')
751 554 and remotephases # server supports phases
752 555 and not pushop.outgoing.missing # no changesets to be pushed
753 556 and remotephases.get(b'publishing', False)
754 557 ):
755 558 # When:
756 559 # - this is a subrepo push
757 560 # - and remote support phase
758 561 # - and no changeset are to be pushed
759 562 # - and remote is publishing
760 563 # We may be in issue 3781 case!
761 564 # We drop the phase synchronisation normally done as a courtesy;
762 565 # it would publish changesets that may still be draft locally
763 566 # because the remote is publishing.
764 567 pushop.outdatedphases = []
765 568 pushop.fallbackoutdatedphases = []
766 569 return
767 570
768 571 pushop.remotephases = phases.remotephasessummary(
769 572 pushop.repo, pushop.fallbackheads, remotephases
770 573 )
771 574 droots = pushop.remotephases.draftroots
772 575
773 576 extracond = b''
774 577 if not pushop.remotephases.publishing:
775 578 extracond = b' and public()'
776 579 revset = b'heads((%%ln::%%ln) %s)' % extracond
777 580 # Get the list of all revs draft on remote by public here.
778 581 # XXX Beware that the revset breaks if droots is not strictly
779 582 # XXX roots; we may want to ensure it is, but that is costly
780 583 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
781 584 if not pushop.remotephases.publishing and pushop.publish:
782 585 future = list(
783 586 unfi.set(
784 587 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
785 588 )
786 589 )
787 590 elif not outgoing.missing:
788 591 future = fallback
789 592 else:
790 593 # adds changeset we are going to push as draft
791 594 #
792 595 # should not be necessary for a publishing server, but because of an
793 596 # issue fixed in xxxxx we have to do it anyway.
794 597 fdroots = list(
795 598 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
796 599 )
797 600 fdroots = [f.node() for f in fdroots]
798 601 future = list(unfi.set(revset, fdroots, pushop.futureheads))
799 602 pushop.outdatedphases = future
800 603 pushop.fallbackoutdatedphases = fallback
801 604
802 605
803 606 @pushdiscovery(b'obsmarker')
804 607 def _pushdiscoveryobsmarkers(pushop):
805 608 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
806 609 return
807 610
808 611 if not pushop.repo.obsstore:
809 612 return
810 613
811 614 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
812 615 return
813 616
814 617 repo = pushop.repo
815 618 # very naive computation that can be quite expensive on big repos.
816 619 # However: evolution is currently slow on them anyway.
817 620 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
818 621 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
819 622
820 623
821 624 @pushdiscovery(b'bookmarks')
822 625 def _pushdiscoverybookmarks(pushop):
823 626 ui = pushop.ui
824 627 repo = pushop.repo.unfiltered()
825 628 remote = pushop.remote
826 629 ui.debug(b"checking for updated bookmarks\n")
827 630 ancestors = ()
828 631 if pushop.revs:
829 632 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
830 633 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
831 634
832 635 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
833 636
834 637 explicit = {
835 638 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
836 639 }
837 640
838 641 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
839 642 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
840 643
841 644
842 645 def _processcompared(pushop, pushed, explicit, remotebms, comp):
843 646 """decide which bookmarks to push to the remote repo
844 647
845 648 Exists to help extensions alter this behavior.
846 649 """
847 650 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
848 651
849 652 repo = pushop.repo
850 653
851 654 for b, scid, dcid in advsrc:
852 655 if b in explicit:
853 656 explicit.remove(b)
854 657 if not pushed or repo[scid].rev() in pushed:
855 658 pushop.outbookmarks.append((b, dcid, scid))
856 659 # search added bookmark
857 660 for b, scid, dcid in addsrc:
858 661 if b in explicit:
859 662 explicit.remove(b)
860 663 if bookmod.isdivergent(b):
861 664 pushop.ui.warn(_(b'cannot push divergent bookmark %s!\n') % b)
862 665 pushop.bkresult = 2
863 666 else:
864 667 pushop.outbookmarks.append((b, b'', scid))
865 668 # search for overwritten bookmark
866 669 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
867 670 if b in explicit:
868 671 explicit.remove(b)
869 672 pushop.outbookmarks.append((b, dcid, scid))
870 673 # search for bookmark to delete
871 674 for b, scid, dcid in adddst:
872 675 if b in explicit:
873 676 explicit.remove(b)
874 677 # treat as "deleted locally"
875 678 pushop.outbookmarks.append((b, dcid, b''))
876 679 # identical bookmarks shouldn't get reported
877 680 for b, scid, dcid in same:
878 681 if b in explicit:
879 682 explicit.remove(b)
880 683
881 684 if explicit:
882 685 explicit = sorted(explicit)
883 686 # we should probably list all of them
884 687 pushop.ui.warn(
885 688 _(
886 689 b'bookmark %s does not exist on the local '
887 690 b'or remote repository!\n'
888 691 )
889 692 % explicit[0]
890 693 )
891 694 pushop.bkresult = 2
892 695
893 696 pushop.outbookmarks.sort()
894 697
895 698
896 699 def _pushcheckoutgoing(pushop):
897 700 outgoing = pushop.outgoing
898 701 unfi = pushop.repo.unfiltered()
899 702 if not outgoing.missing:
900 703 # nothing to push
901 704 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
902 705 return False
903 706 # something to push
904 707 if not pushop.force:
905 708 # if repo.obsstore == False --> no obsolete
906 709 # then, save the iteration
907 710 if unfi.obsstore:
908 711 # these messages are defined here because of the 80-char line limit
909 712 mso = _(b"push includes obsolete changeset: %s!")
910 713 mspd = _(b"push includes phase-divergent changeset: %s!")
911 714 mscd = _(b"push includes content-divergent changeset: %s!")
912 715 mst = {
913 716 b"orphan": _(b"push includes orphan changeset: %s!"),
914 717 b"phase-divergent": mspd,
915 718 b"content-divergent": mscd,
916 719 }
917 720 # If we are about to push and there is at least one
918 721 # obsolete or unstable changeset in missing, then at
919 722 # least one of the missing heads will be obsolete or
920 723 # unstable. So checking only the heads is ok.
921 724 for node in outgoing.ancestorsof:
922 725 ctx = unfi[node]
923 726 if ctx.obsolete():
924 727 raise error.Abort(mso % ctx)
925 728 elif ctx.isunstable():
926 729 # TODO print more than one instability in the abort
927 730 # message
928 731 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
929 732
930 733 discovery.checkheads(pushop)
931 734 return True
932 735
933 736
934 737 # List of names of steps to perform for an outgoing bundle2, order matters.
935 738 b2partsgenorder = []
936 739
937 740 # Mapping between step name and function
938 741 #
939 742 # This exists to help extensions wrap steps if necessary
940 743 b2partsgenmapping = {}
941 744
942 745
943 746 def b2partsgenerator(stepname, idx=None):
944 747 """decorator for function generating bundle2 part
945 748
946 749 The function is added to the step -> function mapping and appended to the
947 750 list of steps. Beware that decorated functions will be added in order
948 751 (this may matter).
949 752
950 753 You can only use this decorator for new steps; if you want to wrap a step
951 754 from an extension, modify the b2partsgenmapping dictionary directly."""
952 755
953 756 def dec(func):
954 757 assert stepname not in b2partsgenmapping
955 758 b2partsgenmapping[stepname] = func
956 759 if idx is None:
957 760 b2partsgenorder.append(stepname)
958 761 else:
959 762 b2partsgenorder.insert(idx, stepname)
960 763 return func
961 764
962 765 return dec
963 766
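# A minimal sketch (hypothetical wrapper, not in the diff) of the approach
# the docstring above recommends for extensions: replace an entry of
# b2partsgenmapping instead of registering a new step name.
def _wrap_changeset_partgen():
    orig = b2partsgenmapping[b'changeset']

    def wrapped(pushop, bundler):
        pushop.ui.debug(b'generating the changegroup part\n')
        return orig(pushop, bundler)

    b2partsgenmapping[b'changeset'] = wrapped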
964 767
965 768 def _pushb2ctxcheckheads(pushop, bundler):
966 769 """Generate race condition checking parts
967 770
968 771 Exists as an independent function to aid extensions
969 772 """
970 773 # * 'force' does not check for push races,
971 774 # * if we don't push anything, there is nothing to check.
972 775 if not pushop.force and pushop.outgoing.ancestorsof:
973 776 allowunrelated = b'related' in bundler.capabilities.get(
974 777 b'checkheads', ()
975 778 )
976 779 emptyremote = pushop.pushbranchmap is None
977 780 if not allowunrelated or emptyremote:
978 781 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
979 782 else:
980 783 affected = set()
981 784 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
982 785 remoteheads, newheads, unsyncedheads, discardedheads = heads
983 786 if remoteheads is not None:
984 787 remote = set(remoteheads)
985 788 affected |= set(discardedheads) & remote
986 789 affected |= remote - set(newheads)
987 790 if affected:
988 791 data = iter(sorted(affected))
989 792 bundler.newpart(b'check:updated-heads', data=data)
990 793
991 794
992 795 def _pushing(pushop):
993 796 """return True if we are pushing anything"""
994 797 return bool(
995 798 pushop.outgoing.missing
996 799 or pushop.outdatedphases
997 800 or pushop.outobsmarkers
998 801 or pushop.outbookmarks
999 802 )
1000 803
1001 804
1002 805 @b2partsgenerator(b'check-bookmarks')
1003 806 def _pushb2checkbookmarks(pushop, bundler):
1004 807 """insert bookmark move checking"""
1005 808 if not _pushing(pushop) or pushop.force:
1006 809 return
1007 810 b2caps = bundle2.bundle2caps(pushop.remote)
1008 811 hasbookmarkcheck = b'bookmarks' in b2caps
1009 812 if not (pushop.outbookmarks and hasbookmarkcheck):
1010 813 return
1011 814 data = []
1012 815 for book, old, new in pushop.outbookmarks:
1013 816 data.append((book, old))
1014 817 checkdata = bookmod.binaryencode(data)
1015 818 bundler.newpart(b'check:bookmarks', data=checkdata)
1016 819
1017 820
1018 821 @b2partsgenerator(b'check-phases')
1019 822 def _pushb2checkphases(pushop, bundler):
1020 823 """insert phase move checking"""
1021 824 if not _pushing(pushop) or pushop.force:
1022 825 return
1023 826 b2caps = bundle2.bundle2caps(pushop.remote)
1024 827 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1025 828 if pushop.remotephases is not None and hasphaseheads:
1026 829 # check that the remote phase has not changed
1027 830 checks = {p: [] for p in phases.allphases}
1028 831 checks[phases.public].extend(pushop.remotephases.publicheads)
1029 832 checks[phases.draft].extend(pushop.remotephases.draftroots)
1030 833 if any(pycompat.itervalues(checks)):
1031 834 for phase in checks:
1032 835 checks[phase].sort()
1033 836 checkdata = phases.binaryencode(checks)
1034 837 bundler.newpart(b'check:phases', data=checkdata)
1035 838
1036 839
1037 840 @b2partsgenerator(b'changeset')
1038 841 def _pushb2ctx(pushop, bundler):
1039 842 """handle changegroup push through bundle2
1040 843
1041 844 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1042 845 """
1043 846 if b'changesets' in pushop.stepsdone:
1044 847 return
1045 848 pushop.stepsdone.add(b'changesets')
1046 849 # Send known heads to the server for race detection.
1047 850 if not _pushcheckoutgoing(pushop):
1048 851 return
1049 852 pushop.repo.prepushoutgoinghooks(pushop)
1050 853
1051 854 _pushb2ctxcheckheads(pushop, bundler)
1052 855
1053 856 b2caps = bundle2.bundle2caps(pushop.remote)
1054 857 version = b'01'
1055 858 cgversions = b2caps.get(b'changegroup')
1056 859 if cgversions: # 3.1 and 3.2 ship with an empty value
1057 860 cgversions = [
1058 861 v
1059 862 for v in cgversions
1060 863 if v in changegroup.supportedoutgoingversions(pushop.repo)
1061 864 ]
1062 865 if not cgversions:
1063 866 raise error.Abort(_(b'no common changegroup version'))
1064 867 version = max(cgversions)
1065 868 cgstream = changegroup.makestream(
1066 869 pushop.repo, pushop.outgoing, version, b'push'
1067 870 )
1068 871 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1069 872 if cgversions:
1070 873 cgpart.addparam(b'version', version)
1071 874 if scmutil.istreemanifest(pushop.repo):
1072 875 cgpart.addparam(b'treemanifest', b'1')
1073 876 if b'exp-sidedata-flag' in pushop.repo.requirements:
1074 877 cgpart.addparam(b'exp-sidedata', b'1')
1075 878
1076 879 def handlereply(op):
1077 880 """extract addchangegroup returns from server reply"""
1078 881 cgreplies = op.records.getreplies(cgpart.id)
1079 882 assert len(cgreplies[b'changegroup']) == 1
1080 883 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1081 884
1082 885 return handlereply
1083 886
1084 887
1085 888 @b2partsgenerator(b'phase')
1086 889 def _pushb2phases(pushop, bundler):
1087 890 """handle phase push through bundle2"""
1088 891 if b'phases' in pushop.stepsdone:
1089 892 return
1090 893 b2caps = bundle2.bundle2caps(pushop.remote)
1091 894 ui = pushop.repo.ui
1092 895
1093 896 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1094 897 haspushkey = b'pushkey' in b2caps
1095 898 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1096 899
1097 900 if hasphaseheads and not legacyphase:
1098 901 return _pushb2phaseheads(pushop, bundler)
1099 902 elif haspushkey:
1100 903 return _pushb2phasespushkey(pushop, bundler)
1101 904
1102 905
1103 906 def _pushb2phaseheads(pushop, bundler):
1104 907 """push phase information through a bundle2 - binary part"""
1105 908 pushop.stepsdone.add(b'phases')
1106 909 if pushop.outdatedphases:
1107 910 updates = {p: [] for p in phases.allphases}
1108 911 updates[0].extend(h.node() for h in pushop.outdatedphases)
1109 912 phasedata = phases.binaryencode(updates)
1110 913 bundler.newpart(b'phase-heads', data=phasedata)
1111 914
1112 915
1113 916 def _pushb2phasespushkey(pushop, bundler):
1114 917 """push phase information through a bundle2 - pushkey part"""
1115 918 pushop.stepsdone.add(b'phases')
1116 919 part2node = []
1117 920
1118 921 def handlefailure(pushop, exc):
1119 922 targetid = int(exc.partid)
1120 923 for partid, node in part2node:
1121 924 if partid == targetid:
1122 925 raise error.Abort(_(b'updating %s to public failed') % node)
1123 926
1124 927 enc = pushkey.encode
1125 928 for newremotehead in pushop.outdatedphases:
1126 929 part = bundler.newpart(b'pushkey')
1127 930 part.addparam(b'namespace', enc(b'phases'))
1128 931 part.addparam(b'key', enc(newremotehead.hex()))
1129 932 part.addparam(b'old', enc(b'%d' % phases.draft))
1130 933 part.addparam(b'new', enc(b'%d' % phases.public))
1131 934 part2node.append((part.id, newremotehead))
1132 935 pushop.pkfailcb[part.id] = handlefailure
1133 936
1134 937 def handlereply(op):
1135 938 for partid, node in part2node:
1136 939 partrep = op.records.getreplies(partid)
1137 940 results = partrep[b'pushkey']
1138 941 assert len(results) <= 1
1139 942 msg = None
1140 943 if not results:
1141 944 msg = _(b'server ignored update of %s to public!\n') % node
1142 945 elif not int(results[0][b'return']):
1143 946 msg = _(b'updating %s to public failed!\n') % node
1144 947 if msg is not None:
1145 948 pushop.ui.warn(msg)
1146 949
1147 950 return handlereply
1148 951
1149 952
1150 953 @b2partsgenerator(b'obsmarkers')
1151 954 def _pushb2obsmarkers(pushop, bundler):
1152 955 if b'obsmarkers' in pushop.stepsdone:
1153 956 return
1154 957 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1155 958 if obsolete.commonversion(remoteversions) is None:
1156 959 return
1157 960 pushop.stepsdone.add(b'obsmarkers')
1158 961 if pushop.outobsmarkers:
1159 962 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1160 963 bundle2.buildobsmarkerspart(bundler, markers)
1161 964
1162 965
1163 966 @b2partsgenerator(b'bookmarks')
1164 967 def _pushb2bookmarks(pushop, bundler):
1165 968 """handle bookmark push through bundle2"""
1166 969 if b'bookmarks' in pushop.stepsdone:
1167 970 return
1168 971 b2caps = bundle2.bundle2caps(pushop.remote)
1169 972
1170 973 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1171 974 legacybooks = b'bookmarks' in legacy
1172 975
1173 976 if not legacybooks and b'bookmarks' in b2caps:
1174 977 return _pushb2bookmarkspart(pushop, bundler)
1175 978 elif b'pushkey' in b2caps:
1176 979 return _pushb2bookmarkspushkey(pushop, bundler)
1177 980
1178 981
1179 982 def _bmaction(old, new):
1180 983 """small utility for bookmark pushing"""
1181 984 if not old:
1182 985 return b'export'
1183 986 elif not new:
1184 987 return b'delete'
1185 988 return b'update'
1186 989
1187 990
1188 991 def _abortonsecretctx(pushop, node, b):
1189 992 """abort if a given bookmark points to a secret changeset"""
1190 993 if node and pushop.repo[node].phase() == phases.secret:
1191 994 raise error.Abort(
1192 995 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1193 996 )
1194 997
1195 998
1196 999 def _pushb2bookmarkspart(pushop, bundler):
1197 1000 pushop.stepsdone.add(b'bookmarks')
1198 1001 if not pushop.outbookmarks:
1199 1002 return
1200 1003
1201 1004 allactions = []
1202 1005 data = []
1203 1006 for book, old, new in pushop.outbookmarks:
1204 1007 _abortonsecretctx(pushop, new, book)
1205 1008 data.append((book, new))
1206 1009 allactions.append((book, _bmaction(old, new)))
1207 1010 checkdata = bookmod.binaryencode(data)
1208 1011 bundler.newpart(b'bookmarks', data=checkdata)
1209 1012
1210 1013 def handlereply(op):
1211 1014 ui = pushop.ui
1212 1015 # if success
1213 1016 for book, action in allactions:
1214 1017 ui.status(bookmsgmap[action][0] % book)
1215 1018
1216 1019 return handlereply
1217 1020
1218 1021
1219 1022 def _pushb2bookmarkspushkey(pushop, bundler):
1220 1023 pushop.stepsdone.add(b'bookmarks')
1221 1024 part2book = []
1222 1025 enc = pushkey.encode
1223 1026
1224 1027 def handlefailure(pushop, exc):
1225 1028 targetid = int(exc.partid)
1226 1029 for partid, book, action in part2book:
1227 1030 if partid == targetid:
1228 1031 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1229 1032         # we should not be called for a part we did not generate
1230 1033 assert False
1231 1034
1232 1035 for book, old, new in pushop.outbookmarks:
1233 1036 _abortonsecretctx(pushop, new, book)
1234 1037 part = bundler.newpart(b'pushkey')
1235 1038 part.addparam(b'namespace', enc(b'bookmarks'))
1236 1039 part.addparam(b'key', enc(book))
1237 1040 part.addparam(b'old', enc(hex(old)))
1238 1041 part.addparam(b'new', enc(hex(new)))
1239 1042 action = b'update'
1240 1043 if not old:
1241 1044 action = b'export'
1242 1045 elif not new:
1243 1046 action = b'delete'
1244 1047 part2book.append((part.id, book, action))
1245 1048 pushop.pkfailcb[part.id] = handlefailure
1246 1049
1247 1050 def handlereply(op):
1248 1051 ui = pushop.ui
1249 1052 for partid, book, action in part2book:
1250 1053 partrep = op.records.getreplies(partid)
1251 1054 results = partrep[b'pushkey']
1252 1055 assert len(results) <= 1
1253 1056 if not results:
1254 1057 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1255 1058 else:
1256 1059 ret = int(results[0][b'return'])
1257 1060 if ret:
1258 1061 ui.status(bookmsgmap[action][0] % book)
1259 1062 else:
1260 1063 ui.warn(bookmsgmap[action][1] % book)
1261 1064 if pushop.bkresult is not None:
1262 1065 pushop.bkresult = 1
1263 1066
1264 1067 return handlereply
1265 1068
1266 1069
1267 1070 @b2partsgenerator(b'pushvars', idx=0)
1268 1071 def _getbundlesendvars(pushop, bundler):
1269 1072 '''send shellvars via bundle2'''
1270 1073 pushvars = pushop.pushvars
1271 1074 if pushvars:
1272 1075 shellvars = {}
1273 1076 for raw in pushvars:
1274 1077 if b'=' not in raw:
1275 1078 msg = (
1276 1079 b"unable to parse variable '%s', should follow "
1277 1080 b"'KEY=VALUE' or 'KEY=' format"
1278 1081 )
1279 1082 raise error.Abort(msg % raw)
1280 1083 k, v = raw.split(b'=', 1)
1281 1084 shellvars[k] = v
1282 1085
1283 1086 part = bundler.newpart(b'pushvars')
1284 1087
1285 1088 for key, value in pycompat.iteritems(shellvars):
1286 1089 part.addparam(key, value, mandatory=False)
1287 1090
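# Editor's note (illustrative, not part of the original module): the pushvars
# handled above normally come from `hg push --pushvars "KEY=VALUE"`; each pair
# is forwarded in the 'pushvars' part so server-side hooks can observe it. A
# hypothetical invocation:
#
#     hg push --pushvars "DEBUG=1" --pushvars "REASON=hotfix"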
1288 1091
1289 1092 def _pushbundle2(pushop):
1290 1093 """push data to the remote using bundle2
1291 1094
1292 1095 The only currently supported type of data is changegroup but this will
1293 1096 evolve in the future."""
1294 1097 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1295 1098 pushback = pushop.trmanager and pushop.ui.configbool(
1296 1099 b'experimental', b'bundle2.pushback'
1297 1100 )
1298 1101
1299 1102 # create reply capability
1300 1103 capsblob = bundle2.encodecaps(
1301 1104 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1302 1105 )
1303 1106 bundler.newpart(b'replycaps', data=capsblob)
1304 1107 replyhandlers = []
1305 1108 for partgenname in b2partsgenorder:
1306 1109 partgen = b2partsgenmapping[partgenname]
1307 1110 ret = partgen(pushop, bundler)
1308 1111 if callable(ret):
1309 1112 replyhandlers.append(ret)
1310 1113 # do not push if nothing to push
1311 1114 if bundler.nbparts <= 1:
1312 1115 return
1313 1116 stream = util.chunkbuffer(bundler.getchunks())
1314 1117 try:
1315 1118 try:
1316 1119 with pushop.remote.commandexecutor() as e:
1317 1120 reply = e.callcommand(
1318 1121 b'unbundle',
1319 1122 {
1320 1123 b'bundle': stream,
1321 1124 b'heads': [b'force'],
1322 1125 b'url': pushop.remote.url(),
1323 1126 },
1324 1127 ).result()
1325 1128 except error.BundleValueError as exc:
1326 1129 raise error.Abort(_(b'missing support for %s') % exc)
1327 1130 try:
1328 1131 trgetter = None
1329 1132 if pushback:
1330 1133 trgetter = pushop.trmanager.transaction
1331 1134 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1332 1135 except error.BundleValueError as exc:
1333 1136 raise error.Abort(_(b'missing support for %s') % exc)
1334 1137 except bundle2.AbortFromPart as exc:
1335 1138 pushop.ui.status(_(b'remote: %s\n') % exc)
1336 1139 if exc.hint is not None:
1337 1140 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1338 1141 raise error.Abort(_(b'push failed on remote'))
1339 1142 except error.PushkeyFailed as exc:
1340 1143 partid = int(exc.partid)
1341 1144 if partid not in pushop.pkfailcb:
1342 1145 raise
1343 1146 pushop.pkfailcb[partid](pushop, exc)
1344 1147 for rephand in replyhandlers:
1345 1148 rephand(op)
1346 1149
1347 1150
1348 1151 def _pushchangeset(pushop):
1349 1152 """Make the actual push of changeset bundle to remote repo"""
1350 1153 if b'changesets' in pushop.stepsdone:
1351 1154 return
1352 1155 pushop.stepsdone.add(b'changesets')
1353 1156 if not _pushcheckoutgoing(pushop):
1354 1157 return
1355 1158
1356 1159 # Should have verified this in push().
1357 1160 assert pushop.remote.capable(b'unbundle')
1358 1161
1359 1162 pushop.repo.prepushoutgoinghooks(pushop)
1360 1163 outgoing = pushop.outgoing
1361 1164 # TODO: get bundlecaps from remote
1362 1165 bundlecaps = None
1363 1166 # create a changegroup from local
1364 1167 if pushop.revs is None and not (
1365 1168 outgoing.excluded or pushop.repo.changelog.filteredrevs
1366 1169 ):
1367 1170 # push everything,
1368 1171 # use the fast path, no race possible on push
1369 1172 cg = changegroup.makechangegroup(
1370 1173 pushop.repo,
1371 1174 outgoing,
1372 1175 b'01',
1373 1176 b'push',
1374 1177 fastpath=True,
1375 1178 bundlecaps=bundlecaps,
1376 1179 )
1377 1180 else:
1378 1181 cg = changegroup.makechangegroup(
1379 1182 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1380 1183 )
1381 1184
1382 1185 # apply changegroup to remote
1383 1186 # local repo finds heads on server, finds out what
1384 1187 # revs it must push. once revs transferred, if server
1385 1188 # finds it has different heads (someone else won
1386 1189 # commit/push race), server aborts.
1387 1190 if pushop.force:
1388 1191 remoteheads = [b'force']
1389 1192 else:
1390 1193 remoteheads = pushop.remoteheads
1391 1194 # ssh: return remote's addchangegroup()
1392 1195 # http: return remote's addchangegroup() or 0 for error
1393 1196 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1394 1197
1395 1198
1396 1199 def _pushsyncphase(pushop):
1397 1200 """synchronise phase information locally and remotely"""
1398 1201 cheads = pushop.commonheads
1399 1202 # even when we don't push, exchanging phase data is useful
1400 1203 remotephases = listkeys(pushop.remote, b'phases')
1401 1204 if (
1402 1205 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1403 1206 and remotephases # server supports phases
1404 1207 and pushop.cgresult is None # nothing was pushed
1405 1208 and remotephases.get(b'publishing', False)
1406 1209 ):
1407 1210         # When:
1408 1211         # - this is a subrepo push
1409 1212         # - and the remote supports phases
1410 1213         # - and no changeset was pushed
1411 1214         # - and the remote is publishing
1412 1215         # We may be in the issue 3871 case!
1413 1216         # We drop the phase synchronisation that would otherwise be done
1414 1217         # as a courtesy to publish changesets that are possibly still
1415 1218         # draft locally on the remote.
1416 1219 remotephases = {b'publishing': b'True'}
1417 1220 if not remotephases: # old server or public only reply from non-publishing
1418 1221 _localphasemove(pushop, cheads)
1419 1222 # don't push any phase data as there is nothing to push
1420 1223 else:
1421 1224 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1422 1225 pheads, droots = ana
1423 1226 ### Apply remote phase on local
1424 1227 if remotephases.get(b'publishing', False):
1425 1228 _localphasemove(pushop, cheads)
1426 1229 else: # publish = False
1427 1230 _localphasemove(pushop, pheads)
1428 1231 _localphasemove(pushop, cheads, phases.draft)
1429 1232 ### Apply local phase on remote
1430 1233
1431 1234 if pushop.cgresult:
1432 1235 if b'phases' in pushop.stepsdone:
1433 1236             # phases already pushed through bundle2
1434 1237 return
1435 1238 outdated = pushop.outdatedphases
1436 1239 else:
1437 1240 outdated = pushop.fallbackoutdatedphases
1438 1241
1439 1242 pushop.stepsdone.add(b'phases')
1440 1243
1441 1244 # filter heads already turned public by the push
1442 1245 outdated = [c for c in outdated if c.node() not in pheads]
1443 1246 # fallback to independent pushkey command
1444 1247 for newremotehead in outdated:
1445 1248 with pushop.remote.commandexecutor() as e:
1446 1249 r = e.callcommand(
1447 1250 b'pushkey',
1448 1251 {
1449 1252 b'namespace': b'phases',
1450 1253 b'key': newremotehead.hex(),
1451 1254 b'old': b'%d' % phases.draft,
1452 1255 b'new': b'%d' % phases.public,
1453 1256 },
1454 1257 ).result()
1455 1258
1456 1259 if not r:
1457 1260 pushop.ui.warn(
1458 1261 _(b'updating %s to public failed!\n') % newremotehead
1459 1262 )
1460 1263
1461 1264
1462 1265 def _localphasemove(pushop, nodes, phase=phases.public):
1463 1266 """move <nodes> to <phase> in the local source repo"""
1464 1267 if pushop.trmanager:
1465 1268 phases.advanceboundary(
1466 1269 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1467 1270 )
1468 1271 else:
1469 1272 # repo is not locked, do not change any phases!
1470 1273 # Informs the user that phases should have been moved when
1471 1274 # applicable.
1472 1275 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1473 1276 phasestr = phases.phasenames[phase]
1474 1277 if actualmoves:
1475 1278 pushop.ui.status(
1476 1279 _(
1477 1280 b'cannot lock source repo, skipping '
1478 1281 b'local %s phase update\n'
1479 1282 )
1480 1283 % phasestr
1481 1284 )
1482 1285
1483 1286
1484 1287 def _pushobsolete(pushop):
1485 1288 """utility function to push obsolete markers to a remote"""
1486 1289 if b'obsmarkers' in pushop.stepsdone:
1487 1290 return
1488 1291 repo = pushop.repo
1489 1292 remote = pushop.remote
1490 1293 pushop.stepsdone.add(b'obsmarkers')
1491 1294 if pushop.outobsmarkers:
1492 1295 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1493 1296 rslts = []
1494 1297 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1495 1298 remotedata = obsolete._pushkeyescape(markers)
1496 1299 for key in sorted(remotedata, reverse=True):
1497 1300 # reverse sort to ensure we end with dump0
1498 1301 data = remotedata[key]
1499 1302 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1500 1303 if [r for r in rslts if not r]:
1501 1304 msg = _(b'failed to push some obsolete markers!\n')
1502 1305 repo.ui.warn(msg)
1503 1306
1504 1307
1505 1308 def _pushbookmark(pushop):
1506 1309 """Update bookmark position on remote"""
1507 1310 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1508 1311 return
1509 1312 pushop.stepsdone.add(b'bookmarks')
1510 1313 ui = pushop.ui
1511 1314 remote = pushop.remote
1512 1315
1513 1316 for b, old, new in pushop.outbookmarks:
1514 1317 action = b'update'
1515 1318 if not old:
1516 1319 action = b'export'
1517 1320 elif not new:
1518 1321 action = b'delete'
1519 1322
1520 1323 with remote.commandexecutor() as e:
1521 1324 r = e.callcommand(
1522 1325 b'pushkey',
1523 1326 {
1524 1327 b'namespace': b'bookmarks',
1525 1328 b'key': b,
1526 1329 b'old': hex(old),
1527 1330 b'new': hex(new),
1528 1331 },
1529 1332 ).result()
1530 1333
1531 1334 if r:
1532 1335 ui.status(bookmsgmap[action][0] % b)
1533 1336 else:
1534 1337 ui.warn(bookmsgmap[action][1] % b)
1535 1338         # discovery can have set the value from an invalid entry
1536 1339 if pushop.bkresult is not None:
1537 1340 pushop.bkresult = 1
1538 1341
1539 1342
1540 1343 class pulloperation(object):
1541 1344     """An object that represents a single pull operation
1542 1345
1543 1346     Its purpose is to carry pull-related state and very common operations.
1544 1347
1545 1348     A new one should be created at the beginning of each pull and discarded
1546 1349     afterward.
1547 1350 """
1548 1351
1549 1352 def __init__(
1550 1353 self,
1551 1354 repo,
1552 1355 remote,
1553 1356 heads=None,
1554 1357 force=False,
1555 1358 bookmarks=(),
1556 1359 remotebookmarks=None,
1557 1360 streamclonerequested=None,
1558 1361 includepats=None,
1559 1362 excludepats=None,
1560 1363 depth=None,
1561 1364 ):
1562 1365 # repo we pull into
1563 1366 self.repo = repo
1564 1367 # repo we pull from
1565 1368 self.remote = remote
1566 1369         # revisions we try to pull (None means "all")
1567 1370 self.heads = heads
1568 1371         # bookmarks pulled explicitly
1569 1372 self.explicitbookmarks = [
1570 1373 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1571 1374 ]
1572 1375 # do we force pull?
1573 1376 self.force = force
1574 1377 # whether a streaming clone was requested
1575 1378 self.streamclonerequested = streamclonerequested
1576 1379 # transaction manager
1577 1380 self.trmanager = None
1578 1381         # set of common changesets between local and remote before pull
1579 1382 self.common = None
1580 1383         # set of pulled heads
1581 1384 self.rheads = None
1582 1385         # list of missing changesets to fetch remotely
1583 1386 self.fetch = None
1584 1387 # remote bookmarks data
1585 1388 self.remotebookmarks = remotebookmarks
1586 1389 # result of changegroup pulling (used as return code by pull)
1587 1390 self.cgresult = None
1588 1391         # list of steps already done
1589 1392 self.stepsdone = set()
1590 1393 # Whether we attempted a clone from pre-generated bundles.
1591 1394 self.clonebundleattempted = False
1592 1395 # Set of file patterns to include.
1593 1396 self.includepats = includepats
1594 1397 # Set of file patterns to exclude.
1595 1398 self.excludepats = excludepats
1596 1399 # Number of ancestor changesets to pull from each pulled head.
1597 1400 self.depth = depth
1598 1401
1599 1402 @util.propertycache
1600 1403 def pulledsubset(self):
1601 1404         """heads of the set of changesets targeted by the pull"""
1602 1405 # compute target subset
1603 1406 if self.heads is None:
1604 1407             # We pulled everything possible
1605 1408 # sync on everything common
1606 1409 c = set(self.common)
1607 1410 ret = list(self.common)
1608 1411 for n in self.rheads:
1609 1412 if n not in c:
1610 1413 ret.append(n)
1611 1414 return ret
1612 1415 else:
1613 1416 # We pulled a specific subset
1614 1417 # sync on this subset
1615 1418 return self.heads
1616 1419
1617 1420 @util.propertycache
1618 1421 def canusebundle2(self):
1619 1422 return not _forcebundle1(self)
1620 1423
1621 1424 @util.propertycache
1622 1425 def remotebundle2caps(self):
1623 1426 return bundle2.bundle2caps(self.remote)
1624 1427
1625 1428 def gettransaction(self):
1626 1429 # deprecated; talk to trmanager directly
1627 1430 return self.trmanager.transaction()
1628 1431
1629 1432
1630 1433 class transactionmanager(util.transactional):
1631 1434 """An object to manage the life cycle of a transaction
1632 1435
1633 1436 It creates the transaction on demand and calls the appropriate hooks when
1634 1437 closing the transaction."""
1635 1438
1636 1439 def __init__(self, repo, source, url):
1637 1440 self.repo = repo
1638 1441 self.source = source
1639 1442 self.url = url
1640 1443 self._tr = None
1641 1444
1642 1445 def transaction(self):
1643 1446 """Return an open transaction object, constructing if necessary"""
1644 1447 if not self._tr:
1645 1448 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1646 1449 self._tr = self.repo.transaction(trname)
1647 1450 self._tr.hookargs[b'source'] = self.source
1648 1451 self._tr.hookargs[b'url'] = self.url
1649 1452 return self._tr
1650 1453
1651 1454 def close(self):
1652 1455 """close transaction if created"""
1653 1456 if self._tr is not None:
1654 1457 self._tr.close()
1655 1458
1656 1459 def release(self):
1657 1460 """release transaction if created"""
1658 1461 if self._tr is not None:
1659 1462 self._tr.release()
1660 1463
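# Editor's sketch (illustrative, not part of the original module): callers use
# transactionmanager as a context manager (it is a util.transactional), so the
# transaction is only opened if transaction() is actually called, and it is
# closed on success or released on error when the block exits:
#
#     trmanager = transactionmanager(repo, b'pull', remote.url())
#     with repo.lock(), trmanager:
#         tr = trmanager.transaction()  # lazily opened here
#         ...  # apply incoming data under tr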
1661 1464
1662 1465 def listkeys(remote, namespace):
1663 1466 with remote.commandexecutor() as e:
1664 1467 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1665 1468
1666 1469
1667 1470 def _fullpullbundle2(repo, pullop):
1668 1471 # The server may send a partial reply, i.e. when inlining
1669 1472 # pre-computed bundles. In that case, update the common
1670 1473 # set based on the results and pull another bundle.
1671 1474 #
1672 1475 # There are two indicators that the process is finished:
1673 1476 # - no changeset has been added, or
1674 1477 # - all remote heads are known locally.
1675 1478     # The head check must use the unfiltered view as obsolescence
1676 1479 # markers can hide heads.
1677 1480 unfi = repo.unfiltered()
1678 1481 unficl = unfi.changelog
1679 1482
1680 1483 def headsofdiff(h1, h2):
1681 1484 """Returns heads(h1 % h2)"""
1682 1485 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1683 1486 return {ctx.node() for ctx in res}
1684 1487
1685 1488 def headsofunion(h1, h2):
1686 1489 """Returns heads((h1 + h2) - null)"""
1687 1490 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1688 1491 return {ctx.node() for ctx in res}
1689 1492
1690 1493 while True:
1691 1494 old_heads = unficl.heads()
1692 1495 clstart = len(unficl)
1693 1496 _pullbundle2(pullop)
1694 1497 if requirements.NARROW_REQUIREMENT in repo.requirements:
1695 1498 # XXX narrow clones filter the heads on the server side during
1696 1499 # XXX getbundle and result in partial replies as well.
1697 1500 # XXX Disable pull bundles in this case as band aid to avoid
1698 1501 # XXX extra round trips.
1699 1502 break
1700 1503 if clstart == len(unficl):
1701 1504 break
1702 1505 if all(unficl.hasnode(n) for n in pullop.rheads):
1703 1506 break
1704 1507 new_heads = headsofdiff(unficl.heads(), old_heads)
1705 1508 pullop.common = headsofunion(new_heads, pullop.common)
1706 1509 pullop.rheads = set(pullop.rheads) - pullop.common
1707 1510
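# Editor's note (illustrative, not part of the original module): the two
# helpers above are thin wrappers around revsets; headsofdiff(h1, h2) returns
# the heads of changesets reachable from h1 but not from h2, which is what
# lets the loop detect heads newly received by each partial reply.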
1708 1511
1709 1512 def add_confirm_callback(repo, pullop):
1710 1513     """ adds a finalize callback to the transaction which can be used to show
1711 1514     stats to the user and confirm the pull before committing the transaction """
1712 1515
1713 1516 tr = pullop.trmanager.transaction()
1714 1517 scmutil.registersummarycallback(
1715 1518 repo, tr, txnname=b'pull', as_validator=True
1716 1519 )
1717 1520 reporef = weakref.ref(repo.unfiltered())
1718 1521
1719 1522 def prompt(tr):
1720 1523 repo = reporef()
1721 1524 cm = _(b'accept incoming changes (yn)?$$ &Yes $$ &No')
1722 1525 if repo.ui.promptchoice(cm):
1723 1526 raise error.Abort(b"user aborted")
1724 1527
1725 1528 tr.addvalidator(b'900-pull-prompt', prompt)
1726 1529
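# Editor's note (illustrative, not part of the original module): this prompt is
# reached when pull() is called with confirm=True, or when the configuration
# below is set and plain mode (e.g. HGPLAIN) is not in effect:
#
#     [pull]
#     confirm = true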
1727 1530
1728 1531 def pull(
1729 1532 repo,
1730 1533 remote,
1731 1534 heads=None,
1732 1535 force=False,
1733 1536 bookmarks=(),
1734 1537 opargs=None,
1735 1538 streamclonerequested=None,
1736 1539 includepats=None,
1737 1540 excludepats=None,
1738 1541 depth=None,
1739 1542 confirm=None,
1740 1543 ):
1741 1544 """Fetch repository data from a remote.
1742 1545
1743 1546 This is the main function used to retrieve data from a remote repository.
1744 1547
1745 1548 ``repo`` is the local repository to clone into.
1746 1549 ``remote`` is a peer instance.
1747 1550 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1748 1551 default) means to pull everything from the remote.
1749 1552 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1750 1553 default, all remote bookmarks are pulled.
1751 1554 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1752 1555 initialization.
1753 1556 ``streamclonerequested`` is a boolean indicating whether a "streaming
1754 1557 clone" is requested. A "streaming clone" is essentially a raw file copy
1755 1558 of revlogs from the server. This only works when the local repository is
1756 1559 empty. The default value of ``None`` means to respect the server
1757 1560 configuration for preferring stream clones.
1758 1561 ``includepats`` and ``excludepats`` define explicit file patterns to
1759 1562 include and exclude in storage, respectively. If not defined, narrow
1760 1563 patterns from the repo instance are used, if available.
1761 1564 ``depth`` is an integer indicating the DAG depth of history we're
1762 1565 interested in. If defined, for each revision specified in ``heads``, we
1763 1566 will fetch up to this many of its ancestors and data associated with them.
1764 1567 ``confirm`` is a boolean indicating whether the pull should be confirmed
1765 1568 before committing the transaction. This overrides HGPLAIN.
1766 1569
1767 1570 Returns the ``pulloperation`` created for this pull.
1768 1571 """
1769 1572 if opargs is None:
1770 1573 opargs = {}
1771 1574
1772 1575 # We allow the narrow patterns to be passed in explicitly to provide more
1773 1576 # flexibility for API consumers.
1774 1577 if includepats or excludepats:
1775 1578 includepats = includepats or set()
1776 1579 excludepats = excludepats or set()
1777 1580 else:
1778 1581 includepats, excludepats = repo.narrowpats
1779 1582
1780 1583 narrowspec.validatepatterns(includepats)
1781 1584 narrowspec.validatepatterns(excludepats)
1782 1585
1783 1586 pullop = pulloperation(
1784 1587 repo,
1785 1588 remote,
1786 1589 heads,
1787 1590 force,
1788 1591 bookmarks=bookmarks,
1789 1592 streamclonerequested=streamclonerequested,
1790 1593 includepats=includepats,
1791 1594 excludepats=excludepats,
1792 1595 depth=depth,
1793 1596 **pycompat.strkwargs(opargs)
1794 1597 )
1795 1598
1796 1599 peerlocal = pullop.remote.local()
1797 1600 if peerlocal:
1798 1601 missing = set(peerlocal.requirements) - pullop.repo.supported
1799 1602 if missing:
1800 1603 msg = _(
1801 1604 b"required features are not"
1802 1605 b" supported in the destination:"
1803 1606 b" %s"
1804 1607 ) % (b', '.join(sorted(missing)))
1805 1608 raise error.Abort(msg)
1806 1609
1807 1610 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1808 1611 wlock = util.nullcontextmanager()
1809 1612 if not bookmod.bookmarksinstore(repo):
1810 1613 wlock = repo.wlock()
1811 1614 with wlock, repo.lock(), pullop.trmanager:
1812 1615 if confirm or (
1813 1616 repo.ui.configbool(b"pull", b"confirm") and not repo.ui.plain()
1814 1617 ):
1815 1618 add_confirm_callback(repo, pullop)
1816 1619
1817 1620 # Use the modern wire protocol, if available.
1818 1621 if remote.capable(b'command-changesetdata'):
1819 1622 exchangev2.pull(pullop)
1820 1623 else:
1821 1624 # This should ideally be in _pullbundle2(). However, it needs to run
1822 1625 # before discovery to avoid extra work.
1823 1626 _maybeapplyclonebundle(pullop)
1824 1627 streamclone.maybeperformlegacystreamclone(pullop)
1825 1628 _pulldiscovery(pullop)
1826 1629 if pullop.canusebundle2:
1827 1630 _fullpullbundle2(repo, pullop)
1828 1631 _pullchangeset(pullop)
1829 1632 _pullphase(pullop)
1830 1633 _pullbookmarks(pullop)
1831 1634 _pullobsolete(pullop)
1832 1635
1833 1636 # storing remotenames
1834 1637 if repo.ui.configbool(b'experimental', b'remotenames'):
1835 1638 logexchange.pullremotenames(repo, remote)
1836 1639
1837 1640 return pullop
1838 1641
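# Editor's sketch (illustrative, not part of the original module): how calling
# code typically drives pull(). The peer setup is an assumption here; error
# handling and options are elided.
#
#     from mercurial import hg
#
#     other = hg.peer(repo, {}, source_url)  # source_url: hypothetical path/URL
#     try:
#         pullop = pull(repo, other, heads=None, bookmarks=())
#         ret = pullop.cgresult  # changegroup result, usable as a return code
#     finally:
#         other.close()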
1839 1642
1840 1643 # list of steps to perform discovery before pull
1841 1644 pulldiscoveryorder = []
1842 1645
1843 1646 # Mapping between step name and function
1844 1647 #
1845 1648 # This exists to help extensions wrap steps if necessary
1846 1649 pulldiscoverymapping = {}
1847 1650
1848 1651
1849 1652 def pulldiscovery(stepname):
1850 1653 """decorator for function performing discovery before pull
1851 1654
1852 1655 The function is added to the step -> function mapping and appended to the
1853 1656     list of steps. Beware that decorated functions will be added in order (this
1854 1657     may matter).
1855 1658
1856 1659     You can only use this decorator for a new step; if you want to wrap a step
1857 1660     from an extension, change the pulldiscoverymapping dictionary directly."""
1858 1661
1859 1662 def dec(func):
1860 1663 assert stepname not in pulldiscoverymapping
1861 1664 pulldiscoverymapping[stepname] = func
1862 1665 pulldiscoveryorder.append(stepname)
1863 1666 return func
1864 1667
1865 1668 return dec
1866 1669
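# Editor's sketch (illustrative, not part of the original module): registering
# an additional discovery step with the decorator above. The step name and the
# body are hypothetical; real steps receive the pulloperation object.
#
#     @pulldiscovery(b'example-step')
#     def _pulldiscoveryexample(pullop):
#         pullop.repo.ui.debug(b'example discovery step ran\n')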
1867 1670
1868 1671 def _pulldiscovery(pullop):
1869 1672 """Run all discovery steps"""
1870 1673 for stepname in pulldiscoveryorder:
1871 1674 step = pulldiscoverymapping[stepname]
1872 1675 step(pullop)
1873 1676
1874 1677
1875 1678 @pulldiscovery(b'b1:bookmarks')
1876 1679 def _pullbookmarkbundle1(pullop):
1877 1680 """fetch bookmark data in bundle1 case
1878 1681
1879 1682 If not using bundle2, we have to fetch bookmarks before changeset
1880 1683 discovery to reduce the chance and impact of race conditions."""
1881 1684 if pullop.remotebookmarks is not None:
1882 1685 return
1883 1686 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1884 1687         # all known bundle2 servers now support listkeys, but let's be nice with
1885 1688         # new implementations.
1886 1689 return
1887 1690 books = listkeys(pullop.remote, b'bookmarks')
1888 1691 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1889 1692
1890 1693
1891 1694 @pulldiscovery(b'changegroup')
1892 1695 def _pulldiscoverychangegroup(pullop):
1893 1696 """discovery phase for the pull
1894 1697
1895 1698     Currently handles changeset discovery only; it will handle all discovery
1896 1699     at some point."""
1897 1700 tmp = discovery.findcommonincoming(
1898 1701 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1899 1702 )
1900 1703 common, fetch, rheads = tmp
1901 1704 has_node = pullop.repo.unfiltered().changelog.index.has_node
1902 1705 if fetch and rheads:
1903 1706         # If a remote head is filtered locally, put it back in common.
1904 1707         #
1905 1708         # This is a hackish solution to catch most "common but locally
1906 1709         # hidden" situations. We do not perform discovery on the unfiltered
1907 1710         # repository because it ends up doing a pathological amount of round
1908 1711         # trips for a huge amount of changesets we do not care about.
1909 1712         #
1910 1713         # If a set of such "common but filtered" changesets exists on the server
1911 1714         # but does not include a remote head, we'll not be able to detect it.
1912 1715 scommon = set(common)
1913 1716 for n in rheads:
1914 1717 if has_node(n):
1915 1718 if n not in scommon:
1916 1719 common.append(n)
1917 1720 if set(rheads).issubset(set(common)):
1918 1721 fetch = []
1919 1722 pullop.common = common
1920 1723 pullop.fetch = fetch
1921 1724 pullop.rheads = rheads
1922 1725
1923 1726
1924 1727 def _pullbundle2(pullop):
1925 1728 """pull data using bundle2
1926 1729
1927 1730 For now, the only supported data are changegroup."""
1928 1731 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1929 1732
1930 1733 # make ui easier to access
1931 1734 ui = pullop.repo.ui
1932 1735
1933 1736 # At the moment we don't do stream clones over bundle2. If that is
1934 1737 # implemented then here's where the check for that will go.
1935 1738 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1936 1739
1937 1740     # declare pull parameters
1938 1741 kwargs[b'common'] = pullop.common
1939 1742 kwargs[b'heads'] = pullop.heads or pullop.rheads
1940 1743
1941 1744     # check that the server supports narrow, then add includepats and excludepats
1942 1745 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1943 1746 if servernarrow and pullop.includepats:
1944 1747 kwargs[b'includepats'] = pullop.includepats
1945 1748 if servernarrow and pullop.excludepats:
1946 1749 kwargs[b'excludepats'] = pullop.excludepats
1947 1750
1948 1751 if streaming:
1949 1752 kwargs[b'cg'] = False
1950 1753 kwargs[b'stream'] = True
1951 1754 pullop.stepsdone.add(b'changegroup')
1952 1755 pullop.stepsdone.add(b'phases')
1953 1756
1954 1757 else:
1955 1758 # pulling changegroup
1956 1759 pullop.stepsdone.add(b'changegroup')
1957 1760
1958 1761 kwargs[b'cg'] = pullop.fetch
1959 1762
1960 1763 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1961 1764 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1962 1765 if not legacyphase and hasbinaryphase:
1963 1766 kwargs[b'phases'] = True
1964 1767 pullop.stepsdone.add(b'phases')
1965 1768
1966 1769 if b'listkeys' in pullop.remotebundle2caps:
1967 1770 if b'phases' not in pullop.stepsdone:
1968 1771 kwargs[b'listkeys'] = [b'phases']
1969 1772
1970 1773 bookmarksrequested = False
1971 1774 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1972 1775 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1973 1776
1974 1777 if pullop.remotebookmarks is not None:
1975 1778 pullop.stepsdone.add(b'request-bookmarks')
1976 1779
1977 1780 if (
1978 1781 b'request-bookmarks' not in pullop.stepsdone
1979 1782 and pullop.remotebookmarks is None
1980 1783 and not legacybookmark
1981 1784 and hasbinarybook
1982 1785 ):
1983 1786 kwargs[b'bookmarks'] = True
1984 1787 bookmarksrequested = True
1985 1788
1986 1789 if b'listkeys' in pullop.remotebundle2caps:
1987 1790 if b'request-bookmarks' not in pullop.stepsdone:
1988 1791             # make sure to always include bookmark data when migrating
1989 1792 # `hg incoming --bundle` to using this function.
1990 1793 pullop.stepsdone.add(b'request-bookmarks')
1991 1794 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1992 1795
1993 1796 # If this is a full pull / clone and the server supports the clone bundles
1994 1797 # feature, tell the server whether we attempted a clone bundle. The
1995 1798 # presence of this flag indicates the client supports clone bundles. This
1996 1799 # will enable the server to treat clients that support clone bundles
1997 1800 # differently from those that don't.
1998 1801 if (
1999 1802 pullop.remote.capable(b'clonebundles')
2000 1803 and pullop.heads is None
2001 1804 and list(pullop.common) == [nullid]
2002 1805 ):
2003 1806 kwargs[b'cbattempted'] = pullop.clonebundleattempted
2004 1807
2005 1808 if streaming:
2006 1809 pullop.repo.ui.status(_(b'streaming all changes\n'))
2007 1810 elif not pullop.fetch:
2008 1811 pullop.repo.ui.status(_(b"no changes found\n"))
2009 1812 pullop.cgresult = 0
2010 1813 else:
2011 1814 if pullop.heads is None and list(pullop.common) == [nullid]:
2012 1815 pullop.repo.ui.status(_(b"requesting all changes\n"))
2013 1816 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2014 1817 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
2015 1818 if obsolete.commonversion(remoteversions) is not None:
2016 1819 kwargs[b'obsmarkers'] = True
2017 1820 pullop.stepsdone.add(b'obsmarkers')
2018 1821 _pullbundle2extraprepare(pullop, kwargs)
2019 1822
2020 1823 with pullop.remote.commandexecutor() as e:
2021 1824 args = dict(kwargs)
2022 1825 args[b'source'] = b'pull'
2023 1826 bundle = e.callcommand(b'getbundle', args).result()
2024 1827
2025 1828 try:
2026 1829 op = bundle2.bundleoperation(
2027 1830 pullop.repo, pullop.gettransaction, source=b'pull'
2028 1831 )
2029 1832 op.modes[b'bookmarks'] = b'records'
2030 1833 bundle2.processbundle(pullop.repo, bundle, op=op)
2031 1834 except bundle2.AbortFromPart as exc:
2032 1835 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
2033 1836 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2034 1837 except error.BundleValueError as exc:
2035 1838 raise error.Abort(_(b'missing support for %s') % exc)
2036 1839
2037 1840 if pullop.fetch:
2038 1841 pullop.cgresult = bundle2.combinechangegroupresults(op)
2039 1842
2040 1843 # processing phases change
2041 1844 for namespace, value in op.records[b'listkeys']:
2042 1845 if namespace == b'phases':
2043 1846 _pullapplyphases(pullop, value)
2044 1847
2045 1848 # processing bookmark update
2046 1849 if bookmarksrequested:
2047 1850 books = {}
2048 1851 for record in op.records[b'bookmarks']:
2049 1852 books[record[b'bookmark']] = record[b"node"]
2050 1853 pullop.remotebookmarks = books
2051 1854 else:
2052 1855 for namespace, value in op.records[b'listkeys']:
2053 1856 if namespace == b'bookmarks':
2054 1857 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2055 1858
2056 1859 # bookmark data were either already there or pulled in the bundle
2057 1860 if pullop.remotebookmarks is not None:
2058 1861 _pullbookmarks(pullop)
2059 1862
2060 1863
2061 1864 def _pullbundle2extraprepare(pullop, kwargs):
2062 1865 """hook function so that extensions can extend the getbundle call"""
2063 1866
2064 1867
2065 1868 def _pullchangeset(pullop):
2066 1869 """pull changeset from unbundle into the local repo"""
2067 1870     # We delay opening the transaction as late as possible so we
2068 1871     # don't open a transaction for nothing or break a future useful
2069 1872     # rollback call
2070 1873 if b'changegroup' in pullop.stepsdone:
2071 1874 return
2072 1875 pullop.stepsdone.add(b'changegroup')
2073 1876 if not pullop.fetch:
2074 1877 pullop.repo.ui.status(_(b"no changes found\n"))
2075 1878 pullop.cgresult = 0
2076 1879 return
2077 1880 tr = pullop.gettransaction()
2078 1881 if pullop.heads is None and list(pullop.common) == [nullid]:
2079 1882 pullop.repo.ui.status(_(b"requesting all changes\n"))
2080 1883 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2081 1884 # issue1320, avoid a race if remote changed after discovery
2082 1885 pullop.heads = pullop.rheads
2083 1886
2084 1887 if pullop.remote.capable(b'getbundle'):
2085 1888 # TODO: get bundlecaps from remote
2086 1889 cg = pullop.remote.getbundle(
2087 1890 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2088 1891 )
2089 1892 elif pullop.heads is None:
2090 1893 with pullop.remote.commandexecutor() as e:
2091 1894 cg = e.callcommand(
2092 1895 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2093 1896 ).result()
2094 1897
2095 1898 elif not pullop.remote.capable(b'changegroupsubset'):
2096 1899 raise error.Abort(
2097 1900 _(
2098 1901 b"partial pull cannot be done because "
2099 1902 b"other repository doesn't support "
2100 1903 b"changegroupsubset."
2101 1904 )
2102 1905 )
2103 1906 else:
2104 1907 with pullop.remote.commandexecutor() as e:
2105 1908 cg = e.callcommand(
2106 1909 b'changegroupsubset',
2107 1910 {
2108 1911 b'bases': pullop.fetch,
2109 1912 b'heads': pullop.heads,
2110 1913 b'source': b'pull',
2111 1914 },
2112 1915 ).result()
2113 1916
2114 1917 bundleop = bundle2.applybundle(
2115 1918 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2116 1919 )
2117 1920 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2118 1921
2119 1922
2120 1923 def _pullphase(pullop):
2121 1924 # Get remote phases data from remote
2122 1925 if b'phases' in pullop.stepsdone:
2123 1926 return
2124 1927 remotephases = listkeys(pullop.remote, b'phases')
2125 1928 _pullapplyphases(pullop, remotephases)
2126 1929
2127 1930
2128 1931 def _pullapplyphases(pullop, remotephases):
2129 1932 """apply phase movement from observed remote state"""
2130 1933 if b'phases' in pullop.stepsdone:
2131 1934 return
2132 1935 pullop.stepsdone.add(b'phases')
2133 1936 publishing = bool(remotephases.get(b'publishing', False))
2134 1937 if remotephases and not publishing:
2135 1938 # remote is new and non-publishing
2136 1939 pheads, _dr = phases.analyzeremotephases(
2137 1940 pullop.repo, pullop.pulledsubset, remotephases
2138 1941 )
2139 1942 dheads = pullop.pulledsubset
2140 1943 else:
2141 1944         # Remote is old or publishing; all common changesets
2142 1945         # should be seen as public
2143 1946 pheads = pullop.pulledsubset
2144 1947 dheads = []
2145 1948 unfi = pullop.repo.unfiltered()
2146 1949 phase = unfi._phasecache.phase
2147 1950 rev = unfi.changelog.index.get_rev
2148 1951 public = phases.public
2149 1952 draft = phases.draft
2150 1953
2151 1954 # exclude changesets already public locally and update the others
2152 1955 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2153 1956 if pheads:
2154 1957 tr = pullop.gettransaction()
2155 1958 phases.advanceboundary(pullop.repo, tr, public, pheads)
2156 1959
2157 1960 # exclude changesets already draft locally and update the others
2158 1961 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2159 1962 if dheads:
2160 1963 tr = pullop.gettransaction()
2161 1964 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2162 1965
2163 1966
2164 1967 def _pullbookmarks(pullop):
2165 1968 """process the remote bookmark information to update the local one"""
2166 1969 if b'bookmarks' in pullop.stepsdone:
2167 1970 return
2168 1971 pullop.stepsdone.add(b'bookmarks')
2169 1972 repo = pullop.repo
2170 1973 remotebookmarks = pullop.remotebookmarks
2171 1974 bookmod.updatefromremote(
2172 1975 repo.ui,
2173 1976 repo,
2174 1977 remotebookmarks,
2175 1978 pullop.remote.url(),
2176 1979 pullop.gettransaction,
2177 1980 explicit=pullop.explicitbookmarks,
2178 1981 )
2179 1982
2180 1983
2181 1984 def _pullobsolete(pullop):
2182 1985 """utility function to pull obsolete markers from a remote
2183 1986
2184 1987     The `gettransaction` is a function that returns the pull transaction,
2185 1988     creating one if necessary. We return the transaction to inform the calling
2186 1989     code that a new transaction has been created (when applicable).
2187 1990
2188 1991     Exists mostly to allow overriding for experimentation purposes"""
2189 1992 if b'obsmarkers' in pullop.stepsdone:
2190 1993 return
2191 1994 pullop.stepsdone.add(b'obsmarkers')
2192 1995 tr = None
2193 1996 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2194 1997 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2195 1998 remoteobs = listkeys(pullop.remote, b'obsolete')
2196 1999 if b'dump0' in remoteobs:
2197 2000 tr = pullop.gettransaction()
2198 2001 markers = []
2199 2002 for key in sorted(remoteobs, reverse=True):
2200 2003 if key.startswith(b'dump'):
2201 2004 data = util.b85decode(remoteobs[key])
2202 2005 version, newmarks = obsolete._readmarkers(data)
2203 2006 markers += newmarks
2204 2007 if markers:
2205 2008 pullop.repo.obsstore.add(tr, markers)
2206 2009 pullop.repo.invalidatevolatilesets()
2207 2010 return tr
2208 2011
2209 2012
2210 2013 def applynarrowacl(repo, kwargs):
2211 2014 """Apply narrow fetch access control.
2212 2015
2213 2016 This massages the named arguments for getbundle wire protocol commands
2214 2017 so requested data is filtered through access control rules.
2215 2018 """
2216 2019 ui = repo.ui
2217 2020 # TODO this assumes existence of HTTP and is a layering violation.
2218 2021 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2219 2022 user_includes = ui.configlist(
2220 2023 _NARROWACL_SECTION,
2221 2024 username + b'.includes',
2222 2025 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2223 2026 )
2224 2027 user_excludes = ui.configlist(
2225 2028 _NARROWACL_SECTION,
2226 2029 username + b'.excludes',
2227 2030 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2228 2031 )
2229 2032 if not user_includes:
2230 2033 raise error.Abort(
2231 2034 _(b"%s configuration for user %s is empty")
2232 2035 % (_NARROWACL_SECTION, username)
2233 2036 )
2234 2037
2235 2038 user_includes = [
2236 2039 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2237 2040 ]
2238 2041 user_excludes = [
2239 2042 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2240 2043 ]
2241 2044
2242 2045 req_includes = set(kwargs.get('includepats', []))
2243 2046 req_excludes = set(kwargs.get('excludepats', []))
2244 2047
2245 2048 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2246 2049 req_includes, req_excludes, user_includes, user_excludes
2247 2050 )
2248 2051
2249 2052 if invalid_includes:
2250 2053 raise error.Abort(
2251 2054 _(b"The following includes are not accessible for %s: %s")
2252 2055 % (username, stringutil.pprint(invalid_includes))
2253 2056 )
2254 2057
2255 2058 new_args = {}
2256 2059 new_args.update(kwargs)
2257 2060 new_args['narrow'] = True
2258 2061 new_args['narrow_acl'] = True
2259 2062 new_args['includepats'] = req_includes
2260 2063 if req_excludes:
2261 2064 new_args['excludepats'] = req_excludes
2262 2065
2263 2066 return new_args
2264 2067
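# Editor's note (illustrative, not part of the original module): the access
# rules read above live in the config section named by _NARROWACL_SECTION
# (defined earlier in this module). A hypothetical configuration, with made-up
# user names and paths; '*' is mapped to 'path:.' by the code above:
#
#     [<section named by _NARROWACL_SECTION>]
#     default.includes = *
#     alice.includes = src/, docs/
#     alice.excludes = src/secret/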
2265 2068
2266 2069 def _computeellipsis(repo, common, heads, known, match, depth=None):
2267 2070 """Compute the shape of a narrowed DAG.
2268 2071
2269 2072 Args:
2270 2073 repo: The repository we're transferring.
2271 2074 common: The roots of the DAG range we're transferring.
2272 2075 May be just [nullid], which means all ancestors of heads.
2273 2076 heads: The heads of the DAG range we're transferring.
2274 2077 match: The narrowmatcher that allows us to identify relevant changes.
2275 2078 depth: If not None, only consider nodes to be full nodes if they are at
2276 2079 most depth changesets away from one of heads.
2277 2080
2278 2081 Returns:
2279 2082 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2280 2083
2281 2084 visitnodes: The list of nodes (either full or ellipsis) which
2282 2085 need to be sent to the client.
2283 2086 relevant_nodes: The set of changelog nodes which change a file inside
2284 2087 the narrowspec. The client needs these as non-ellipsis nodes.
2285 2088 ellipsisroots: A dict of {rev: parents} that is used in
2286 2089 narrowchangegroup to produce ellipsis nodes with the
2287 2090 correct parents.
2288 2091 """
2289 2092 cl = repo.changelog
2290 2093 mfl = repo.manifestlog
2291 2094
2292 2095 clrev = cl.rev
2293 2096
2294 2097 commonrevs = {clrev(n) for n in common} | {nullrev}
2295 2098 headsrevs = {clrev(n) for n in heads}
2296 2099
2297 2100 if depth:
2298 2101 revdepth = {h: 0 for h in headsrevs}
2299 2102
2300 2103 ellipsisheads = collections.defaultdict(set)
2301 2104 ellipsisroots = collections.defaultdict(set)
2302 2105
2303 2106 def addroot(head, curchange):
2304 2107 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2305 2108 ellipsisroots[head].add(curchange)
2306 2109 # Recursively split ellipsis heads with 3 roots by finding the
2307 2110 # roots' youngest common descendant which is an elided merge commit.
2308 2111 # That descendant takes 2 of the 3 roots as its own, and becomes a
2309 2112 # root of the head.
2310 2113 while len(ellipsisroots[head]) > 2:
2311 2114 child, roots = splithead(head)
2312 2115 splitroots(head, child, roots)
2313 2116 head = child # Recurse in case we just added a 3rd root
2314 2117
2315 2118 def splitroots(head, child, roots):
2316 2119 ellipsisroots[head].difference_update(roots)
2317 2120 ellipsisroots[head].add(child)
2318 2121 ellipsisroots[child].update(roots)
2319 2122 ellipsisroots[child].discard(child)
2320 2123
2321 2124 def splithead(head):
2322 2125 r1, r2, r3 = sorted(ellipsisroots[head])
2323 2126 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2324 2127 mid = repo.revs(
2325 2128 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2326 2129 )
2327 2130 for j in mid:
2328 2131 if j == nr2:
2329 2132 return nr2, (nr1, nr2)
2330 2133 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2331 2134 return j, (nr1, nr2)
2332 2135 raise error.Abort(
2333 2136 _(
2334 2137 b'Failed to split up ellipsis node! head: %d, '
2335 2138 b'roots: %d %d %d'
2336 2139 )
2337 2140 % (head, r1, r2, r3)
2338 2141 )
2339 2142
2340 2143 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2341 2144 visit = reversed(missing)
2342 2145 relevant_nodes = set()
2343 2146 visitnodes = [cl.node(m) for m in missing]
2344 2147 required = set(headsrevs) | known
2345 2148 for rev in visit:
2346 2149 clrev = cl.changelogrevision(rev)
2347 2150 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2348 2151 if depth is not None:
2349 2152 curdepth = revdepth[rev]
2350 2153 for p in ps:
2351 2154 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2352 2155 needed = False
2353 2156 shallow_enough = depth is None or revdepth[rev] <= depth
2354 2157 if shallow_enough:
2355 2158 curmf = mfl[clrev.manifest].read()
2356 2159 if ps:
2357 2160 # We choose to not trust the changed files list in
2358 2161 # changesets because it's not always correct. TODO: could
2359 2162 # we trust it for the non-merge case?
2360 2163 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2361 2164 needed = bool(curmf.diff(p1mf, match))
2362 2165 if not needed and len(ps) > 1:
2363 2166 # For merge changes, the list of changed files is not
2364 2167 # helpful, since we need to emit the merge if a file
2365 2168 # in the narrow spec has changed on either side of the
2366 2169 # merge. As a result, we do a manifest diff to check.
2367 2170 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2368 2171 needed = bool(curmf.diff(p2mf, match))
2369 2172 else:
2370 2173 # For a root node, we need to include the node if any
2371 2174 # files in the node match the narrowspec.
2372 2175 needed = any(curmf.walk(match))
2373 2176
2374 2177 if needed:
2375 2178 for head in ellipsisheads[rev]:
2376 2179 addroot(head, rev)
2377 2180 for p in ps:
2378 2181 required.add(p)
2379 2182 relevant_nodes.add(cl.node(rev))
2380 2183 else:
2381 2184 if not ps:
2382 2185 ps = [nullrev]
2383 2186 if rev in required:
2384 2187 for head in ellipsisheads[rev]:
2385 2188 addroot(head, rev)
2386 2189 for p in ps:
2387 2190 ellipsisheads[p].add(rev)
2388 2191 else:
2389 2192 for p in ps:
2390 2193 ellipsisheads[p] |= ellipsisheads[rev]
2391 2194
2392 2195 # add common changesets as roots of their reachable ellipsis heads
2393 2196 for c in commonrevs:
2394 2197 for head in ellipsisheads[c]:
2395 2198 addroot(head, c)
2396 2199 return visitnodes, relevant_nodes, ellipsisroots
2397 2200
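# Editor's sketch (illustrative, not part of the original module): a typical
# call of _computeellipsis() for a narrow transfer, with hypothetical inputs
# (a full pull of everything under 'dir/', no depth limit):
#
#     match = narrowspec.match(repo.root, include=[b'path:dir'])
#     visitnodes, relevant, ellipsisroots = _computeellipsis(
#         repo, common=[nullid], heads=repo.heads(), known=set(), match=match
#     )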
2398 2201
2399 2202 def caps20to10(repo, role):
2400 2203 """return a set with appropriate options to use bundle20 during getbundle"""
2401 2204 caps = {b'HG20'}
2402 2205 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2403 2206 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2404 2207 return caps
2405 2208
2406 2209
2407 2210 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2408 2211 getbundle2partsorder = []
2409 2212
2410 2213 # Mapping between step name and function
2411 2214 #
2412 2215 # This exists to help extensions wrap steps if necessary
2413 2216 getbundle2partsmapping = {}
2414 2217
2415 2218
2416 2219 def getbundle2partsgenerator(stepname, idx=None):
2417 2220 """decorator for function generating bundle2 part for getbundle
2418 2221
2419 2222 The function is added to the step -> function mapping and appended to the
2420 2223 list of steps. Beware that decorated functions will be added in order
2421 2224 (this may matter).
2422 2225
2423 2226     You can only use this decorator for new steps; if you want to wrap a step
2424 2227     from an extension, change the getbundle2partsmapping dictionary directly."""
2425 2228
2426 2229 def dec(func):
2427 2230 assert stepname not in getbundle2partsmapping
2428 2231 getbundle2partsmapping[stepname] = func
2429 2232 if idx is None:
2430 2233 getbundle2partsorder.append(stepname)
2431 2234 else:
2432 2235 getbundle2partsorder.insert(idx, stepname)
2433 2236 return func
2434 2237
2435 2238 return dec
2436 2239
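# Editor's sketch (illustrative, not part of the original module): how a new
# getbundle part generator would be registered with the decorator above. The
# part name is hypothetical; real generators use the same signature as the
# ones defined below.
#
#     @getbundle2partsgenerator(b'example-part')
#     def _getbundleexamplepart(
#         bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
#     ):
#         bundler.newpart(b'output', data=b'example payload\n')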
2437 2240
2438 2241 def bundle2requested(bundlecaps):
2439 2242 if bundlecaps is not None:
2440 2243 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2441 2244 return False
2442 2245
2443 2246
2444 2247 def getbundlechunks(
2445 2248 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2446 2249 ):
2447 2250 """Return chunks constituting a bundle's raw data.
2448 2251
2449 2252 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2450 2253 passed.
2451 2254
2452 2255 Returns a 2-tuple of a dict with metadata about the generated bundle
2453 2256 and an iterator over raw chunks (of varying sizes).
2454 2257 """
2455 2258 kwargs = pycompat.byteskwargs(kwargs)
2456 2259 info = {}
2457 2260 usebundle2 = bundle2requested(bundlecaps)
2458 2261 # bundle10 case
2459 2262 if not usebundle2:
2460 2263 if bundlecaps and not kwargs.get(b'cg', True):
2461 2264 raise ValueError(
2462 2265 _(b'request for bundle10 must include changegroup')
2463 2266 )
2464 2267
2465 2268 if kwargs:
2466 2269 raise ValueError(
2467 2270 _(b'unsupported getbundle arguments: %s')
2468 2271 % b', '.join(sorted(kwargs.keys()))
2469 2272 )
2470 2273 outgoing = _computeoutgoing(repo, heads, common)
2471 2274 info[b'bundleversion'] = 1
2472 2275 return (
2473 2276 info,
2474 2277 changegroup.makestream(
2475 2278 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2476 2279 ),
2477 2280 )
2478 2281
2479 2282 # bundle20 case
2480 2283 info[b'bundleversion'] = 2
2481 2284 b2caps = {}
2482 2285 for bcaps in bundlecaps:
2483 2286 if bcaps.startswith(b'bundle2='):
2484 2287 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2485 2288 b2caps.update(bundle2.decodecaps(blob))
2486 2289 bundler = bundle2.bundle20(repo.ui, b2caps)
2487 2290
2488 2291 kwargs[b'heads'] = heads
2489 2292 kwargs[b'common'] = common
2490 2293
2491 2294 for name in getbundle2partsorder:
2492 2295 func = getbundle2partsmapping[name]
2493 2296 func(
2494 2297 bundler,
2495 2298 repo,
2496 2299 source,
2497 2300 bundlecaps=bundlecaps,
2498 2301 b2caps=b2caps,
2499 2302 **pycompat.strkwargs(kwargs)
2500 2303 )
2501 2304
2502 2305 info[b'prefercompressed'] = bundler.prefercompressed
2503 2306
2504 2307 return info, bundler.getchunks()
2505 2308
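# Editor's sketch (illustrative, not part of the original module): consuming
# the (info, chunks) pair returned above; the destination filename and the
# heads/common values are assumptions. The chunks are raw bundle data, without
# any extra header beyond what the chosen format provides.
#
#     info, chunks = getbundlechunks(
#         repo, b'serve', heads=repo.heads(), common=[nullid]
#     )
#     with open('example-bundle.hg', 'wb') as fh:
#         for chunk in chunks:
#             fh.write(chunk)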
2506 2309
2507 2310 @getbundle2partsgenerator(b'stream2')
2508 2311 def _getbundlestream2(bundler, repo, *args, **kwargs):
2509 2312 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2510 2313
2511 2314
2512 2315 @getbundle2partsgenerator(b'changegroup')
2513 2316 def _getbundlechangegrouppart(
2514 2317 bundler,
2515 2318 repo,
2516 2319 source,
2517 2320 bundlecaps=None,
2518 2321 b2caps=None,
2519 2322 heads=None,
2520 2323 common=None,
2521 2324 **kwargs
2522 2325 ):
2523 2326 """add a changegroup part to the requested bundle"""
2524 2327 if not kwargs.get('cg', True) or not b2caps:
2525 2328 return
2526 2329
2527 2330 version = b'01'
2528 2331 cgversions = b2caps.get(b'changegroup')
2529 2332 if cgversions: # 3.1 and 3.2 ship with an empty value
2530 2333 cgversions = [
2531 2334 v
2532 2335 for v in cgversions
2533 2336 if v in changegroup.supportedoutgoingversions(repo)
2534 2337 ]
2535 2338 if not cgversions:
2536 2339 raise error.Abort(_(b'no common changegroup version'))
2537 2340 version = max(cgversions)
2538 2341
2539 2342 outgoing = _computeoutgoing(repo, heads, common)
2540 2343 if not outgoing.missing:
2541 2344 return
2542 2345
2543 2346 if kwargs.get('narrow', False):
2544 2347 include = sorted(filter(bool, kwargs.get('includepats', [])))
2545 2348 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2546 2349 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2547 2350 else:
2548 2351 matcher = None
2549 2352
2550 2353 cgstream = changegroup.makestream(
2551 2354 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2552 2355 )
2553 2356
2554 2357 part = bundler.newpart(b'changegroup', data=cgstream)
2555 2358 if cgversions:
2556 2359 part.addparam(b'version', version)
2557 2360
2558 2361 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2559 2362
2560 2363 if scmutil.istreemanifest(repo):
2561 2364 part.addparam(b'treemanifest', b'1')
2562 2365
2563 2366 if b'exp-sidedata-flag' in repo.requirements:
2564 2367 part.addparam(b'exp-sidedata', b'1')
2565 2368
2566 2369 if (
2567 2370 kwargs.get('narrow', False)
2568 2371 and kwargs.get('narrow_acl', False)
2569 2372 and (include or exclude)
2570 2373 ):
2571 2374 # this is mandatory because otherwise ACL clients won't work
2572 2375 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2573 2376 narrowspecpart.data = b'%s\0%s' % (
2574 2377 b'\n'.join(include),
2575 2378 b'\n'.join(exclude),
2576 2379 )
2577 2380
2578 2381
2579 2382 @getbundle2partsgenerator(b'bookmarks')
2580 2383 def _getbundlebookmarkpart(
2581 2384 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2582 2385 ):
2583 2386 """add a bookmark part to the requested bundle"""
2584 2387 if not kwargs.get('bookmarks', False):
2585 2388 return
2586 2389 if not b2caps or b'bookmarks' not in b2caps:
2587 2390 raise error.Abort(_(b'no common bookmarks exchange method'))
2588 2391 books = bookmod.listbinbookmarks(repo)
2589 2392 data = bookmod.binaryencode(books)
2590 2393 if data:
2591 2394 bundler.newpart(b'bookmarks', data=data)
2592 2395
2593 2396
2594 2397 @getbundle2partsgenerator(b'listkeys')
2595 2398 def _getbundlelistkeysparts(
2596 2399 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2597 2400 ):
2598 2401 """add parts containing listkeys namespaces to the requested bundle"""
2599 2402 listkeys = kwargs.get('listkeys', ())
2600 2403 for namespace in listkeys:
2601 2404 part = bundler.newpart(b'listkeys')
2602 2405 part.addparam(b'namespace', namespace)
2603 2406 keys = repo.listkeys(namespace).items()
2604 2407 part.data = pushkey.encodekeys(keys)
2605 2408
2606 2409
2607 2410 @getbundle2partsgenerator(b'obsmarkers')
2608 2411 def _getbundleobsmarkerpart(
2609 2412 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2610 2413 ):
2611 2414 """add an obsolescence markers part to the requested bundle"""
2612 2415 if kwargs.get('obsmarkers', False):
2613 2416 if heads is None:
2614 2417 heads = repo.heads()
2615 2418 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2616 2419 markers = repo.obsstore.relevantmarkers(subset)
2617 2420 markers = obsutil.sortedmarkers(markers)
2618 2421 bundle2.buildobsmarkerspart(bundler, markers)
2619 2422
2620 2423
2621 2424 @getbundle2partsgenerator(b'phases')
2622 2425 def _getbundlephasespart(
2623 2426 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2624 2427 ):
2625 2428 """add phase heads part to the requested bundle"""
2626 2429 if kwargs.get('phases', False):
2627 2430 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2628 2431 raise error.Abort(_(b'no common phases exchange method'))
2629 2432 if heads is None:
2630 2433 heads = repo.heads()
2631 2434
2632 2435 headsbyphase = collections.defaultdict(set)
2633 2436 if repo.publishing():
2634 2437 headsbyphase[phases.public] = heads
2635 2438 else:
2636 2439 # find the appropriate heads to move
2637 2440
2638 2441 phase = repo._phasecache.phase
2639 2442 node = repo.changelog.node
2640 2443 rev = repo.changelog.rev
2641 2444 for h in heads:
2642 2445 headsbyphase[phase(repo, rev(h))].add(h)
2643 2446 seenphases = list(headsbyphase.keys())
2644 2447
2645 2448         # We do not handle anything but public and draft phases for now
2646 2449 if seenphases:
2647 2450 assert max(seenphases) <= phases.draft
2648 2451
2649 2452 # if client is pulling non-public changesets, we need to find
2650 2453 # intermediate public heads.
2651 2454 draftheads = headsbyphase.get(phases.draft, set())
2652 2455 if draftheads:
2653 2456 publicheads = headsbyphase.get(phases.public, set())
2654 2457
2655 2458 revset = b'heads(only(%ln, %ln) and public())'
2656 2459 extraheads = repo.revs(revset, draftheads, publicheads)
2657 2460 for r in extraheads:
2658 2461 headsbyphase[phases.public].add(node(r))
2659 2462
2660 2463 # transform data in a format used by the encoding function
2661 2464 phasemapping = {
2662 2465 phase: sorted(headsbyphase[phase]) for phase in phases.allphases
2663 2466 }
2664 2467
2665 2468 # generate the actual part
2666 2469 phasedata = phases.binaryencode(phasemapping)
2667 2470 bundler.newpart(b'phase-heads', data=phasedata)
2668 2471
2669 2472
2670 2473 @getbundle2partsgenerator(b'hgtagsfnodes')
2671 2474 def _getbundletagsfnodes(
2672 2475 bundler,
2673 2476 repo,
2674 2477 source,
2675 2478 bundlecaps=None,
2676 2479 b2caps=None,
2677 2480 heads=None,
2678 2481 common=None,
2679 2482 **kwargs
2680 2483 ):
2681 2484 """Transfer the .hgtags filenodes mapping.
2682 2485
2683 2486 Only values for heads in this bundle will be transferred.
2684 2487
2685 2488 The part data consists of pairs of 20 byte changeset node and .hgtags
2686 2489 filenodes raw values.
2687 2490 """
2688 2491 # Don't send unless:
2689 2492     # - changesets are being exchanged,
2690 2493 # - the client supports it.
2691 2494 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2692 2495 return
2693 2496
2694 2497 outgoing = _computeoutgoing(repo, heads, common)
2695 2498 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2696 2499
2697 2500
2698 2501 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2699 2502 def _getbundlerevbranchcache(
2700 2503 bundler,
2701 2504 repo,
2702 2505 source,
2703 2506 bundlecaps=None,
2704 2507 b2caps=None,
2705 2508 heads=None,
2706 2509 common=None,
2707 2510 **kwargs
2708 2511 ):
2709 2512 """Transfer the rev-branch-cache mapping
2710 2513
2711 2514 The payload is a series of data related to each branch
2712 2515
2713 2516 1) branch name length
2714 2517 2) number of open heads
2715 2518 3) number of closed heads
2716 2519 4) open heads nodes
2717 2520 5) closed heads nodes
2718 2521 """
2719 2522 # Don't send unless:
2720 2523     # - changesets are being exchanged,
2721 2524 # - the client supports it.
2722 2525 # - narrow bundle isn't in play (not currently compatible).
2723 2526 if (
2724 2527 not kwargs.get('cg', True)
2725 2528 or not b2caps
2726 2529 or b'rev-branch-cache' not in b2caps
2727 2530 or kwargs.get('narrow', False)
2728 2531 or repo.ui.has_section(_NARROWACL_SECTION)
2729 2532 ):
2730 2533 return
2731 2534
2732 2535 outgoing = _computeoutgoing(repo, heads, common)
2733 2536 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2734 2537
2735 2538
2736 2539 def check_heads(repo, their_heads, context):
2737 2540 """check if the heads of a repo have been modified
2738 2541
2739 2542 Used by peer for unbundling.
2740 2543 """
2741 2544 heads = repo.heads()
2742 2545 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2743 2546 if not (
2744 2547 their_heads == [b'force']
2745 2548 or their_heads == heads
2746 2549 or their_heads == [b'hashed', heads_hash]
2747 2550 ):
2748 2551 # someone else committed/pushed/unbundled while we
2749 2552 # were transferring data
2750 2553 raise error.PushRaced(
2751 2554 b'repository changed while %s - please try again' % context
2752 2555 )
2753 2556
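As a rough standalone sketch (using only the standard library rather than Mercurial's hashutil, and hypothetical node ids), this is how a client could compute the [b'hashed', digest] form that check_heads() compares against:

import hashlib

def hashed_heads_arg(local_heads):
    # sha1 over the sorted, concatenated binary head nodes, mirroring the
    # server-side computation in check_heads()
    digest = hashlib.sha1(b''.join(sorted(local_heads))).digest()
    return [b'hashed', digest]

# hypothetical 20-byte node ids
print(hashed_heads_arg([b'\x11' * 20, b'\x22' * 20]))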
2754 2557
2755 2558 def unbundle(repo, cg, heads, source, url):
2756 2559 """Apply a bundle to a repo.
2757 2560
2758 2561     This function makes sure the repo is locked during the application and has
2759 2562     a mechanism to check that no push race occurred between the creation of the
2760 2563     bundle and its application.
2761 2564
2762 2565     If the push was raced, a PushRaced exception is raised."""
2763 2566 r = 0
2764 2567 # need a transaction when processing a bundle2 stream
2765 2568 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2766 2569 lockandtr = [None, None, None]
2767 2570 recordout = None
2768 2571 # quick fix for output mismatch with bundle2 in 3.4
2769 2572 captureoutput = repo.ui.configbool(
2770 2573 b'experimental', b'bundle2-output-capture'
2771 2574 )
2772 2575 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2773 2576 captureoutput = True
2774 2577 try:
2775 2578 # note: outside bundle1, 'heads' is expected to be empty and this
2776 2579         # 'check_heads' call will be a no-op
2777 2580 check_heads(repo, heads, b'uploading changes')
2778 2581 # push can proceed
2779 2582 if not isinstance(cg, bundle2.unbundle20):
2780 2583 # legacy case: bundle1 (changegroup 01)
2781 2584 txnname = b"\n".join([source, util.hidepassword(url)])
2782 2585 with repo.lock(), repo.transaction(txnname) as tr:
2783 2586 op = bundle2.applybundle(repo, cg, tr, source, url)
2784 2587 r = bundle2.combinechangegroupresults(op)
2785 2588 else:
2786 2589 r = None
2787 2590 try:
2788 2591
2789 2592 def gettransaction():
2790 2593 if not lockandtr[2]:
2791 2594 if not bookmod.bookmarksinstore(repo):
2792 2595 lockandtr[0] = repo.wlock()
2793 2596 lockandtr[1] = repo.lock()
2794 2597 lockandtr[2] = repo.transaction(source)
2795 2598 lockandtr[2].hookargs[b'source'] = source
2796 2599 lockandtr[2].hookargs[b'url'] = url
2797 2600 lockandtr[2].hookargs[b'bundle2'] = b'1'
2798 2601 return lockandtr[2]
2799 2602
2800 2603 # Do greedy locking by default until we're satisfied with lazy
2801 2604 # locking.
2802 2605 if not repo.ui.configbool(
2803 2606 b'experimental', b'bundle2lazylocking'
2804 2607 ):
2805 2608 gettransaction()
2806 2609
2807 2610 op = bundle2.bundleoperation(
2808 2611 repo,
2809 2612 gettransaction,
2810 2613 captureoutput=captureoutput,
2811 2614 source=b'push',
2812 2615 )
2813 2616 try:
2814 2617 op = bundle2.processbundle(repo, cg, op=op)
2815 2618 finally:
2816 2619 r = op.reply
2817 2620 if captureoutput and r is not None:
2818 2621 repo.ui.pushbuffer(error=True, subproc=True)
2819 2622
2820 2623 def recordout(output):
2821 2624 r.newpart(b'output', data=output, mandatory=False)
2822 2625
2823 2626 if lockandtr[2] is not None:
2824 2627 lockandtr[2].close()
2825 2628 except BaseException as exc:
2826 2629 exc.duringunbundle2 = True
2827 2630 if captureoutput and r is not None:
2828 2631 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2829 2632
2830 2633 def recordout(output):
2831 2634 part = bundle2.bundlepart(
2832 2635 b'output', data=output, mandatory=False
2833 2636 )
2834 2637 parts.append(part)
2835 2638
2836 2639 raise
2837 2640 finally:
2838 2641 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2839 2642 if recordout is not None:
2840 2643 recordout(repo.ui.popbuffer())
2841 2644 return r
2842 2645
2843 2646
2844 2647 def _maybeapplyclonebundle(pullop):
2845 2648 """Apply a clone bundle from a remote, if possible."""
2846 2649
2847 2650 repo = pullop.repo
2848 2651 remote = pullop.remote
2849 2652
2850 2653 if not repo.ui.configbool(b'ui', b'clonebundles'):
2851 2654 return
2852 2655
2853 2656 # Only run if local repo is empty.
2854 2657 if len(repo):
2855 2658 return
2856 2659
2857 2660 if pullop.heads:
2858 2661 return
2859 2662
2860 2663 if not remote.capable(b'clonebundles'):
2861 2664 return
2862 2665
2863 2666 with remote.commandexecutor() as e:
2864 2667 res = e.callcommand(b'clonebundles', {}).result()
2865 2668
2866 2669 # If we call the wire protocol command, that's good enough to record the
2867 2670 # attempt.
2868 2671 pullop.clonebundleattempted = True
2869 2672
2870 entries = parseclonebundlesmanifest(repo, res)
2673 entries = bundlecaches.parseclonebundlesmanifest(repo, res)
2871 2674 if not entries:
2872 2675 repo.ui.note(
2873 2676 _(
2874 2677 b'no clone bundles available on remote; '
2875 2678 b'falling back to regular clone\n'
2876 2679 )
2877 2680 )
2878 2681 return
2879 2682
2880 entries = filterclonebundleentries(
2683 entries = bundlecaches.filterclonebundleentries(
2881 2684 repo, entries, streamclonerequested=pullop.streamclonerequested
2882 2685 )
2883 2686
2884 2687 if not entries:
2885 2688 # There is a thundering herd concern here. However, if a server
2886 2689 # operator doesn't advertise bundles appropriate for its clients,
2887 2690 # they deserve what's coming. Furthermore, from a client's
2888 2691 # perspective, no automatic fallback would mean not being able to
2889 2692 # clone!
2890 2693 repo.ui.warn(
2891 2694 _(
2892 2695 b'no compatible clone bundles available on server; '
2893 2696 b'falling back to regular clone\n'
2894 2697 )
2895 2698 )
2896 2699 repo.ui.warn(
2897 2700 _(b'(you may want to report this to the server operator)\n')
2898 2701 )
2899 2702 return
2900 2703
2901 entries = sortclonebundleentries(repo.ui, entries)
2704 entries = bundlecaches.sortclonebundleentries(repo.ui, entries)
2902 2705
2903 2706 url = entries[0][b'URL']
2904 2707 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2905 2708 if trypullbundlefromurl(repo.ui, repo, url):
2906 2709 repo.ui.status(_(b'finished applying clone bundle\n'))
2907 2710 # Bundle failed.
2908 2711 #
2909 2712 # We abort by default to avoid the thundering herd of
2910 2713 # clients flooding a server that was expecting expensive
2911 2714 # clone load to be offloaded.
2912 2715 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2913 2716 repo.ui.warn(_(b'falling back to normal clone\n'))
2914 2717 else:
2915 2718 raise error.Abort(
2916 2719 _(b'error applying bundle'),
2917 2720 hint=_(
2918 2721 b'if this error persists, consider contacting '
2919 2722 b'the server operator or disable clone '
2920 2723 b'bundles via '
2921 2724 b'"--config ui.clonebundles=false"'
2922 2725 ),
2923 2726 )
2924 2727
2925 2728
2926 def parseclonebundlesmanifest(repo, s):
2927 """Parses the raw text of a clone bundles manifest.
2928
2929 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2930 to the URL and other keys are the attributes for the entry.
2931 """
2932 m = []
2933 for line in s.splitlines():
2934 fields = line.split()
2935 if not fields:
2936 continue
2937 attrs = {b'URL': fields[0]}
2938 for rawattr in fields[1:]:
2939 key, value = rawattr.split(b'=', 1)
2940 key = urlreq.unquote(key)
2941 value = urlreq.unquote(value)
2942 attrs[key] = value
2943
2944 # Parse BUNDLESPEC into components. This makes client-side
2945 # preferences easier to specify since you can prefer a single
2946 # component of the BUNDLESPEC.
2947 if key == b'BUNDLESPEC':
2948 try:
2949 bundlespec = parsebundlespec(repo, value)
2950 attrs[b'COMPRESSION'] = bundlespec.compression
2951 attrs[b'VERSION'] = bundlespec.version
2952 except error.InvalidBundleSpecification:
2953 pass
2954 except error.UnsupportedBundleSpecification:
2955 pass
2956
2957 m.append(attrs)
2958
2959 return m
2960
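As a rough illustration of the manifest format parsed above, a self-contained sketch with hypothetical URLs; the BUNDLESPEC-to-COMPRESSION/VERSION expansion is left out because it needs a repo:

from urllib.parse import unquote_to_bytes

manifest = (
    b'https://cdn.example.com/full-gzip.hg BUNDLESPEC=gzip-v2\n'
    b'https://cdn.example.com/stream.hg BUNDLESPEC=none-packed1 REQUIRESNI=true\n'
)

entries = []
for line in manifest.splitlines():
    fields = line.split()
    if not fields:
        continue
    # first field is the URL, the rest are URI-encoded key=value attributes
    attrs = {b'URL': fields[0]}
    for rawattr in fields[1:]:
        key, value = rawattr.split(b'=', 1)
        attrs[unquote_to_bytes(key)] = unquote_to_bytes(value)
    entries.append(attrs)
print(entries)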
2961
2962 def isstreamclonespec(bundlespec):
2963 # Stream clone v1
2964 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2965 return True
2966
2967 # Stream clone v2
2968 if (
2969 bundlespec.wirecompression == b'UN'
2970 and bundlespec.wireversion == b'02'
2971 and bundlespec.contentopts.get(b'streamv2')
2972 ):
2973 return True
2974
2975 return False
2976
2977
2978 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2979 """Remove incompatible clone bundle manifest entries.
2980
2981 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2982 and returns a new list consisting of only the entries that this client
2983 should be able to apply.
2984
2985 There is no guarantee we'll be able to apply all returned entries because
2986 the metadata we use to filter on may be missing or wrong.
2987 """
2988 newentries = []
2989 for entry in entries:
2990 spec = entry.get(b'BUNDLESPEC')
2991 if spec:
2992 try:
2993 bundlespec = parsebundlespec(repo, spec, strict=True)
2994
2995 # If a stream clone was requested, filter out non-streamclone
2996 # entries.
2997 if streamclonerequested and not isstreamclonespec(bundlespec):
2998 repo.ui.debug(
2999 b'filtering %s because not a stream clone\n'
3000 % entry[b'URL']
3001 )
3002 continue
3003
3004 except error.InvalidBundleSpecification as e:
3005 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
3006 continue
3007 except error.UnsupportedBundleSpecification as e:
3008 repo.ui.debug(
3009 b'filtering %s because unsupported bundle '
3010 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
3011 )
3012 continue
3013 # If we don't have a spec and requested a stream clone, we don't know
3014 # what the entry is so don't attempt to apply it.
3015 elif streamclonerequested:
3016 repo.ui.debug(
3017 b'filtering %s because cannot determine if a stream '
3018 b'clone bundle\n' % entry[b'URL']
3019 )
3020 continue
3021
3022 if b'REQUIRESNI' in entry and not sslutil.hassni:
3023 repo.ui.debug(
3024 b'filtering %s because SNI not supported\n' % entry[b'URL']
3025 )
3026 continue
3027
3028 if b'REQUIREDRAM' in entry:
3029 try:
3030 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
3031 except error.ParseError:
3032 repo.ui.debug(
3033 b'filtering %s due to a bad REQUIREDRAM attribute\n'
3034 % entry[b'URL']
3035 )
3036 continue
3037 actualram = repo.ui.estimatememory()
3038 if actualram is not None and actualram * 0.66 < requiredram:
3039 repo.ui.debug(
3040 b'filtering %s as it needs more than 2/3 of system memory\n'
3041 % entry[b'URL']
3042 )
3043 continue
3044
3045 newentries.append(entry)
3046
3047 return newentries
3048
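A hedged example of the filtering decisions above, as hypothetical entries annotated with the outcome one would expect for a client that requested a stream clone and lacks SNI support (the real function also needs a repo, so this is data only):

entries = [
    # expected to be kept: stream clone spec, no extra requirements
    {b'URL': b'https://cdn.example.com/stream.hg',
     b'BUNDLESPEC': b'none-packed1'},
    # expected to be dropped: not a stream clone spec
    {b'URL': b'https://cdn.example.com/full.hg',
     b'BUNDLESPEC': b'gzip-v2'},
    # expected to be dropped: REQUIRESNI set but this client lacks SNI
    {b'URL': b'https://sni.example.com/stream.hg',
     b'BUNDLESPEC': b'none-packed1', b'REQUIRESNI': b'true'},
    # expected to be dropped if 32GB exceeds 2/3 of estimated system memory
    {b'URL': b'https://big.example.com/full.hg',
     b'BUNDLESPEC': b'gzip-v2', b'REQUIREDRAM': b'32GB'},
]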
3049
3050 class clonebundleentry(object):
3051 """Represents an item in a clone bundles manifest.
3052
3053 This rich class is needed to support sorting since sorted() in Python 3
3054 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3055 won't work.
3056 """
3057
3058 def __init__(self, value, prefers):
3059 self.value = value
3060 self.prefers = prefers
3061
3062 def _cmp(self, other):
3063 for prefkey, prefvalue in self.prefers:
3064 avalue = self.value.get(prefkey)
3065 bvalue = other.value.get(prefkey)
3066
3067             # Special case: b is missing the attribute and a matches exactly.
3068 if avalue is not None and bvalue is None and avalue == prefvalue:
3069 return -1
3070
3071             # Special case: a is missing the attribute and b matches exactly.
3072 if bvalue is not None and avalue is None and bvalue == prefvalue:
3073 return 1
3074
3075 # We can't compare unless attribute present on both.
3076 if avalue is None or bvalue is None:
3077 continue
3078
3079 # Same values should fall back to next attribute.
3080 if avalue == bvalue:
3081 continue
3082
3083 # Exact matches come first.
3084 if avalue == prefvalue:
3085 return -1
3086 if bvalue == prefvalue:
3087 return 1
3088
3089 # Fall back to next attribute.
3090 continue
3091
3092 # If we got here we couldn't sort by attributes and prefers. Fall
3093 # back to index order.
3094 return 0
3095
3096 def __lt__(self, other):
3097 return self._cmp(other) < 0
3098
3099 def __gt__(self, other):
3100 return self._cmp(other) > 0
3101
3102 def __eq__(self, other):
3103 return self._cmp(other) == 0
3104
3105 def __le__(self, other):
3106 return self._cmp(other) <= 0
3107
3108 def __ge__(self, other):
3109 return self._cmp(other) >= 0
3110
3111 def __ne__(self, other):
3112 return self._cmp(other) != 0
3113
3114
3115 def sortclonebundleentries(ui, entries):
3116 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3117 if not prefers:
3118 return list(entries)
3119
3120 def _split(p):
3121 if b'=' not in p:
3122 hint = _(b"each comma separated item should be key=value pairs")
3123 raise error.Abort(
3124 _(b"invalid ui.clonebundleprefers item: %s") % p, hint=hint
3125 )
3126 return p.split(b'=', 1)
3127
3128 prefers = [_split(p) for p in prefers]
3129
3130 items = sorted(clonebundleentry(v, prefers) for v in entries)
3131 return [i.value for i in items]
3132
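For example, a hypothetical ui.clonebundleprefers value of 'COMPRESSION=zstd, VERSION=v2' produces the prefers list below after _split(); this sketch uses a deliberately simplified key-based stand-in for the clonebundleentry comparison, just to show the ordering effect:

# Hypothetical config:  [ui]  clonebundleprefers = COMPRESSION=zstd, VERSION=v2
# After configlist() + _split() this becomes:
prefers = [[b'COMPRESSION', b'zstd'], [b'VERSION', b'v2']]

entries = [
    {b'URL': b'https://cdn.example.com/full-gzip.hg', b'COMPRESSION': b'gzip'},
    {b'URL': b'https://cdn.example.com/full-zstd.hg', b'COMPRESSION': b'zstd'},
]

def prefer_rank(entry):
    # simplified stand-in: exact matches on earlier preferences sort first,
    # everything else keeps its manifest order
    for i, (key, value) in enumerate(prefers):
        if entry.get(key) == value:
            return i
    return len(prefers)

print([e[b'URL'] for e in sorted(entries, key=prefer_rank)])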
3133
3134 2729 def trypullbundlefromurl(ui, repo, url):
3135 2730 """Attempt to apply a bundle from a URL."""
3136 2731 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3137 2732 try:
3138 2733 fh = urlmod.open(ui, url)
3139 2734 cg = readbundle(ui, fh, b'stream')
3140 2735
3141 2736 if isinstance(cg, streamclone.streamcloneapplier):
3142 2737 cg.apply(repo)
3143 2738 else:
3144 2739 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3145 2740 return True
3146 2741 except urlerr.httperror as e:
3147 2742 ui.warn(
3148 2743 _(b'HTTP error fetching bundle: %s\n')
3149 2744 % stringutil.forcebytestr(e)
3150 2745 )
3151 2746 except urlerr.urlerror as e:
3152 2747 ui.warn(
3153 2748 _(b'error fetching bundle: %s\n')
3154 2749 % stringutil.forcebytestr(e.reason)
3155 2750 )
3156 2751
3157 2752 return False
@@ -1,748 +1,749 b''
1 1 # wireprotov1server.py - Wire protocol version 1 server functionality
2 2 #
3 3 # Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import binascii
11 11 import os
12 12
13 13 from .i18n import _
14 14 from .node import (
15 15 hex,
16 16 nullid,
17 17 )
18 18 from .pycompat import getattr
19 19
20 20 from . import (
21 21 bundle2,
22 bundlecaches,
22 23 changegroup as changegroupmod,
23 24 discovery,
24 25 encoding,
25 26 error,
26 27 exchange,
27 28 pushkey as pushkeymod,
28 29 pycompat,
29 30 streamclone,
30 31 util,
31 32 wireprototypes,
32 33 )
33 34
34 35 from .utils import (
35 36 procutil,
36 37 stringutil,
37 38 )
38 39
39 40 urlerr = util.urlerr
40 41 urlreq = util.urlreq
41 42
42 43 bundle2requiredmain = _(b'incompatible Mercurial client; bundle2 required')
43 44 bundle2requiredhint = _(
44 45 b'see https://www.mercurial-scm.org/wiki/IncompatibleClient'
45 46 )
46 47 bundle2required = b'%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
47 48
48 49
49 50 def clientcompressionsupport(proto):
50 51 """Returns a list of compression methods supported by the client.
51 52
52 53 Returns a list of the compression methods supported by the client
53 54 according to the protocol capabilities. If no such capability has
54 55     been announced, fall back to the default of zlib and uncompressed.
55 56 """
56 57 for cap in proto.getprotocaps():
57 58 if cap.startswith(b'comp='):
58 59 return cap[5:].split(b',')
59 60 return [b'zlib', b'none']
60 61
61 62
62 63 # wire protocol command can either return a string or one of these classes.
63 64
64 65
65 66 def getdispatchrepo(repo, proto, command):
66 67 """Obtain the repo used for processing wire protocol commands.
67 68
68 69 The intent of this function is to serve as a monkeypatch point for
69 70 extensions that need commands to operate on different repo views under
70 71 specialized circumstances.
71 72 """
72 73 viewconfig = repo.ui.config(b'server', b'view')
73 74 return repo.filtered(viewconfig)
74 75
75 76
76 77 def dispatch(repo, proto, command):
77 78 repo = getdispatchrepo(repo, proto, command)
78 79
79 80 func, spec = commands[command]
80 81 args = proto.getargs(spec)
81 82
82 83 return func(repo, proto, *args)
83 84
84 85
85 86 def options(cmd, keys, others):
86 87 opts = {}
87 88 for k in keys:
88 89 if k in others:
89 90 opts[k] = others[k]
90 91 del others[k]
91 92 if others:
92 93 procutil.stderr.write(
93 94 b"warning: %s ignored unexpected arguments %s\n"
94 95 % (cmd, b",".join(others))
95 96 )
96 97 return opts
97 98
98 99
99 100 def bundle1allowed(repo, action):
100 101 """Whether a bundle1 operation is allowed from the server.
101 102
102 103 Priority is:
103 104
104 105 1. server.bundle1gd.<action> (if generaldelta active)
105 106 2. server.bundle1.<action>
106 107 3. server.bundle1gd (if generaldelta active)
107 108 4. server.bundle1
108 109 """
109 110 ui = repo.ui
110 111 gd = b'generaldelta' in repo.requirements
111 112
112 113 if gd:
113 114 v = ui.configbool(b'server', b'bundle1gd.%s' % action)
114 115 if v is not None:
115 116 return v
116 117
117 118 v = ui.configbool(b'server', b'bundle1.%s' % action)
118 119 if v is not None:
119 120 return v
120 121
121 122 if gd:
122 123 v = ui.configbool(b'server', b'bundle1gd')
123 124 if v is not None:
124 125 return v
125 126
126 127 return ui.configbool(b'server', b'bundle1')
127 128
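A standalone sketch of the precedence listed in the docstring above, with a hypothetical config dict standing in for ui.configbool and an assumed default of True when nothing is set:

def bundle1allowed_sketch(config, action, generaldelta):
    # the most specific option that is explicitly set wins
    keys = []
    if generaldelta:
        keys.append(b'server.bundle1gd.' + action)
    keys.append(b'server.bundle1.' + action)
    if generaldelta:
        keys.append(b'server.bundle1gd')
    keys.append(b'server.bundle1')
    for key in keys:
        value = config.get(key)
        if value is not None:
            return value
    return True  # assumed fallback for this sketch

cfg = {b'server.bundle1gd.pull': False}
print(bundle1allowed_sketch(cfg, b'pull', generaldelta=True))   # False
print(bundle1allowed_sketch(cfg, b'pull', generaldelta=False))  # True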
128 129
129 130 commands = wireprototypes.commanddict()
130 131
131 132
132 133 def wireprotocommand(name, args=None, permission=b'push'):
133 134 """Decorator to declare a wire protocol command.
134 135
135 136 ``name`` is the name of the wire protocol command being provided.
136 137
137 138 ``args`` defines the named arguments accepted by the command. It is
138 139 a space-delimited list of argument names. ``*`` denotes a special value
139 140 that says to accept all named arguments.
140 141
141 142 ``permission`` defines the permission type needed to run this command.
142 143 Can be ``push`` or ``pull``. These roughly map to read-write and read-only,
143 144 respectively. Default is to assume command requires ``push`` permissions
144 145 because otherwise commands not declaring their permissions could modify
145 146 a repository that is supposed to be read-only.
146 147 """
147 148 transports = {
148 149 k for k, v in wireprototypes.TRANSPORTS.items() if v[b'version'] == 1
149 150 }
150 151
151 152 # Because SSHv2 is a mirror of SSHv1, we allow "batch" commands through to
152 153 # SSHv2.
153 154 # TODO undo this hack when SSH is using the unified frame protocol.
154 155 if name == b'batch':
155 156 transports.add(wireprototypes.SSHV2)
156 157
157 158 if permission not in (b'push', b'pull'):
158 159 raise error.ProgrammingError(
159 160 b'invalid wire protocol permission; '
160 161 b'got %s; expected "push" or "pull"' % permission
161 162 )
162 163
163 164 if args is None:
164 165 args = b''
165 166
166 167 if not isinstance(args, bytes):
167 168 raise error.ProgrammingError(
168 169 b'arguments for version 1 commands must be declared as bytes'
169 170 )
170 171
171 172 def register(func):
172 173 if name in commands:
173 174 raise error.ProgrammingError(
174 175 b'%s command already registered for version 1' % name
175 176 )
176 177 commands[name] = wireprototypes.commandentry(
177 178 func, args=args, transports=transports, permission=permission
178 179 )
179 180
180 181 return func
181 182
182 183 return register
183 184
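As a usage sketch, a hypothetical read-only command registered through this decorator (the command name is made up; it is not part of Mercurial):

from mercurial import wireprototypes
from mercurial.wireprotov1server import wireprotocommand

@wireprotocommand(b'myext-headcount', permission=b'pull')
def myext_headcount(repo, proto):
    # reply with the number of repository heads as ASCII decimal
    return wireprototypes.bytesresponse(b'%d\n' % len(repo.heads()))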
184 185
185 186 # TODO define a more appropriate permissions type to use for this.
186 187 @wireprotocommand(b'batch', b'cmds *', permission=b'pull')
187 188 def batch(repo, proto, cmds, others):
188 189 unescapearg = wireprototypes.unescapebatcharg
189 190 res = []
190 191 for pair in cmds.split(b';'):
191 192 op, args = pair.split(b' ', 1)
192 193 vals = {}
193 194 for a in args.split(b','):
194 195 if a:
195 196 n, v = a.split(b'=')
196 197 vals[unescapearg(n)] = unescapearg(v)
197 198 func, spec = commands[op]
198 199
199 200 # Validate that client has permissions to perform this command.
200 201 perm = commands[op].permission
201 202 assert perm in (b'push', b'pull')
202 203 proto.checkperm(perm)
203 204
204 205 if spec:
205 206 keys = spec.split()
206 207 data = {}
207 208 for k in keys:
208 209 if k == b'*':
209 210 star = {}
210 211 for key in vals.keys():
211 212 if key not in keys:
212 213 star[key] = vals[key]
213 214 data[b'*'] = star
214 215 else:
215 216 data[k] = vals[k]
216 217 result = func(repo, proto, *[data[k] for k in keys])
217 218 else:
218 219 result = func(repo, proto)
219 220 if isinstance(result, wireprototypes.ooberror):
220 221 return result
221 222
222 223 # For now, all batchable commands must return bytesresponse or
223 224 # raw bytes (for backwards compatibility).
224 225 assert isinstance(result, (wireprototypes.bytesresponse, bytes))
225 226 if isinstance(result, wireprototypes.bytesresponse):
226 227 result = result.data
227 228 res.append(wireprototypes.escapebatcharg(result))
228 229
229 230 return wireprototypes.bytesresponse(b';'.join(res))
230 231
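Illustrative only: the shape of the 'cmds' argument that the loop above splits apart; real values are escaped and unescaped with wireprototypes.escapebatcharg/unescapebatcharg, which this sketch skips, and the node id is hypothetical:

cmds = (
    b'heads ;'
    b'known nodes=1111111111111111111111111111111111111111'
)
for pair in cmds.split(b';'):
    op, args = pair.split(b' ', 1)
    print(op, args or b'<no arguments>')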
231 232
232 233 @wireprotocommand(b'between', b'pairs', permission=b'pull')
233 234 def between(repo, proto, pairs):
234 235 pairs = [wireprototypes.decodelist(p, b'-') for p in pairs.split(b" ")]
235 236 r = []
236 237 for b in repo.between(pairs):
237 238 r.append(wireprototypes.encodelist(b) + b"\n")
238 239
239 240 return wireprototypes.bytesresponse(b''.join(r))
240 241
241 242
242 243 @wireprotocommand(b'branchmap', permission=b'pull')
243 244 def branchmap(repo, proto):
244 245 branchmap = repo.branchmap()
245 246 heads = []
246 247 for branch, nodes in pycompat.iteritems(branchmap):
247 248 branchname = urlreq.quote(encoding.fromlocal(branch))
248 249 branchnodes = wireprototypes.encodelist(nodes)
249 250 heads.append(b'%s %s' % (branchname, branchnodes))
250 251
251 252 return wireprototypes.bytesresponse(b'\n'.join(heads))
252 253
253 254
254 255 @wireprotocommand(b'branches', b'nodes', permission=b'pull')
255 256 def branches(repo, proto, nodes):
256 257 nodes = wireprototypes.decodelist(nodes)
257 258 r = []
258 259 for b in repo.branches(nodes):
259 260 r.append(wireprototypes.encodelist(b) + b"\n")
260 261
261 262 return wireprototypes.bytesresponse(b''.join(r))
262 263
263 264
264 265 @wireprotocommand(b'clonebundles', b'', permission=b'pull')
265 266 def clonebundles(repo, proto):
266 267 """Server command for returning info for available bundles to seed clones.
267 268
268 269 Clients will parse this response and determine what bundle to fetch.
269 270
270 271 Extensions may wrap this command to filter or dynamically emit data
271 272 depending on the request. e.g. you could advertise URLs for the closest
272 273 data center given the client's IP address.
273 274 """
274 275 return wireprototypes.bytesresponse(
275 276 repo.vfs.tryread(b'clonebundles.manifest')
276 277 )
277 278
278 279
279 280 wireprotocaps = [
280 281 b'lookup',
281 282 b'branchmap',
282 283 b'pushkey',
283 284 b'known',
284 285 b'getbundle',
285 286 b'unbundlehash',
286 287 ]
287 288
288 289
289 290 def _capabilities(repo, proto):
290 291 """return a list of capabilities for a repo
291 292
292 293 This function exists to allow extensions to easily wrap capabilities
293 294 computation
294 295
295 296     - returns a list: easy to alter
296 297     - changes done here will be propagated to both the `capabilities` and `hello`
297 298     commands without any other action needed.
298 299 """
299 300 # copy to prevent modification of the global list
300 301 caps = list(wireprotocaps)
301 302
302 303 # Command of same name as capability isn't exposed to version 1 of
303 304 # transports. So conditionally add it.
304 305 if commands.commandavailable(b'changegroupsubset', proto):
305 306 caps.append(b'changegroupsubset')
306 307
307 308 if streamclone.allowservergeneration(repo):
308 309 if repo.ui.configbool(b'server', b'preferuncompressed'):
309 310 caps.append(b'stream-preferred')
310 311 requiredformats = repo.requirements & repo.supportedformats
311 312 # if our local revlogs are just revlogv1, add 'stream' cap
312 313 if not requiredformats - {b'revlogv1'}:
313 314 caps.append(b'stream')
314 315 # otherwise, add 'streamreqs' detailing our local revlog format
315 316 else:
316 317 caps.append(b'streamreqs=%s' % b','.join(sorted(requiredformats)))
317 318 if repo.ui.configbool(b'experimental', b'bundle2-advertise'):
318 319 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=b'server'))
319 320 caps.append(b'bundle2=' + urlreq.quote(capsblob))
320 321 caps.append(b'unbundle=%s' % b','.join(bundle2.bundlepriority))
321 322
322 323 if repo.ui.configbool(b'experimental', b'narrow'):
323 324 caps.append(wireprototypes.NARROWCAP)
324 325 if repo.ui.configbool(b'experimental', b'narrowservebrokenellipses'):
325 326 caps.append(wireprototypes.ELLIPSESCAP)
326 327
327 328 return proto.addcapabilities(repo, caps)
328 329
329 330
330 331 # If you are writing an extension and consider wrapping this function. Wrap
331 332 # `_capabilities` instead.
332 333 @wireprotocommand(b'capabilities', permission=b'pull')
333 334 def capabilities(repo, proto):
334 335 caps = _capabilities(repo, proto)
335 336 return wireprototypes.bytesresponse(b' '.join(sorted(caps)))
336 337
337 338
338 339 @wireprotocommand(b'changegroup', b'roots', permission=b'pull')
339 340 def changegroup(repo, proto, roots):
340 341 nodes = wireprototypes.decodelist(roots)
341 342 outgoing = discovery.outgoing(
342 343 repo, missingroots=nodes, ancestorsof=repo.heads()
343 344 )
344 345 cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
345 346 gen = iter(lambda: cg.read(32768), b'')
346 347 return wireprototypes.streamres(gen=gen)
347 348
348 349
349 350 @wireprotocommand(b'changegroupsubset', b'bases heads', permission=b'pull')
350 351 def changegroupsubset(repo, proto, bases, heads):
351 352 bases = wireprototypes.decodelist(bases)
352 353 heads = wireprototypes.decodelist(heads)
353 354 outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
354 355 cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
355 356 gen = iter(lambda: cg.read(32768), b'')
356 357 return wireprototypes.streamres(gen=gen)
357 358
358 359
359 360 @wireprotocommand(b'debugwireargs', b'one two *', permission=b'pull')
360 361 def debugwireargs(repo, proto, one, two, others):
361 362 # only accept optional args from the known set
362 363 opts = options(b'debugwireargs', [b'three', b'four'], others)
363 364 return wireprototypes.bytesresponse(
364 365 repo.debugwireargs(one, two, **pycompat.strkwargs(opts))
365 366 )
366 367
367 368
368 369 def find_pullbundle(repo, proto, opts, clheads, heads, common):
369 370 """Return a file object for the first matching pullbundle.
370 371
371 372 Pullbundles are specified in .hg/pullbundles.manifest similar to
372 373 clonebundles.
373 374 For each entry, the bundle specification is checked for compatibility:
374 375 - Client features vs the BUNDLESPEC.
375 376     - Revisions shared with the client vs base revisions of the bundle.
376 377 A bundle can be applied only if all its base revisions are known by
377 378 the client.
378 379 - At least one leaf of the bundle's DAG is missing on the client.
379 380     - Every leaf of the bundle's DAG is part of the node set the client wants.
380 381 E.g. do not send a bundle of all changes if the client wants only
381 382 one specific branch of many.
382 383 """
383 384
384 385 def decodehexstring(s):
385 386 return {binascii.unhexlify(h) for h in s.split(b';')}
386 387
387 388 manifest = repo.vfs.tryread(b'pullbundles.manifest')
388 389 if not manifest:
389 390 return None
390 res = exchange.parseclonebundlesmanifest(repo, manifest)
391 res = exchange.filterclonebundleentries(repo, res)
391 res = bundlecaches.parseclonebundlesmanifest(repo, manifest)
392 res = bundlecaches.filterclonebundleentries(repo, res)
392 393 if not res:
393 394 return None
394 395 cl = repo.unfiltered().changelog
395 396 heads_anc = cl.ancestors([cl.rev(rev) for rev in heads], inclusive=True)
396 397 common_anc = cl.ancestors([cl.rev(rev) for rev in common], inclusive=True)
397 398 compformats = clientcompressionsupport(proto)
398 399 for entry in res:
399 400 comp = entry.get(b'COMPRESSION')
400 401 altcomp = util.compengines._bundlenames.get(comp)
401 402 if comp and comp not in compformats and altcomp not in compformats:
402 403 continue
403 404 # No test yet for VERSION, since V2 is supported by any client
404 405 # that advertises partial pulls
405 406 if b'heads' in entry:
406 407 try:
407 408 bundle_heads = decodehexstring(entry[b'heads'])
408 409 except TypeError:
409 410 # Bad heads entry
410 411 continue
411 412 if bundle_heads.issubset(common):
412 413 continue # Nothing new
413 414 if all(cl.rev(rev) in common_anc for rev in bundle_heads):
414 415 continue # Still nothing new
415 416 if any(
416 417 cl.rev(rev) not in heads_anc and cl.rev(rev) not in common_anc
417 418 for rev in bundle_heads
418 419 ):
419 420 continue
420 421 if b'bases' in entry:
421 422 try:
422 423 bundle_bases = decodehexstring(entry[b'bases'])
423 424 except TypeError:
424 425 # Bad bases entry
425 426 continue
426 427 if not all(cl.rev(rev) in common_anc for rev in bundle_bases):
427 428 continue
428 429 path = entry[b'URL']
429 430 repo.ui.debug(b'sending pullbundle "%s"\n' % path)
430 431 try:
431 432 return repo.vfs.open(path)
432 433 except IOError:
433 434 repo.ui.debug(b'pullbundle "%s" not accessible\n' % path)
434 435 continue
435 436 return None
436 437
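A hedged sketch of a .hg/pullbundles.manifest entry that this function could match; the path and node ids are hypothetical, and 'heads'/'bases' carry ';'-separated hex nodes as decodehexstring() expects:

manifest = (
    b'partial-feature-branch.hg BUNDLESPEC=gzip-v2'
    b' heads=1111111111111111111111111111111111111111'
    b' bases=2222222222222222222222222222222222222222;'
    b'3333333333333333333333333333333333333333\n'
)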
437 438
438 439 @wireprotocommand(b'getbundle', b'*', permission=b'pull')
439 440 def getbundle(repo, proto, others):
440 441 opts = options(
441 442 b'getbundle', wireprototypes.GETBUNDLE_ARGUMENTS.keys(), others
442 443 )
443 444 for k, v in pycompat.iteritems(opts):
444 445 keytype = wireprototypes.GETBUNDLE_ARGUMENTS[k]
445 446 if keytype == b'nodes':
446 447 opts[k] = wireprototypes.decodelist(v)
447 448 elif keytype == b'csv':
448 449 opts[k] = list(v.split(b','))
449 450 elif keytype == b'scsv':
450 451 opts[k] = set(v.split(b','))
451 452 elif keytype == b'boolean':
452 453 # Client should serialize False as '0', which is a non-empty string
453 454 # so it evaluates as a True bool.
454 455 if v == b'0':
455 456 opts[k] = False
456 457 else:
457 458 opts[k] = bool(v)
458 459 elif keytype != b'plain':
459 460 raise KeyError(b'unknown getbundle option type %s' % keytype)
460 461
461 462 if not bundle1allowed(repo, b'pull'):
462 463 if not exchange.bundle2requested(opts.get(b'bundlecaps')):
463 464 if proto.name == b'http-v1':
464 465 return wireprototypes.ooberror(bundle2required)
465 466 raise error.Abort(bundle2requiredmain, hint=bundle2requiredhint)
466 467
467 468 try:
468 469 clheads = set(repo.changelog.heads())
469 470 heads = set(opts.get(b'heads', set()))
470 471 common = set(opts.get(b'common', set()))
471 472 common.discard(nullid)
472 473 if (
473 474 repo.ui.configbool(b'server', b'pullbundle')
474 475 and b'partial-pull' in proto.getprotocaps()
475 476 ):
476 477 # Check if a pre-built bundle covers this request.
477 478 bundle = find_pullbundle(repo, proto, opts, clheads, heads, common)
478 479 if bundle:
479 480 return wireprototypes.streamres(
480 481 gen=util.filechunkiter(bundle), prefer_uncompressed=True
481 482 )
482 483
483 484 if repo.ui.configbool(b'server', b'disablefullbundle'):
484 485 # Check to see if this is a full clone.
485 486 changegroup = opts.get(b'cg', True)
486 487 if changegroup and not common and clheads == heads:
487 488 raise error.Abort(
488 489 _(b'server has pull-based clones disabled'),
489 490 hint=_(b'remove --pull if specified or upgrade Mercurial'),
490 491 )
491 492
492 493 info, chunks = exchange.getbundlechunks(
493 494 repo, b'serve', **pycompat.strkwargs(opts)
494 495 )
495 496 prefercompressed = info.get(b'prefercompressed', True)
496 497 except error.Abort as exc:
497 498 # cleanly forward Abort error to the client
498 499 if not exchange.bundle2requested(opts.get(b'bundlecaps')):
499 500 if proto.name == b'http-v1':
500 501 return wireprototypes.ooberror(exc.message + b'\n')
501 502 raise # cannot do better for bundle1 + ssh
502 503 # bundle2 request expect a bundle2 reply
503 504 bundler = bundle2.bundle20(repo.ui)
504 505 manargs = [(b'message', exc.message)]
505 506 advargs = []
506 507 if exc.hint is not None:
507 508 advargs.append((b'hint', exc.hint))
508 509 bundler.addpart(bundle2.bundlepart(b'error:abort', manargs, advargs))
509 510 chunks = bundler.getchunks()
510 511 prefercompressed = False
511 512
512 513 return wireprototypes.streamres(
513 514 gen=chunks, prefer_uncompressed=not prefercompressed
514 515 )
515 516
516 517
517 518 @wireprotocommand(b'heads', permission=b'pull')
518 519 def heads(repo, proto):
519 520 h = repo.heads()
520 521 return wireprototypes.bytesresponse(wireprototypes.encodelist(h) + b'\n')
521 522
522 523
523 524 @wireprotocommand(b'hello', permission=b'pull')
524 525 def hello(repo, proto):
525 526 """Called as part of SSH handshake to obtain server info.
526 527
527 528 Returns a list of lines describing interesting things about the
528 529 server, in an RFC822-like format.
529 530
530 531 Currently, the only one defined is ``capabilities``, which consists of a
531 532 line of space separated tokens describing server abilities:
532 533
533 534 capabilities: <token0> <token1> <token2>
534 535 """
535 536 caps = capabilities(repo, proto).data
536 537 return wireprototypes.bytesresponse(b'capabilities: %s\n' % caps)
537 538
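Illustrative only: splitting the handshake line described above into capability tokens on the client side (the capability list shown is a hypothetical subset):

line = b'capabilities: lookup branchmap pushkey known getbundle unbundlehash\n'
name, _, value = line.partition(b': ')
caps = set(value.split())
print(name, sorted(caps))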
538 539
539 540 @wireprotocommand(b'listkeys', b'namespace', permission=b'pull')
540 541 def listkeys(repo, proto, namespace):
541 542 d = sorted(repo.listkeys(encoding.tolocal(namespace)).items())
542 543 return wireprototypes.bytesresponse(pushkeymod.encodekeys(d))
543 544
544 545
545 546 @wireprotocommand(b'lookup', b'key', permission=b'pull')
546 547 def lookup(repo, proto, key):
547 548 try:
548 549 k = encoding.tolocal(key)
549 550 n = repo.lookup(k)
550 551 r = hex(n)
551 552 success = 1
552 553 except Exception as inst:
553 554 r = stringutil.forcebytestr(inst)
554 555 success = 0
555 556 return wireprototypes.bytesresponse(b'%d %s\n' % (success, r))
556 557
557 558
558 559 @wireprotocommand(b'known', b'nodes *', permission=b'pull')
559 560 def known(repo, proto, nodes, others):
560 561 v = b''.join(
561 562 b and b'1' or b'0' for b in repo.known(wireprototypes.decodelist(nodes))
562 563 )
563 564 return wireprototypes.bytesresponse(v)
564 565
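Illustrative only: the response produced above is one '1' or '0' character per queried node, in request order (hypothetical results shown):

results = [True, False, True]  # repo.known(...) for three queried nodes
print(b''.join(b'1' if known else b'0' for known in results))  # b'101'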
565 566
566 567 @wireprotocommand(b'protocaps', b'caps', permission=b'pull')
567 568 def protocaps(repo, proto, caps):
568 569 if proto.name == wireprototypes.SSHV1:
569 570 proto._protocaps = set(caps.split(b' '))
570 571 return wireprototypes.bytesresponse(b'OK')
571 572
572 573
573 574 @wireprotocommand(b'pushkey', b'namespace key old new', permission=b'push')
574 575 def pushkey(repo, proto, namespace, key, old, new):
575 576 # compatibility with pre-1.8 clients which were accidentally
576 577 # sending raw binary nodes rather than utf-8-encoded hex
577 578 if len(new) == 20 and stringutil.escapestr(new) != new:
578 579 # looks like it could be a binary node
579 580 try:
580 581 new.decode('utf-8')
581 582 new = encoding.tolocal(new) # but cleanly decodes as UTF-8
582 583 except UnicodeDecodeError:
583 584 pass # binary, leave unmodified
584 585 else:
585 586 new = encoding.tolocal(new) # normal path
586 587
587 588 with proto.mayberedirectstdio() as output:
588 589 r = (
589 590 repo.pushkey(
590 591 encoding.tolocal(namespace),
591 592 encoding.tolocal(key),
592 593 encoding.tolocal(old),
593 594 new,
594 595 )
595 596 or False
596 597 )
597 598
598 599 output = output.getvalue() if output else b''
599 600 return wireprototypes.bytesresponse(b'%d\n%s' % (int(r), output))
600 601
601 602
602 603 @wireprotocommand(b'stream_out', permission=b'pull')
603 604 def stream(repo, proto):
604 605 '''If the server supports streaming clone, it advertises the "stream"
605 606 capability with a value representing the version and flags of the repo
606 607 it is serving. Client checks to see if it understands the format.
607 608 '''
608 609 return wireprototypes.streamreslegacy(streamclone.generatev1wireproto(repo))
609 610
610 611
611 612 @wireprotocommand(b'unbundle', b'heads', permission=b'push')
612 613 def unbundle(repo, proto, heads):
613 614 their_heads = wireprototypes.decodelist(heads)
614 615
615 616 with proto.mayberedirectstdio() as output:
616 617 try:
617 618 exchange.check_heads(repo, their_heads, b'preparing changes')
618 619 cleanup = lambda: None
619 620 try:
620 621 payload = proto.getpayload()
621 622 if repo.ui.configbool(b'server', b'streamunbundle'):
622 623
623 624 def cleanup():
624 625 # Ensure that the full payload is consumed, so
625 626 # that the connection doesn't contain trailing garbage.
626 627 for p in payload:
627 628 pass
628 629
629 630 fp = util.chunkbuffer(payload)
630 631 else:
631 632 # write bundle data to temporary file as it can be big
632 633 fp, tempname = None, None
633 634
634 635 def cleanup():
635 636 if fp:
636 637 fp.close()
637 638 if tempname:
638 639 os.unlink(tempname)
639 640
640 641 fd, tempname = pycompat.mkstemp(prefix=b'hg-unbundle-')
641 642 repo.ui.debug(
642 643 b'redirecting incoming bundle to %s\n' % tempname
643 644 )
644 645 fp = os.fdopen(fd, pycompat.sysstr(b'wb+'))
645 646 for p in payload:
646 647 fp.write(p)
647 648 fp.seek(0)
648 649
649 650 gen = exchange.readbundle(repo.ui, fp, None)
650 651 if isinstance(
651 652 gen, changegroupmod.cg1unpacker
652 653 ) and not bundle1allowed(repo, b'push'):
653 654 if proto.name == b'http-v1':
654 655                     # need to special-case http because stderr does not get to
655 656                     # the http client on failed push, so we need to abuse
656 657                     # some other error type to make sure the message gets to
657 658                     # the user.
658 659 return wireprototypes.ooberror(bundle2required)
659 660 raise error.Abort(
660 661 bundle2requiredmain, hint=bundle2requiredhint
661 662 )
662 663
663 664 r = exchange.unbundle(
664 665 repo, gen, their_heads, b'serve', proto.client()
665 666 )
666 667 if util.safehasattr(r, b'addpart'):
667 668 # The return looks streamable, we are in the bundle2 case
668 669 # and should return a stream.
669 670 return wireprototypes.streamreslegacy(gen=r.getchunks())
670 671 return wireprototypes.pushres(
671 672 r, output.getvalue() if output else b''
672 673 )
673 674
674 675 finally:
675 676 cleanup()
676 677
677 678 except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
678 679 # handle non-bundle2 case first
679 680 if not getattr(exc, 'duringunbundle2', False):
680 681 try:
681 682 raise
682 683 except error.Abort as exc:
683 684 # The old code we moved used procutil.stderr directly.
684 685 # We did not change it to minimise code change.
685 686                     # This needs to be moved to something proper.
686 687 # Feel free to do it.
687 688 procutil.stderr.write(b"abort: %s\n" % exc.message)
688 689 if exc.hint is not None:
689 690 procutil.stderr.write(b"(%s)\n" % exc.hint)
690 691 procutil.stderr.flush()
691 692 return wireprototypes.pushres(
692 693 0, output.getvalue() if output else b''
693 694 )
694 695 except error.PushRaced:
695 696 return wireprototypes.pusherr(
696 697 pycompat.bytestr(exc),
697 698 output.getvalue() if output else b'',
698 699 )
699 700
700 701 bundler = bundle2.bundle20(repo.ui)
701 702 for out in getattr(exc, '_bundle2salvagedoutput', ()):
702 703 bundler.addpart(out)
703 704 try:
704 705 try:
705 706 raise
706 707 except error.PushkeyFailed as exc:
707 708 # check client caps
708 709 remotecaps = getattr(exc, '_replycaps', None)
709 710 if (
710 711 remotecaps is not None
711 712 and b'pushkey' not in remotecaps.get(b'error', ())
712 713 ):
713 714                         # not supported on remote side, fall back to Abort handler.
714 715 raise
715 716 part = bundler.newpart(b'error:pushkey')
716 717 part.addparam(b'in-reply-to', exc.partid)
717 718 if exc.namespace is not None:
718 719 part.addparam(
719 720 b'namespace', exc.namespace, mandatory=False
720 721 )
721 722 if exc.key is not None:
722 723 part.addparam(b'key', exc.key, mandatory=False)
723 724 if exc.new is not None:
724 725 part.addparam(b'new', exc.new, mandatory=False)
725 726 if exc.old is not None:
726 727 part.addparam(b'old', exc.old, mandatory=False)
727 728 if exc.ret is not None:
728 729 part.addparam(b'ret', exc.ret, mandatory=False)
729 730 except error.BundleValueError as exc:
730 731 errpart = bundler.newpart(b'error:unsupportedcontent')
731 732 if exc.parttype is not None:
732 733 errpart.addparam(b'parttype', exc.parttype)
733 734 if exc.params:
734 735 errpart.addparam(b'params', b'\0'.join(exc.params))
735 736 except error.Abort as exc:
736 737 manargs = [(b'message', exc.message)]
737 738 advargs = []
738 739 if exc.hint is not None:
739 740 advargs.append((b'hint', exc.hint))
740 741 bundler.addpart(
741 742 bundle2.bundlepart(b'error:abort', manargs, advargs)
742 743 )
743 744 except error.PushRaced as exc:
744 745 bundler.newpart(
745 746 b'error:pushraced',
746 747 [(b'message', stringutil.forcebytestr(exc))],
747 748 )
748 749 return wireprototypes.streamreslegacy(gen=bundler.getchunks())
@@ -1,149 +1,149 b''
1 1 # coding=UTF-8
2 2
3 3 from __future__ import absolute_import
4 4
5 5 import base64
6 6 import zlib
7 7
8 8 from mercurial import (
9 bundlecaches,
9 10 changegroup,
10 exchange,
11 11 extensions,
12 12 revlog,
13 13 util,
14 14 )
15 15 from mercurial.revlogutils import flagutil
16 16
17 17 # Test only: These flags are defined here only in the context of testing the
18 18 # behavior of the flag processor. The canonical way to add flags is to get in
19 19 # touch with the community and make them known in revlog.
20 20 REVIDX_NOOP = 1 << 3
21 21 REVIDX_BASE64 = 1 << 2
22 22 REVIDX_GZIP = 1 << 1
23 23 REVIDX_FAIL = 1
24 24
25 25
26 26 def validatehash(self, text):
27 27 return True
28 28
29 29
30 30 def bypass(self, text):
31 31 return False
32 32
33 33
34 34 def noopdonothing(self, text, sidedata):
35 35 return (text, True)
36 36
37 37
38 38 def noopdonothingread(self, text):
39 39 return (text, True, {})
40 40
41 41
42 42 def b64encode(self, text, sidedata):
43 43 return (base64.b64encode(text), False)
44 44
45 45
46 46 def b64decode(self, text):
47 47 return (base64.b64decode(text), True, {})
48 48
49 49
50 50 def gzipcompress(self, text, sidedata):
51 51 return (zlib.compress(text), False)
52 52
53 53
54 54 def gzipdecompress(self, text):
55 55 return (zlib.decompress(text), True, {})
56 56
57 57
58 58 def supportedoutgoingversions(orig, repo):
59 59 versions = orig(repo)
60 60 versions.discard(b'01')
61 61 versions.discard(b'02')
62 62 versions.add(b'03')
63 63 return versions
64 64
65 65
66 66 def allsupportedversions(orig, ui):
67 67 versions = orig(ui)
68 68 versions.add(b'03')
69 69 return versions
70 70
71 71
72 72 def makewrappedfile(obj):
73 73 class wrappedfile(obj.__class__):
74 74 def addrevision(
75 75 self,
76 76 text,
77 77 transaction,
78 78 link,
79 79 p1,
80 80 p2,
81 81 cachedelta=None,
82 82 node=None,
83 83 flags=flagutil.REVIDX_DEFAULT_FLAGS,
84 84 ):
85 85 if b'[NOOP]' in text:
86 86 flags |= REVIDX_NOOP
87 87
88 88 if b'[BASE64]' in text:
89 89 flags |= REVIDX_BASE64
90 90
91 91 if b'[GZIP]' in text:
92 92 flags |= REVIDX_GZIP
93 93
94 94 # This addrevision wrapper is meant to add a flag we will not have
95 95 # transforms registered for, ensuring we handle this error case.
96 96 if b'[FAIL]' in text:
97 97 flags |= REVIDX_FAIL
98 98
99 99 return super(wrappedfile, self).addrevision(
100 100 text,
101 101 transaction,
102 102 link,
103 103 p1,
104 104 p2,
105 105 cachedelta=cachedelta,
106 106 node=node,
107 107 flags=flags,
108 108 )
109 109
110 110 obj.__class__ = wrappedfile
111 111
112 112
113 113 def reposetup(ui, repo):
114 114 class wrappingflagprocessorrepo(repo.__class__):
115 115 def file(self, f):
116 116 orig = super(wrappingflagprocessorrepo, self).file(f)
117 117 makewrappedfile(orig)
118 118 return orig
119 119
120 120 repo.__class__ = wrappingflagprocessorrepo
121 121
122 122
123 123 def extsetup(ui):
124 124 # Enable changegroup3 for flags to be sent over the wire
125 125 wrapfunction = extensions.wrapfunction
126 126 wrapfunction(
127 127 changegroup, 'supportedoutgoingversions', supportedoutgoingversions
128 128 )
129 129 wrapfunction(changegroup, 'allsupportedversions', allsupportedversions)
130 130
131 131 # Teach revlog about our test flags
132 132 flags = [REVIDX_NOOP, REVIDX_BASE64, REVIDX_GZIP, REVIDX_FAIL]
133 133 flagutil.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
134 134 revlog.REVIDX_FLAGS_ORDER.extend(flags)
135 135
136 136 # Teach exchange to use changegroup 3
137 for k in exchange._bundlespeccontentopts.keys():
138 exchange._bundlespeccontentopts[k][b"cg.version"] = b"03"
137 for k in bundlecaches._bundlespeccontentopts.keys():
138 bundlecaches._bundlespeccontentopts[k][b"cg.version"] = b"03"
139 139
140 140 # Register flag processors for each extension
141 141 flagutil.addflagprocessor(
142 142 REVIDX_NOOP, (noopdonothingread, noopdonothing, validatehash,)
143 143 )
144 144 flagutil.addflagprocessor(
145 145 REVIDX_BASE64, (b64decode, b64encode, bypass,),
146 146 )
147 147 flagutil.addflagprocessor(
148 148 REVIDX_GZIP, (gzipdecompress, gzipcompress, bypass)
149 149 )