##// END OF EJS Templates
configitems: register the 'format.usegeneraldelta' config
marmoute -
r33243:4d9458e0 default
parent child Browse files
Show More
@@ -1,195 +1,198 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from . import (
13 13 error,
14 14 )
15 15
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    for section, items in configtable.items():
        knownitems = ui._knownconfig.setdefault(section, {})
        # warn when the extension shadows an already-registered item
        for key in sorted(set(knownitems) & set(items)):
            msg = ("extension '%s' overwrite config item '%s.%s'"
                   % (extname, section, key))
            ui.develwarn(msg, config='warn-config')
        knownitems.update(items)
28 28
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    """

    def __init__(self, section, name, default=None):
        self.section = section
        self.name = name
        self.default = default

    def __repr__(self):
        # improvement: a readable repr aids debugging of registration
        # problems (duplicate or missing items)
        return ('<configitem %s.%s default=%r>'
                % (self.section, self.name, self.default))
41 41
# mapping of {section: {name: configitem}} holding every core config option
coreitems = {}
43 43
def _register(configtable, *args, **kwargs):
    """Create a configitem from the arguments and record it in ``configtable``.

    Raises ProgrammingError when the same section/name pair is registered
    twice.
    """
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, {})
    if item.name in section:
        raise error.ProgrammingError(
            "duplicated config item registration for '%s.%s'"
            % (item.section, item.name))
    section[item.name] = item
51 51
52 52 # Registering actual config items
53 53
def getitemregister(configtable):
    """Return a register function that records configitems in ``configtable``."""
    return functools.partial(_register, configtable)
56 56
coreconfigitem = getitemregister(coreitems)

# Declarative table of every core option: (section, name, default).
# A callable default (e.g. ``list``) produces a fresh value per lookup.
#
# bookmarks.pushing: internal hack for discovery
# bundle.mainreporoot: internal hack for bundlerepo
# bundle.reorder: experimental config
# worker.backgroundclosemaxqueue: Windows defaults to a limit of 512 open
# files. A buffer of 128 should give us enough headway.
_coreitemspecs = [
    ('auth', 'cookiefile', None),
    ('bookmarks', 'pushing', list),
    ('bundle', 'mainreporoot', ''),
    ('bundle', 'reorder', 'auto'),
    ('color', 'mode', 'auto'),
    ('devel', 'all-warnings', False),
    ('devel', 'bundle2.debug', False),
    ('devel', 'check-locks', False),
    ('devel', 'check-relroot', False),
    ('devel', 'disableloaddefaultcerts', False),
    ('devel', 'legacy.exchange', list),
    ('devel', 'servercafile', ''),
    ('devel', 'serverexactprotocol', ''),
    ('devel', 'serverrequirecert', False),
    ('devel', 'strip-obsmarkers', True),
    ('format', 'aggressivemergedeltas', False),
    ('format', 'chunkcachesize', None),
    ('format', 'dotencode', True),
    ('format', 'generaldelta', False),
    ('format', 'manifestcachesize', None),
    ('format', 'maxchainlen', None),
    ('format', 'obsstore-version', None),
    ('format', 'usefncache', True),
    ('format', 'usegeneraldelta', True),
    ('hostsecurity', 'ciphers', None),
    ('hostsecurity', 'disabletls10warning', False),
    ('patch', 'eol', 'strict'),
    ('patch', 'fuzz', 2),
    ('server', 'bundle1', True),
    ('server', 'bundle1gd', None),
    ('server', 'compressionengines', list),
    ('server', 'concurrent-push-mode', 'strict'),
    ('server', 'disablefullbundle', False),
    ('server', 'maxhttpheaderlen', 1024),
    ('server', 'preferuncompressed', False),
    ('server', 'uncompressedallowsecret', False),
    ('server', 'validate', False),
    ('server', 'zliblevel', -1),
    ('ui', 'clonebundleprefers', list),
    ('ui', 'interactive', None),
    ('ui', 'quiet', False),
    ('worker', 'backgroundclosemaxqueue', 384),
    ('worker', 'backgroundcloseminfilecount', 2048),
    ('worker', 'backgroundclosethreadcount', 4),
    ('worker', 'numcpus', None),
]
for _section, _name, _default in _coreitemspecs:
    coreconfigitem(_section, _name, default=_default)
@@ -1,1061 +1,1061 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 hex,
20 20 nullid,
21 21 wdirid,
22 22 wdirrev,
23 23 )
24 24
25 25 from . import (
26 26 encoding,
27 27 error,
28 28 match as matchmod,
29 29 obsolete,
30 30 pathutil,
31 31 phases,
32 32 pycompat,
33 33 revsetlang,
34 34 similar,
35 35 util,
36 36 )
37 37
38 38 if pycompat.osname == 'nt':
39 39 from . import scmwindows as scmplatform
40 40 else:
41 41 from . import scmposix as scmplatform
42 42
43 43 termsize = scmplatform.termsize
44 44
class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self):
        # fix: dropped the unused *args/**kwargs parameters; the repr()
        # protocol never passes any arguments
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)
97 97
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields ``(subpath, subrepo)`` pairs for every subrepo path mentioned
    in either context's substate.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # fix: items() instead of the Python 2 only iteritems(); identical
    # behavior on Python 2 (sorted() consumes either form) and also
    # works on Python 3
    for subpath, ctx in sorted(subpaths.items()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
122 122
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    for n in excluded or []:
        ctx = repo[n]
        # only mention changesets that were skipped for being secret
        if ctx.phase() >= phases.secret and not ctx.extinct():
            secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
139 139
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # show the traceback (when enabled) before any handler below
            # turns the exception into a message + exit code
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename, inst.strerror))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        # NOTE: basestring/unicode below are Python 2 only names
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        # user action required is an "expected" outcome: exit 1, not -1
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        # last word of the message is usually the missing module name
        m = str(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            # presumably an HTTP-level error carrying a status code —
            # TODO confirm against callers
            ui.warn(_("abort: %s\n") % inst)
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # broken pipe: the consumer went away; stay quiet
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
            else:
                ui.warn(_("abort: %s\n") % inst.strerror)
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
        else:
            ui.warn(_("abort: %s\n") % inst.strerror)
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % inst.args[-1])

    return -1
247 247
def checknewlabel(repo, lbl, kind):
    """Abort if ``lbl`` is not acceptable as a new label name.

    The "kind" parameter is deliberately not used in ui output because it
    makes strings difficult to translate.
    """
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(_("%r cannot be used in a name") % c)
    try:
        int(lbl)
    except ValueError:
        return
    raise error.Abort(_("cannot use an integer as a name"))
261 261
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    for badchar in ('\r', '\n'):
        if badchar in f:
            raise error.Abort(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
266 266
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %r" % (msg, f)
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
278 278
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    value = ui.config('ui', 'portablefilenames', 'warn')
    lowered = value.lower()
    boolval = util.parsebool(value)
    # always abort on Windows, where non-portable names break the filesystem
    abort = pycompat.osname == 'nt' or lowered == 'abort'
    warn = boolval or lowered == 'warn'
    if boolval is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % value)
    return abort, warn
291 291
class casecollisionauditor(object):
    """Warn or abort when a new filename collides case-insensitively
    with a tracked one."""

    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        self._dirstate = dirstate
        lowered = encoding.lower('\0'.join(dirstate._map))
        self._loweredfiles = set(lowered.split('\0'))
        # _newfiles avoids duplicate complaints when this object is
        # called with the same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
315 315
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.  Returns None when nothing is filtered at or below
    ``maxrev``.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%d;' % rev)
    return s.digest()
339 339
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # only propagate walk errors for the root path itself
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat in dirlst; return True when it was not
            # already present (samestat dedupes symlink aliases of one dir)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk through the symlink ourselves; os.walk
                        # would not follow it
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
387 387
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    # the working directory has no real node; substitute the magic wdir id
    return wdirid if node is None else node
394 394
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # the working directory has no revision number; use the magic wdir rev
    return wdirrev if rev is None else rev
402 402
def revsingle(repo, revspec, default='.'):
    """Resolve ``revspec`` to a single changectx (the last of the set)."""
    # an empty spec (but not the integer 0) falls back to the default rev
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec])
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
411 411
def _pairspec(revspec):
    """Report whether ``revspec`` parses to a top-level range expression."""
    rangekinds = ('range', 'rangepre', 'rangepost', 'rangeall')
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in rangekinds
415 415
def revpair(repo, revs):
    """Resolve user revset specs ``revs`` into a pair of looked-up nodes.

    Returns ``(first, second)``; ``second`` is None when the input
    designates a single revision rather than a range.
    """
    if not revs:
        # default to the working directory's first parent
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        # unordered set: fall back to iteration order
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)
445 445
def revrange(repo, specs):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # integers are wrapped in an explicit rev() expression; everything else
    # is assumed to already be a formatted revset
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True)
473 473
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # a merge: both parents matter
        return parents
    if repo.ui.debugflag:
        # in debug mode always show both slots, padding with the null rev
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        # parent is the immediately preceding revision: omit it
        return []
    return parents
489 489
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kinds (e.g. 're:') are passed through as-is
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # no match on disk: keep the original pattern
            expanded.append(kindpat)
    return expanded
508 508
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # 'm' is assigned below; by the time the matcher invokes this
        # callback the closure sees the created matcher (late binding)
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        # a match-everything matcher means no effective patterns
        pats = []
    return m, pats
533 533
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
538 538
def matchall(repo):
    '''Return a matcher that will efficiently match everything.

    The matcher is rooted at the repo root and relative to the current
    working directory.'''
    return matchmod.always(repo.root, repo.getcwd())
542 542
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.

    ``badfn`` is an optional callback invoked for files that cannot be
    matched.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
546 546
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath', None)
    if origbackuppath is None:
        return filepath + ".orig"

    # place the backup under the configured directory, mirroring the
    # file's path relative to the repo root
    relpath = os.path.relpath(filepath, start=repo.root)
    fullorigpath = repo.wjoin(origbackuppath, relpath)

    backupdir = repo.vfs.dirname(fullorigpath)
    if not repo.vfs.exists(backupdir):
        ui.note(_('creating directory: %s\n') % backupdir)
        util.makedirs(backupdir)

    return fullorigpath + ".orig"
566 566
def cleanupnodes(repo, mapping, operation):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    mapping is {oldnode: [newnode]} or a iterable of nodes if they do not have
    replacements. operation is a string, like "rebase".
    """
    # normalize a plain iterable of nodes into {node: ()} (no successors)
    if not util.safehasattr(mapping, 'items'):
        mapping = {n: () for n in mapping}

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanged = False
        for oldnode, newnodes in mapping.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            bmarkchanged = True
            if len(newnodes) > 1:
                # multiple successors: the bookmark can only follow if
                # they have a single head
                heads = list(repo.set('heads(%ln)', newnodes))
                if len(heads) != 1:
                    raise error.ProgrammingError(
                        'cannot figure out bookmark movement')
                newnode = heads[0].node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                roots = list(repo.set('max((::%n) - %ln)', oldnode,
                                      list(mapping)))
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (oldbmarks, hex(oldnode), hex(newnode)))
            for name in oldbmarks:
                bmarks[name] = newnode
        if bmarkchanged:
            bmarks.recordchange(tr)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = repo.obsstore.successors.__contains__
            sortfunc = lambda ns: repo.changelog.rev(ns[0])
            rels = [(repo[n], (repo[m] for m in s))
                    for n, s in sorted(mapping.items(), key=sortfunc)
                    if s or not isobs(n)]
            obsolete.createmarkers(repo, rels, operation=operation)
        else:
            from . import repair # avoid import cycle
            repair.delayedstrip(repo.ui, repo, list(mapping), operation)
628 628
def addremove(repo, matcher, prefix, opts=None, dry_run=None, similarity=None):
    """Add new files and remove missing ones, optionally detecting renames.

    Returns 1 when any file could not be processed, 0 otherwise.
    """
    if opts is None:
        opts = {}
    m = matcher
    # explicit arguments win over the corresponding command options
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    # recurse into subrepos first
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only report patterns the user named explicitly
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret
684 684
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # note: the lambda closes over 'rejected', which is assigned on the next
    # line — late binding makes this work by the time badfn is invoked
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0
713 713
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of paths.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    # classify each walked path by its dirstate code ('?', 'r', 'a', ...)
    # combined with whether a stat result exists on disk
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
742 742
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.

    Returns a {new: old} mapping of detected renames.'''
    if similarity <= 0:
        return {}
    renames = {}
    candidates = similar.findrenames(repo, added, removed, similarity)
    for source, target, score in candidates:
        # stay quiet about exact matches unless the user asked for detail
        if (repo.ui.verbose or not matcher.exact(source)
            or not matcher.exact(target)):
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(source), matcher.rel(target),
                            score * 100))
        renames[target] = source
    return renames
757 757
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    workingctx = repo[None]
    with repo.wlock():
        workingctx.forget(deleted)
        workingctx.add(unknown)
        for dst, src in renames.iteritems():
            workingctx.copy(src, dst)
767 767
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    ds = repo.dirstate
    copysource = ds.copied(src) or src
    if dst == copysource:  # copying back a copy?
        # just make sure the destination is tracked normally again
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[copysource] == 'a' and copysource == src:
        # source is freshly added: no copy data can be recorded yet
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(copysource, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(copysource, dst)
786 786
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirements read from the file, or raises
    RequirementError when an entry is malformed or unsupported.'''
    requirements = set(opener.read("requires").splitlines())
    unknown = []
    for entry in requirements:
        if entry in supported:
            continue
        # a blank or non-alphanumeric-leading entry means the file is bogus
        if not entry or not entry[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        unknown.append(entry)
    if unknown:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(unknown)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
805 805
def writerequires(opener, requirements):
    """Persist the requirements set to .hg/requires, one entry per line,
    in sorted order."""
    with opener('requires', 'w') as fp:
        fp.write(''.join('%s\n' % req for req in sorted(requirements)))
810 810
class filecachesubentry(object):
    """Stat-based change tracker for one file path.

    Remembers the last stat info for ``path`` so changed() can tell whether
    the file was replaced since the last refresh.  Relies on the project
    helper ``util.cachestat`` -- presumably an os.stat wrapper with a
    cacheability probe; confirm in util.py.
    """
    def __init__(self, path, stat):
        # path: file to watch
        # stat: when true, record stat info immediately
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        """Re-record stat info, but only when caching can work at all."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether the filesystem can tell us if this file was replaced."""
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """True when the file changed -- or when change can't be ruled out."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        """Stat ``path``; returns None (implicitly) when it does not exist.

        Any OSError other than ENOENT is propagated to the caller.
        """
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
865 865
class filecacheentry(object):
    """Aggregate of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        # stat: forwarded to each subentry; when true, stat info is
        # recorded immediately
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
882 882
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative path fragments; resolved per-instance via join()
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        """Decorator entry point: remember the wrapped function.

        The cache key is the function's name as ascii bytes; both
        obj.__dict__ and obj._filecache are keyed by this bytes name.
        """
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # stale: recompute, entry keeps the fresh stat info
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # cache in __dict__ so the fast path above is hit next time
        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        """Explicit assignment replaces the cached object in both caches."""
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            # stat=False: don't record stat info for a value we didn't compute
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        """Drop the __dict__ cache; the next access re-checks the files."""
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
961 961
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run 'cmd' as a subprocess that may inherit 'lock'.

    'lock' must currently be held; its inheritance token is exported to the
    child through the environment variable named by 'envvar'.  Note the
    caller's 'environ' dict, when given, is mutated in place.  Returns the
    subprocess exit code from ui.system.
    """
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as lockname:
        environ[envvar] = lockname
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
971 971
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
980 980
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta

    True when either format.generaldelta or format.usegeneraldelta is set.
    """
    # The rendered diff left both the pre- and post-change line in place,
    # which is not valid Python; keep the post-change form where the
    # default for format.usegeneraldelta comes from the registered config
    # item instead of an inline 'True'.
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta')
            or ui.configbool('format', 'usegeneraldelta'))
987 987
def gddeltaconfig(ui):
    """Tell whether incoming deltas should be optimised for general delta.
    """
    # experimental config: format.generaldelta
    enabled = ui.configbool('format', 'generaldelta')
    return enabled
993 993
class simplekeyvaluefile(object):
    r"""A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    # reserved dict key used by read()/write() for the raw first line
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # vfs: filesystem abstraction used for all reads/writes
        # path: file location relative to the vfs root
        # keys: accepted for interface compatibility but currently unused
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key.

        Returns a dict of the parsed contents; raises CorruptedState on a
        malformed file.  Assumes every line ends with '\\n' (the [:-1] slices
        strip it) -- a file without a trailing newline would lose its last
        character.
        """
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            # a line without '=' makes dict() raise ValueError
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form

        Raises ProgrammingError on invalid keys/values; the write itself is
        atomic (atomictemp).
        """
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
General Comments 0
You need to be logged in to leave comments. Login now