hgweb: move ismember from `hgweb.common` to `scmutil`...
marmoute
r51314:4bddc2f7 default
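For context, the relocated helper keeps its exact semantics, and `hgweb.common` re-exports it for compatibility (see the alias in the diff below). A hypothetical standalone sketch of those semantics; the real function is now `scmutil.ismember(ui, username, userlist)`, with the `ui` argument unused:

```python
# Hypothetical standalone version of the moved helper; the real
# scmutil.ismember(ui, username, userlist) behaves the same way.
def ismember(username, userlist):
    # a single b'*' entry means every user is a member
    return userlist == [b'*'] or username in userlist

assert ismember(b'alice', [b'*'])                # wildcard list
assert ismember(b'bob', [b'alice', b'bob'])      # explicit member
assert not ismember(b'eve', [b'alice', b'bob'])  # not listed
```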
@@ -1,314 +1,307 b''
1 1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9
10 10 import base64
11 11 import errno
12 12 import mimetypes
13 13 import os
14 14 import stat
15 15
16 16 from ..i18n import _
17 17 from ..pycompat import (
18 18 getattr,
19 19 open,
20 20 )
21 21 from .. import (
22 22 encoding,
23 23 pycompat,
24 scmutil,
24 25 templater,
25 26 util,
26 27 )
27 28
28 29 httpserver = util.httpserver
29 30
30 31 HTTP_OK = 200
31 32 HTTP_CREATED = 201
32 33 HTTP_NOT_MODIFIED = 304
33 34 HTTP_BAD_REQUEST = 400
34 35 HTTP_UNAUTHORIZED = 401
35 36 HTTP_FORBIDDEN = 403
36 37 HTTP_NOT_FOUND = 404
37 38 HTTP_METHOD_NOT_ALLOWED = 405
38 39 HTTP_NOT_ACCEPTABLE = 406
39 40 HTTP_UNSUPPORTED_MEDIA_TYPE = 415
40 41 HTTP_SERVER_ERROR = 500
41 42
42
43 def ismember(ui, username, userlist):
44 """Check if username is a member of userlist.
45
46 If userlist has a single '*' member, all users are considered members.
47 Can be overridden by extensions to provide more complex authorization
48 schemes.
49 """
50 return userlist == [b'*'] or username in userlist
43 ismember = scmutil.ismember
51 44
52 45
53 46 def hashiddenaccess(repo, req):
54 47 if bool(req.qsparams.get(b'access-hidden')):
55 48 # Disable this by default for now. Main risk is to get critical
56 49 # information exposed through this. This is especially risky if
57 50 # someone decided to make a changeset secret for good reason, but
58 51 # its predecessors are still draft.
59 52 #
60 53 # The feature is currently experimental, so we can still decide to
61 54 # change the default.
62 55 ui = repo.ui
63 56 allow = ui.configlist(b'experimental', b'server.allow-hidden-access')
64 57 user = req.remoteuser
65 58 if allow and ismember(ui, user, allow):
66 59 return True
67 60 else:
68 61 msg = (
69 62 _(
70 63 b'ignoring request to access hidden changeset by '
71 64 b'unauthorized user: %r\n'
72 65 )
73 66 % user
74 67 )
75 68 ui.warn(msg)
76 69 return False
77 70
78 71
79 72 def checkauthz(hgweb, req, op):
80 73 """Check permission for operation based on request data (including
81 74 authentication info). Return if op allowed, else raise an ErrorResponse
82 75 exception."""
83 76
84 77 user = req.remoteuser
85 78
86 79 deny_read = hgweb.configlist(b'web', b'deny_read')
87 80 if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)):
88 81 raise ErrorResponse(HTTP_UNAUTHORIZED, b'read not authorized')
89 82
90 83 allow_read = hgweb.configlist(b'web', b'allow_read')
91 84 if allow_read and (not ismember(hgweb.repo.ui, user, allow_read)):
92 85 raise ErrorResponse(HTTP_UNAUTHORIZED, b'read not authorized')
93 86
94 87 if op == b'pull' and not hgweb.allowpull:
95 88 raise ErrorResponse(HTTP_UNAUTHORIZED, b'pull not authorized')
96 89 elif op == b'pull' or op is None: # op is None for interface requests
97 90 return
98 91
99 92 # Allow LFS uploading via PUT requests
100 93 if op == b'upload':
101 94 if req.method != b'PUT':
102 95 msg = b'upload requires PUT request'
103 96 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
104 97 # enforce that you can only push using POST requests
105 98 elif req.method != b'POST':
106 99 msg = b'push requires POST request'
107 100 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
108 101
109 102 # require ssl by default for pushing, auth info cannot be sniffed
110 103 # and replayed
111 104 if hgweb.configbool(b'web', b'push_ssl') and req.urlscheme != b'https':
112 105 raise ErrorResponse(HTTP_FORBIDDEN, b'ssl required')
113 106
114 107 deny = hgweb.configlist(b'web', b'deny_push')
115 108 if deny and (not user or ismember(hgweb.repo.ui, user, deny)):
116 109 raise ErrorResponse(HTTP_UNAUTHORIZED, b'push not authorized')
117 110
118 111 allow = hgweb.configlist(b'web', b'allow-push')
119 112 if not (allow and ismember(hgweb.repo.ui, user, allow)):
120 113 raise ErrorResponse(HTTP_UNAUTHORIZED, b'push not authorized')
121 114
122 115
123 116 # Hooks for hgweb permission checks; extensions can add hooks here.
124 117 # Each hook is invoked like this: hook(hgweb, request, operation),
125 118 # where operation is either read, pull, push or upload. Hooks should either
126 119 # raise an ErrorResponse exception, or just return.
127 120 #
128 121 # It is possible to do both authentication and authorization through
129 122 # this.
130 123 permhooks = [checkauthz]
131 124
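The comment block above spells out the hook contract; a hedged sketch of how a hypothetical extension could use it (the hook name and policy are invented for illustration):

```python
# Hypothetical extension snippet: reject pushes on Fridays. The hook
# signature hook(hgweb, request, operation) and the "raise or return"
# contract come from the comment above.
import time

from mercurial.hgweb.common import (
    HTTP_FORBIDDEN,
    ErrorResponse,
    permhooks,
)

def nofridaypush(hgweb, req, op):
    if op == b'push' and time.localtime().tm_wday == 4:  # Friday
        raise ErrorResponse(HTTP_FORBIDDEN, b'no pushes on fridays')
    # returning normally means "no objection"

permhooks.append(nofridaypush)
```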
132 125
133 126 class ErrorResponse(Exception):
134 127 def __init__(self, code, message=None, headers=None):
135 128 if message is None:
136 129 message = _statusmessage(code)
137 130 Exception.__init__(self, pycompat.sysstr(message))
138 131 self.code = code
139 132 if headers is None:
140 133 headers = []
141 134 self.headers = headers
142 135 self.message = message
143 136
144 137
145 138 class continuereader:
146 139 """File object wrapper to handle HTTP 100-continue.
147 140
148 141 This is used by servers so they automatically handle Expect: 100-continue
149 142 request headers. On first read of the request body, the 100 Continue
150 143 response is sent. This should trigger the client into actually sending
151 144 the request body.
152 145 """
153 146
154 147 def __init__(self, f, write):
155 148 self.f = f
156 149 self._write = write
157 150 self.continued = False
158 151
159 152 def read(self, amt=-1):
160 153 if not self.continued:
161 154 self.continued = True
162 155 self._write(b'HTTP/1.1 100 Continue\r\n\r\n')
163 156 return self.f.read(amt)
164 157
165 158 def __getattr__(self, attr):
166 159 if attr in (b'close', b'readline', b'readlines', b'__iter__'):
167 160 return getattr(self.f, attr)
168 161 raise AttributeError
169 162
170 163
171 164 def _statusmessage(code):
172 165 responses = httpserver.basehttprequesthandler.responses
173 166 return pycompat.bytesurl(responses.get(code, ('Error', 'Unknown error'))[0])
174 167
175 168
176 169 def statusmessage(code, message=None):
177 170 return b'%d %s' % (code, message or _statusmessage(code))
178 171
179 172
180 173 def get_stat(spath, fn):
181 174 """stat fn if it exists, spath otherwise"""
182 175 cl_path = os.path.join(spath, fn)
183 176 if os.path.exists(cl_path):
184 177 return os.stat(cl_path)
185 178 else:
186 179 return os.stat(spath)
187 180
188 181
189 182 def get_mtime(spath):
190 183 return get_stat(spath, b"00changelog.i")[stat.ST_MTIME]
191 184
192 185
193 186 def ispathsafe(path):
194 187 """Determine if a path is safe to use for filesystem access."""
195 188 parts = path.split(b'/')
196 189 for part in parts:
197 190 if (
198 191 part in (b'', pycompat.oscurdir, pycompat.ospardir)
199 192 or pycompat.ossep in part
200 193 or pycompat.osaltsep is not None
201 194 and pycompat.osaltsep in part
202 195 ):
203 196 return False
204 197
205 198 return True
206 199
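To make the `ispathsafe` checks concrete, a standalone sketch assuming the POSIX separator values (`oscurdir == b'.'`, `ospardir == b'..'`, `ossep == b'/'`, no `osaltsep`):

```python
# POSIX-only sketch of ispathsafe; on Windows the original also
# rejects components containing the platform separators.
def ispathsafe(path):
    for part in path.split(b'/'):
        if part in (b'', b'.', b'..'):
            return False
    return True

assert ispathsafe(b'static/style.css')
assert not ispathsafe(b'../secret')     # parent-directory traversal
assert not ispathsafe(b'a//b')          # empty component
assert not ispathsafe(b'/etc/passwd')   # leading empty component
```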
207 200
208 201 def staticfile(templatepath, directory, fname, res):
209 202 """return a file inside directory with guessed Content-Type header
210 203
211 204 fname always uses '/' as directory separator and isn't allowed to
212 205 contain unusual path components.
213 206 Content-Type is guessed using the mimetypes module.
214 207 Return an empty string if fname is illegal or file not found.
215 208
216 209 """
217 210 if not ispathsafe(fname):
218 211 return
219 212
220 213 if not directory:
221 214 tp = templatepath or templater.templatedir()
222 215 if tp is not None:
223 216 directory = os.path.join(tp, b'static')
224 217
225 218 fpath = os.path.join(*fname.split(b'/'))
226 219 ct = pycompat.sysbytes(
227 220 mimetypes.guess_type(pycompat.fsdecode(fpath))[0] or r"text/plain"
228 221 )
229 222 path = os.path.join(directory, fpath)
230 223 try:
231 224 os.stat(path)
232 225 with open(path, b'rb') as fh:
233 226 data = fh.read()
234 227 except TypeError:
235 228 raise ErrorResponse(HTTP_SERVER_ERROR, b'illegal filename')
236 229 except OSError as err:
237 230 if err.errno == errno.ENOENT:
238 231 raise ErrorResponse(HTTP_NOT_FOUND)
239 232 else:
240 233 raise ErrorResponse(
241 234 HTTP_SERVER_ERROR, encoding.strtolocal(err.strerror)
242 235 )
243 236
244 237 res.headers[b'Content-Type'] = ct
245 238 res.setbodybytes(data)
246 239 return res
247 240
248 241
249 242 def paritygen(stripecount, offset=0):
250 243 """count parity of horizontal stripes for easier reading"""
251 244 if stripecount and offset:
252 245 # account for offset, e.g. due to building the list in reverse
253 246 count = (stripecount + offset) % stripecount
254 247 parity = (stripecount + offset) // stripecount & 1
255 248 else:
256 249 count = 0
257 250 parity = 0
258 251 while True:
259 252 yield parity
260 253 count += 1
261 254 if stripecount and count >= stripecount:
262 255 parity = 1 - parity
263 256 count = 0
264 257
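A usage sketch for `paritygen`, assuming Mercurial is importable; the printed values follow directly from the generator logic above:

```python
from mercurial.hgweb.common import paritygen

gen = paritygen(3)                     # stripes of three rows
print([next(gen) for _ in range(8)])   # -> [0, 0, 0, 1, 1, 1, 0, 0]

gen = paritygen(3, offset=2)           # offset shortens the first stripe
print([next(gen) for _ in range(5)])   # -> [1, 0, 0, 0, 1]
```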
265 258
266 259 def get_contact(config):
267 260 """Return repo contact information or empty string.
268 261
269 262 web.contact is the primary source, but if that is not set, try
270 263 ui.username or $EMAIL as a fallback to display something useful.
271 264 """
272 265 return (
273 266 config(b"web", b"contact")
274 267 or config(b"ui", b"username")
275 268 or encoding.environ.get(b"EMAIL")
276 269 or b""
277 270 )
278 271
279 272
280 273 def cspvalues(ui):
281 274 """Obtain the Content-Security-Policy header and nonce value.
282 275
283 276 Returns a 2-tuple of the CSP header value and the nonce value.
284 277
285 278 First value is ``None`` if CSP isn't enabled. Second value is ``None``
286 279 if CSP isn't enabled or if the CSP header doesn't need a nonce.
287 280 """
288 281 # Without demandimport, "import uuid" could have an immediate side-effect
289 282 # running "ldconfig" on Linux trying to find libuuid.
290 283 # With Python <= 2.7.12, that "ldconfig" is run via a shell and the shell
291 284 # may pollute the terminal with:
292 285 #
293 286 # shell-init: error retrieving current directory: getcwd: cannot access
294 287 # parent directories: No such file or directory
295 288 #
296 289 # Python >= 2.7.13 has fixed it by running "ldconfig" directly without a
297 290 # shell (hg changeset a09ae70f3489).
298 291 #
299 292 # Moved "import uuid" from here so it's executed after we know we have
300 293 # a sane cwd (i.e. after dispatch.py cwd check).
301 294 #
302 295 # We can move it back once we no longer need Python <= 2.7.12 support.
303 296 import uuid
304 297
305 298 # Don't allow untrusted CSP setting since it could disable protections
306 299 # from a trusted/global source.
307 300 csp = ui.config(b'web', b'csp', untrusted=False)
308 301 nonce = None
309 302
310 303 if csp and b'%nonce%' in csp:
311 304 nonce = base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip(b'=')
312 305 csp = csp.replace(b'%nonce%', nonce)
313 306
314 307 return csp, nonce
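The nonce handling in `cspvalues` uses only stdlib calls; a standalone sketch with a hypothetical policy string:

```python
import base64
import uuid

# Hypothetical CSP policy; %nonce% is the placeholder handled above.
csp = b"default-src 'self'; script-src 'self' 'nonce-%nonce%'"
nonce = base64.urlsafe_b64encode(uuid.uuid4().bytes).rstrip(b'=')
if b'%nonce%' in csp:
    csp = csp.replace(b'%nonce%', nonce)
print(csp)  # the nonce value varies on every call
```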
@@ -1,2315 +1,2325 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import binascii
10 10 import errno
11 11 import glob
12 12 import os
13 13 import posixpath
14 14 import re
15 15 import subprocess
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullrev,
23 23 short,
24 24 wdirrev,
25 25 )
26 26 from .pycompat import getattr
27 27 from .thirdparty import attr
28 28 from . import (
29 29 copies as copiesmod,
30 30 encoding,
31 31 error,
32 32 match as matchmod,
33 33 obsolete,
34 34 obsutil,
35 35 pathutil,
36 36 phases,
37 37 policy,
38 38 pycompat,
39 39 requirements as requirementsmod,
40 40 revsetlang,
41 41 similar,
42 42 smartset,
43 43 url,
44 44 util,
45 45 vfs,
46 46 )
47 47
48 48 from .utils import (
49 49 hashutil,
50 50 procutil,
51 51 stringutil,
52 52 )
53 53
54 54 if pycompat.iswindows:
55 55 from . import scmwindows as scmplatform
56 56 else:
57 57 from . import scmposix as scmplatform
58 58
59 59 parsers = policy.importmod('parsers')
60 60 rustrevlog = policy.importrust('revlog')
61 61
62 62 termsize = scmplatform.termsize
63 63
64 64
65 65 @attr.s(slots=True, repr=False)
66 66 class status:
67 67 """Struct with a list of files per status.
68 68
69 69 The 'deleted', 'unknown' and 'ignored' properties are only
70 70 relevant to the working copy.
71 71 """
72 72
73 73 modified = attr.ib(default=attr.Factory(list))
74 74 added = attr.ib(default=attr.Factory(list))
75 75 removed = attr.ib(default=attr.Factory(list))
76 76 deleted = attr.ib(default=attr.Factory(list))
77 77 unknown = attr.ib(default=attr.Factory(list))
78 78 ignored = attr.ib(default=attr.Factory(list))
79 79 clean = attr.ib(default=attr.Factory(list))
80 80
81 81 def __iter__(self):
82 82 yield self.modified
83 83 yield self.added
84 84 yield self.removed
85 85 yield self.deleted
86 86 yield self.unknown
87 87 yield self.ignored
88 88 yield self.clean
89 89
90 90 def __repr__(self):
91 91 return (
92 92 r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
93 93 r'unknown=%s, ignored=%s, clean=%s>'
94 94 ) % tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
95 95
96 96
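A hedged sketch of the `status` struct's fixed seven-list iteration order (assuming `mercurial.scmutil` is importable; the file names are invented):

```python
from mercurial import scmutil

st = scmutil.status(modified=[b'a.txt'], added=[b'b.txt'])
modified, added, removed, deleted, unknown, ignored, clean = st
assert modified == [b'a.txt'] and added == [b'b.txt']
assert clean == []  # unset lists default to empty
```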
97 97 def itersubrepos(ctx1, ctx2):
98 98 """find subrepos in ctx1 or ctx2"""
99 99 # Create a (subpath, ctx) mapping where we prefer subpaths from
100 100 # ctx1. The subpaths from ctx2 are important when the .hgsub file
101 101 # has been modified (in ctx2) but not yet committed (in ctx1).
102 102 subpaths = dict.fromkeys(ctx2.substate, ctx2)
103 103 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
104 104
105 105 missing = set()
106 106
107 107 for subpath in ctx2.substate:
108 108 if subpath not in ctx1.substate:
109 109 del subpaths[subpath]
110 110 missing.add(subpath)
111 111
112 112 for subpath, ctx in sorted(subpaths.items()):
113 113 yield subpath, ctx.sub(subpath)
114 114
115 115 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
116 116 # status and diff will have an accurate result when it does
117 117 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
118 118 # against itself.
119 119 for subpath in missing:
120 120 yield subpath, ctx2.nullsub(subpath, ctx1)
121 121
122 122
123 123 def nochangesfound(ui, repo, excluded=None):
124 124 """Report no changes for push/pull, excluded is None or a list of
125 125 nodes excluded from the push/pull.
126 126 """
127 127 secretlist = []
128 128 if excluded:
129 129 for n in excluded:
130 130 ctx = repo[n]
131 131 if ctx.phase() >= phases.secret and not ctx.extinct():
132 132 secretlist.append(n)
133 133
134 134 if secretlist:
135 135 ui.status(
136 136 _(b"no changes found (ignored %d secret changesets)\n")
137 137 % len(secretlist)
138 138 )
139 139 else:
140 140 ui.status(_(b"no changes found\n"))
141 141
142 142
143 143 def callcatch(ui, func):
144 144 """call func() with global exception handling
145 145
146 146 return func() if no exception happens. otherwise do some error handling
147 147 and return an exit code accordingly. does not handle all exceptions.
148 148 """
149 149 coarse_exit_code = -1
150 150 detailed_exit_code = -1
151 151 try:
152 152 try:
153 153 return func()
154 154 except: # re-raises
155 155 ui.traceback()
156 156 raise
157 157 # Global exception handling, alphabetically
158 158 # Mercurial-specific first, followed by built-in and library exceptions
159 159 except error.LockHeld as inst:
160 160 detailed_exit_code = 20
161 161 if inst.errno == errno.ETIMEDOUT:
162 162 reason = _(b'timed out waiting for lock held by %r') % (
163 163 pycompat.bytestr(inst.locker)
164 164 )
165 165 else:
166 166 reason = _(b'lock held by %r') % inst.locker
167 167 ui.error(
168 168 _(b"abort: %s: %s\n")
169 169 % (inst.desc or stringutil.forcebytestr(inst.filename), reason)
170 170 )
171 171 if not inst.locker:
172 172 ui.error(_(b"(lock might be very busy)\n"))
173 173 except error.LockUnavailable as inst:
174 174 detailed_exit_code = 20
175 175 ui.error(
176 176 _(b"abort: could not lock %s: %s\n")
177 177 % (
178 178 inst.desc or stringutil.forcebytestr(inst.filename),
179 179 encoding.strtolocal(inst.strerror),
180 180 )
181 181 )
182 182 except error.RepoError as inst:
183 183 if isinstance(inst, error.RepoLookupError):
184 184 detailed_exit_code = 10
185 185 ui.error(_(b"abort: %s\n") % inst)
186 186 if inst.hint:
187 187 ui.error(_(b"(%s)\n") % inst.hint)
188 188 except error.ResponseError as inst:
189 189 ui.error(_(b"abort: %s") % inst.args[0])
190 190 msg = inst.args[1]
191 191 if isinstance(msg, type(u'')):
192 192 msg = pycompat.sysbytes(msg)
193 193 if msg is None:
194 194 ui.error(b"\n")
195 195 elif not isinstance(msg, bytes):
196 196 ui.error(b" %r\n" % (msg,))
197 197 elif not msg:
198 198 ui.error(_(b" empty string\n"))
199 199 else:
200 200 ui.error(b"\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
201 201 except error.CensoredNodeError as inst:
202 202 ui.error(_(b"abort: file censored %s\n") % inst)
203 203 except error.WdirUnsupported:
204 204 ui.error(_(b"abort: working directory revision cannot be specified\n"))
205 205 except error.Error as inst:
206 206 if inst.detailed_exit_code is not None:
207 207 detailed_exit_code = inst.detailed_exit_code
208 208 if inst.coarse_exit_code is not None:
209 209 coarse_exit_code = inst.coarse_exit_code
210 210 ui.error(inst.format())
211 211 except error.WorkerError as inst:
212 212 # Don't print a message -- the worker already should have
213 213 return inst.status_code
214 214 except ImportError as inst:
215 215 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
216 216 m = stringutil.forcebytestr(inst).split()[-1]
217 217 if m in b"mpatch bdiff".split():
218 218 ui.error(_(b"(did you forget to compile extensions?)\n"))
219 219 elif m in b"zlib".split():
220 220 ui.error(_(b"(is your Python install correct?)\n"))
221 221 except util.urlerr.httperror as inst:
222 222 detailed_exit_code = 100
223 223 ui.error(_(b"abort: %s\n") % stringutil.forcebytestr(inst))
224 224 except util.urlerr.urlerror as inst:
225 225 detailed_exit_code = 100
226 226 try: # usually it is in the form (errno, strerror)
227 227 reason = inst.reason.args[1]
228 228 except (AttributeError, IndexError):
229 229 # it might be anything, for example a string
230 230 reason = inst.reason
231 231 if isinstance(reason, str):
232 232 # SSLError of Python 2.7.9 contains a unicode
233 233 reason = encoding.unitolocal(reason)
234 234 ui.error(_(b"abort: error: %s\n") % stringutil.forcebytestr(reason))
235 235 except (IOError, OSError) as inst:
236 236 if (
237 237 util.safehasattr(inst, b"args")
238 238 and inst.args
239 239 and inst.args[0] == errno.EPIPE
240 240 ):
241 241 pass
242 242 elif getattr(inst, "strerror", None): # common IOError or OSError
243 243 if getattr(inst, "filename", None) is not None:
244 244 ui.error(
245 245 _(b"abort: %s: '%s'\n")
246 246 % (
247 247 encoding.strtolocal(inst.strerror),
248 248 stringutil.forcebytestr(inst.filename),
249 249 )
250 250 )
251 251 else:
252 252 ui.error(_(b"abort: %s\n") % encoding.strtolocal(inst.strerror))
253 253 else: # suspicious IOError
254 254 raise
255 255 except MemoryError:
256 256 ui.error(_(b"abort: out of memory\n"))
257 257 except SystemExit as inst:
258 258 # Commands shouldn't sys.exit directly, but give a return code.
259 259 # Just in case, catch this and pass the exit code to the caller.
260 260 detailed_exit_code = 254
261 261 coarse_exit_code = inst.code
262 262
263 263 if ui.configbool(b'ui', b'detailed-exit-code'):
264 264 return detailed_exit_code
265 265 else:
266 266 return coarse_exit_code
267 267
268 268
269 269 def checknewlabel(repo, lbl, kind):
270 270 # Do not use the "kind" parameter in ui output.
271 271 # It makes strings difficult to translate.
272 272 if lbl in [b'tip', b'.', b'null']:
273 273 raise error.InputError(_(b"the name '%s' is reserved") % lbl)
274 274 for c in (b':', b'\0', b'\n', b'\r'):
275 275 if c in lbl:
276 276 raise error.InputError(
277 277 _(b"%r cannot be used in a name") % pycompat.bytestr(c)
278 278 )
279 279 try:
280 280 int(lbl)
281 281 if b'_' in lbl:
282 282 # If label contains underscores, Python might consider it an
283 283 # integer (with "_" as visual separators), but we do not.
284 284 # See PEP 515 - Underscores in Numeric Literals.
285 285 raise ValueError
286 286 raise error.InputError(_(b"cannot use an integer as a name"))
287 287 except ValueError:
288 288 pass
289 289 if lbl.strip() != lbl:
290 290 raise error.InputError(
291 291 _(b"leading or trailing whitespace in name %r") % lbl
292 292 )
293 293
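A short illustration of why the explicit underscore check above is needed (assumes Python 3.6+, where PEP 515 applies to `int()` parsing):

```python
# int() accepts b'1_0' as 10, so without the b'_' check the label
# b'1_0' would be rejected as "an integer name". Raising ValueError
# instead routes it to the except branch, and the label is allowed.
assert int(b'1_0') == 10
assert b'_' in b'1_0'
```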
294 294
295 295 def checkfilename(f):
296 296 '''Check that the filename f is an acceptable filename for a tracked file'''
297 297 if b'\r' in f or b'\n' in f:
298 298 raise error.InputError(
299 299 _(b"'\\n' and '\\r' disallowed in filenames: %r")
300 300 % pycompat.bytestr(f)
301 301 )
302 302
303 303
304 304 def checkportable(ui, f):
305 305 '''Check if filename f is portable and warn or abort depending on config'''
306 306 checkfilename(f)
307 307 abort, warn = checkportabilityalert(ui)
308 308 if abort or warn:
309 309 msg = util.checkwinfilename(f)
310 310 if msg:
311 311 msg = b"%s: %s" % (msg, procutil.shellquote(f))
312 312 if abort:
313 313 raise error.InputError(msg)
314 314 ui.warn(_(b"warning: %s\n") % msg)
315 315
316 316
317 317 def checkportabilityalert(ui):
318 318 """check if the user's config requests nothing, a warning, or abort for
319 319 non-portable filenames"""
320 320 val = ui.config(b'ui', b'portablefilenames')
321 321 lval = val.lower()
322 322 bval = stringutil.parsebool(val)
323 323 abort = pycompat.iswindows or lval == b'abort'
324 324 warn = bval or lval == b'warn'
325 325 if bval is None and not (warn or abort or lval == b'ignore'):
326 326 raise error.ConfigError(
327 327 _(b"ui.portablefilenames value is invalid ('%s')") % val
328 328 )
329 329 return abort, warn
330 330
331 331
332 332 class casecollisionauditor:
333 333 def __init__(self, ui, abort, dirstate):
334 334 self._ui = ui
335 335 self._abort = abort
336 336 allfiles = b'\0'.join(dirstate)
337 337 self._loweredfiles = set(encoding.lower(allfiles).split(b'\0'))
338 338 self._dirstate = dirstate
339 339 # The purpose of _newfiles is so that we don't complain about
340 340 # case collisions if someone were to call this object with the
341 341 # same filename twice.
342 342 self._newfiles = set()
343 343
344 344 def __call__(self, f):
345 345 if f in self._newfiles:
346 346 return
347 347 fl = encoding.lower(f)
348 348 if fl in self._loweredfiles and f not in self._dirstate:
349 349 msg = _(b'possible case-folding collision for %s') % f
350 350 if self._abort:
351 351 raise error.StateError(msg)
352 352 self._ui.warn(_(b"warning: %s\n") % msg)
353 353 self._loweredfiles.add(fl)
354 354 self._newfiles.add(f)
355 355
356 356
357 357 def filteredhash(repo, maxrev, needobsolete=False):
358 358 """build hash of filtered revisions in the current repoview.
359 359
360 360 Multiple caches perform up-to-date validation by checking that the
361 361 tiprev and tipnode stored in the cache file match the current repository.
362 362 However, this is not sufficient for validating repoviews because the set
363 363 of revisions in the view may change without the repository tiprev and
364 364 tipnode changing.
365 365
366 366 This function hashes all the revs filtered from the view (and, optionally,
367 367 all obsolete revs) up to maxrev and returns that SHA-1 digest.
368 368 """
369 369 cl = repo.changelog
370 370 if needobsolete:
371 371 obsrevs = obsolete.getrevs(repo, b'obsolete')
372 372 if not cl.filteredrevs and not obsrevs:
373 373 return None
374 374 key = (maxrev, hash(cl.filteredrevs), hash(obsrevs))
375 375 else:
376 376 if not cl.filteredrevs:
377 377 return None
378 378 key = maxrev
379 379 obsrevs = frozenset()
380 380
381 381 result = cl._filteredrevs_hashcache.get(key)
382 382 if not result:
383 383 revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
384 384 if revs:
385 385 s = hashutil.sha1()
386 386 for rev in revs:
387 387 s.update(b'%d;' % rev)
388 388 result = s.digest()
389 389 cl._filteredrevs_hashcache[key] = result
390 390 return result
391 391
392 392
393 393 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
394 394 """yield every hg repository under path, always recursively.
395 395 The recurse flag will only control recursion into repo working dirs"""
396 396
397 397 def errhandler(err):
398 398 if err.filename == path:
399 399 raise err
400 400
401 401 samestat = getattr(os.path, 'samestat', None)
402 402 if followsym and samestat is not None:
403 403
404 404 def adddir(dirlst, dirname):
405 405 dirstat = os.stat(dirname)
406 406 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
407 407 if not match:
408 408 dirlst.append(dirstat)
409 409 return not match
410 410
411 411 else:
412 412 followsym = False
413 413
414 414 if (seen_dirs is None) and followsym:
415 415 seen_dirs = []
416 416 adddir(seen_dirs, path)
417 417 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
418 418 dirs.sort()
419 419 if b'.hg' in dirs:
420 420 yield root # found a repository
421 421 qroot = os.path.join(root, b'.hg', b'patches')
422 422 if os.path.isdir(os.path.join(qroot, b'.hg')):
423 423 yield qroot # we have a patch queue repo here
424 424 if recurse:
425 425 # avoid recursing inside the .hg directory
426 426 dirs.remove(b'.hg')
427 427 else:
428 428 dirs[:] = [] # don't descend further
429 429 elif followsym:
430 430 newdirs = []
431 431 for d in dirs:
432 432 fname = os.path.join(root, d)
433 433 if adddir(seen_dirs, fname):
434 434 if os.path.islink(fname):
435 435 for hgname in walkrepos(fname, True, seen_dirs):
436 436 yield hgname
437 437 else:
438 438 newdirs.append(d)
439 439 dirs[:] = newdirs
440 440
441 441
442 442 def binnode(ctx):
443 443 """Return binary node id for a given basectx"""
444 444 node = ctx.node()
445 445 if node is None:
446 446 return ctx.repo().nodeconstants.wdirid
447 447 return node
448 448
449 449
450 450 def intrev(ctx):
451 451 """Return integer for a given basectx that can be used in comparison or
452 452 arithmetic operation"""
453 453 rev = ctx.rev()
454 454 if rev is None:
455 455 return wdirrev
456 456 return rev
457 457
458 458
459 459 def formatchangeid(ctx):
460 460 """Format changectx as '{rev}:{node|formatnode}', which is the default
461 461 template provided by logcmdutil.changesettemplater"""
462 462 repo = ctx.repo()
463 463 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
464 464
465 465
466 466 def formatrevnode(ui, rev, node):
467 467 """Format given revision and node depending on the current verbosity"""
468 468 if ui.debugflag:
469 469 hexfunc = hex
470 470 else:
471 471 hexfunc = short
472 472 return b'%d:%s' % (rev, hexfunc(node))
473 473
474 474
475 475 def resolvehexnodeidprefix(repo, prefix):
476 476 if prefix.startswith(b'x'):
477 477 prefix = prefix[1:]
478 478 try:
479 479 # Uses unfiltered repo because it's faster when prefix is ambiguous.
480 480 # This matches the shortesthexnodeidprefix() function below.
481 481 node = repo.unfiltered().changelog._partialmatch(prefix)
482 482 except error.AmbiguousPrefixLookupError:
483 483 revset = repo.ui.config(
484 484 b'experimental', b'revisions.disambiguatewithin'
485 485 )
486 486 if revset:
487 487 # Clear config to avoid infinite recursion
488 488 configoverrides = {
489 489 (b'experimental', b'revisions.disambiguatewithin'): None
490 490 }
491 491 with repo.ui.configoverride(configoverrides):
492 492 revs = repo.anyrevs([revset], user=True)
493 493 matches = []
494 494 for rev in revs:
495 495 node = repo.changelog.node(rev)
496 496 if hex(node).startswith(prefix):
497 497 matches.append(node)
498 498 if len(matches) == 1:
499 499 return matches[0]
500 500 raise
501 501 if node is None:
502 502 return
503 503 repo.changelog.rev(node) # make sure node isn't filtered
504 504 return node
505 505
506 506
507 507 def mayberevnum(repo, prefix):
508 508 """Checks if the given prefix may be mistaken for a revision number"""
509 509 try:
510 510 i = int(prefix)
511 511 # if we are a pure int, then starting with zero will not be
512 512 # confused as a rev; or, obviously, if the int is larger
513 513 # than the value of the tip rev. We still need to disambiguate if
514 514 # prefix == '0', since that *is* a valid revnum.
515 515 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
516 516 return False
517 517 return True
518 518 except ValueError:
519 519 return False
520 520
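The rules in `mayberevnum`, restated as a standalone sketch; `repolen` stands in for `len(repo)` and 1500 is a hypothetical revision count:

```python
def mayberevnum(repolen, prefix):
    try:
        i = int(prefix)
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= repolen:
            return False
        return True
    except ValueError:
        return False

assert mayberevnum(1500, b'42')        # plausible revision number
assert mayberevnum(1500, b'0')         # b'0' is a valid revnum
assert not mayberevnum(1500, b'0123')  # leading zero: never a revnum
assert not mayberevnum(1500, b'9999')  # larger than the tip rev
assert not mayberevnum(1500, b'cafe')  # not an integer at all
```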
521 521
522 522 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
523 523 """Find the shortest unambiguous prefix that matches hexnode.
524 524
525 525 If "cache" is not None, it must be a dictionary that can be used for
526 526 caching between calls to this method.
527 527 """
528 528 # _partialmatch() of filtered changelog could take O(len(repo)) time,
529 529 # which would be unacceptably slow. So we look for hash collisions in
530 530 # unfiltered space, which means some hashes may be slightly longer.
531 531
532 532 minlength = max(minlength, 1)
533 533
534 534 def disambiguate(prefix):
535 535 """Disambiguate against revnums."""
536 536 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'):
537 537 if mayberevnum(repo, prefix):
538 538 return b'x' + prefix
539 539 else:
540 540 return prefix
541 541
542 542 hexnode = hex(node)
543 543 for length in range(len(prefix), len(hexnode) + 1):
544 544 prefix = hexnode[:length]
545 545 if not mayberevnum(repo, prefix):
546 546 return prefix
547 547
548 548 cl = repo.unfiltered().changelog
549 549 revset = repo.ui.config(b'experimental', b'revisions.disambiguatewithin')
550 550 if revset:
551 551 revs = None
552 552 if cache is not None:
553 553 revs = cache.get(b'disambiguationrevset')
554 554 if revs is None:
555 555 revs = repo.anyrevs([revset], user=True)
556 556 if cache is not None:
557 557 cache[b'disambiguationrevset'] = revs
558 558 if cl.rev(node) in revs:
559 559 hexnode = hex(node)
560 560 nodetree = None
561 561 if cache is not None:
562 562 nodetree = cache.get(b'disambiguationnodetree')
563 563 if not nodetree:
564 564 if util.safehasattr(parsers, 'nodetree'):
565 565 # The CExt is the only implementation to provide a nodetree
566 566 # class so far.
567 567 index = cl.index
568 568 if util.safehasattr(index, 'get_cindex'):
569 569 # the rust wrapper needs to give access to its internal index
570 570 index = index.get_cindex()
571 571 nodetree = parsers.nodetree(index, len(revs))
572 572 for r in revs:
573 573 nodetree.insert(r)
574 574 if cache is not None:
575 575 cache[b'disambiguationnodetree'] = nodetree
576 576 if nodetree is not None:
577 577 length = max(nodetree.shortest(node), minlength)
578 578 prefix = hexnode[:length]
579 579 return disambiguate(prefix)
580 580 for length in range(minlength, len(hexnode) + 1):
581 581 matches = []
582 582 prefix = hexnode[:length]
583 583 for rev in revs:
584 584 otherhexnode = repo[rev].hex()
585 585 if prefix == otherhexnode[:length]:
586 586 matches.append(otherhexnode)
587 587 if len(matches) == 1:
588 588 return disambiguate(prefix)
589 589
590 590 try:
591 591 return disambiguate(cl.shortest(node, minlength))
592 592 except error.LookupError:
593 593 raise error.RepoLookupError()
594 594
595 595
596 596 def isrevsymbol(repo, symbol):
597 597 """Checks if a symbol exists in the repo.
598 598
599 599 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
600 600 symbol is an ambiguous nodeid prefix.
601 601 """
602 602 try:
603 603 revsymbol(repo, symbol)
604 604 return True
605 605 except error.RepoLookupError:
606 606 return False
607 607
608 608
609 609 def revsymbol(repo, symbol):
610 610 """Returns a context given a single revision symbol (as string).
611 611
612 612 This is similar to revsingle(), but accepts only a single revision symbol,
613 613 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
614 614 not "max(public())".
615 615 """
616 616 if not isinstance(symbol, bytes):
617 617 msg = (
618 618 b"symbol (%s of type %s) was not a string, did you mean "
619 619 b"repo[symbol]?" % (symbol, type(symbol))
620 620 )
621 621 raise error.ProgrammingError(msg)
622 622 try:
623 623 if symbol in (b'.', b'tip', b'null'):
624 624 return repo[symbol]
625 625
626 626 try:
627 627 r = int(symbol)
628 628 if b'%d' % r != symbol:
629 629 raise ValueError
630 630 l = len(repo.changelog)
631 631 if r < 0:
632 632 r += l
633 633 if r < 0 or r >= l and r != wdirrev:
634 634 raise ValueError
635 635 return repo[r]
636 636 except error.FilteredIndexError:
637 637 raise
638 638 except (ValueError, OverflowError, IndexError):
639 639 pass
640 640
641 641 if len(symbol) == 2 * repo.nodeconstants.nodelen:
642 642 try:
643 643 node = bin(symbol)
644 644 rev = repo.changelog.rev(node)
645 645 return repo[rev]
646 646 except error.FilteredLookupError:
647 647 raise
648 648 except (binascii.Error, LookupError):
649 649 pass
650 650
651 651 # look up bookmarks through the name interface
652 652 try:
653 653 node = repo.names.singlenode(repo, symbol)
654 654 rev = repo.changelog.rev(node)
655 655 return repo[rev]
656 656 except KeyError:
657 657 pass
658 658
659 659 node = resolvehexnodeidprefix(repo, symbol)
660 660 if node is not None:
661 661 rev = repo.changelog.rev(node)
662 662 return repo[rev]
663 663
664 664 raise error.RepoLookupError(_(b"unknown revision '%s'") % symbol)
665 665
666 666 except error.WdirUnsupported:
667 667 return repo[None]
668 668 except (
669 669 error.FilteredIndexError,
670 670 error.FilteredLookupError,
671 671 error.FilteredRepoLookupError,
672 672 ):
673 673 raise _filterederror(repo, symbol)
674 674
675 675
676 676 def _filterederror(repo, changeid):
677 677 """build an exception to be raised about a filtered changeid
678 678
679 679 This is extracted in a function to help extensions (eg: evolve) to
680 680 experiment with various message variants."""
681 681 if repo.filtername.startswith(b'visible'):
682 682
683 683 # Check if the changeset is obsolete
684 684 unfilteredrepo = repo.unfiltered()
685 685 ctx = revsymbol(unfilteredrepo, changeid)
686 686
687 687 # If the changeset is obsolete, enrich the message with the reason
688 688 # that made this changeset not visible
689 689 if ctx.obsolete():
690 690 msg = obsutil._getfilteredreason(repo, changeid, ctx)
691 691 else:
692 692 msg = _(b"hidden revision '%s'") % changeid
693 693
694 694 hint = _(b'use --hidden to access hidden revisions')
695 695
696 696 return error.FilteredRepoLookupError(msg, hint=hint)
697 697 msg = _(b"filtered revision '%s' (not in '%s' subset)")
698 698 msg %= (changeid, repo.filtername)
699 699 return error.FilteredRepoLookupError(msg)
700 700
701 701
702 702 def revsingle(repo, revspec, default=b'.', localalias=None):
703 703 if not revspec and revspec != 0:
704 704 return repo[default]
705 705
706 706 l = revrange(repo, [revspec], localalias=localalias)
707 707 if not l:
708 708 raise error.InputError(_(b'empty revision set'))
709 709 return repo[l.last()]
710 710
711 711
712 712 def _pairspec(revspec):
713 713 tree = revsetlang.parse(revspec)
714 714 return tree and tree[0] in (
715 715 b'range',
716 716 b'rangepre',
717 717 b'rangepost',
718 718 b'rangeall',
719 719 )
720 720
721 721
722 722 def revpair(repo, revs):
723 723 if not revs:
724 724 return repo[b'.'], repo[None]
725 725
726 726 l = revrange(repo, revs)
727 727
728 728 if not l:
729 729 raise error.InputError(_(b'empty revision range'))
730 730
731 731 first = l.first()
732 732 second = l.last()
733 733
734 734 if (
735 735 first == second
736 736 and len(revs) >= 2
737 737 and not all(revrange(repo, [r]) for r in revs)
738 738 ):
739 739 raise error.InputError(_(b'empty revision on one side of range'))
740 740
741 741 # if top-level is range expression, the result must always be a pair
742 742 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
743 743 return repo[first], repo[None]
744 744
745 745 return repo[first], repo[second]
746 746
747 747
748 748 def revrange(repo, specs, localalias=None):
749 749 """Execute 1 to many revsets and return the union.
750 750
751 751 This is the preferred mechanism for executing revsets using user-specified
752 752 config options, such as revset aliases.
753 753
754 754 The revsets specified by ``specs`` will be executed via a chained ``OR``
755 755 expression. If ``specs`` is empty, an empty result is returned.
756 756
757 757 ``specs`` can contain integers, in which case they are assumed to be
758 758 revision numbers.
759 759
760 760 It is assumed the revsets are already formatted. If you have arguments
761 761 that need to be expanded in the revset, call ``revsetlang.formatspec()``
762 762 and pass the result as an element of ``specs``.
763 763
764 764 Specifying a single revset is allowed.
765 765
766 766 Returns a ``smartset.abstractsmartset`` which is a list-like interface over
767 767 integer revisions.
768 768 """
769 769 allspecs = []
770 770 for spec in specs:
771 771 if isinstance(spec, int):
772 772 spec = revsetlang.formatspec(b'%d', spec)
773 773 allspecs.append(spec)
774 774 return repo.anyrevs(allspecs, user=True, localalias=localalias)
775 775
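A hedged caller sketch for `revrange`, assuming a loaded `repo` object; the `revsetlang.formatspec` call follows the pattern the docstring recommends, and `revs_by_author` is an invented name:

```python
from mercurial import revsetlang, scmutil

def revs_by_author(repo, author):
    # format the user-supplied value safely, then union three specs:
    # the formatted revset, a literal revset, and a bare revnum.
    spec = revsetlang.formatspec(b'author(%s)', author)
    return scmutil.revrange(repo, [spec, b'last(all(), 10)', 0])
```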
776 776
777 777 def increasingwindows(windowsize=8, sizelimit=512):
778 778 while True:
779 779 yield windowsize
780 780 if windowsize < sizelimit:
781 781 windowsize *= 2
782 782
783 783
784 784 def walkchangerevs(repo, revs, makefilematcher, prepare):
785 785 """Iterate over files and the revs in a "windowed" way.
786 786
787 787 Callers most commonly need to iterate backwards over the history
788 788 in which they are interested. Doing so has awful (quadratic-looking)
789 789 performance, so we use iterators in a "windowed" way.
790 790
791 791 We walk a window of revisions in the desired order. Within the
792 792 window, we first walk forwards to gather data, then in the desired
793 793 order (usually backwards) to display it.
794 794
795 795 This function returns an iterator yielding contexts. Before
796 796 yielding each context, the iterator will first call the prepare
797 797 function on each context in the window in forward order."""
798 798
799 799 if not revs:
800 800 return []
801 801 change = repo.__getitem__
802 802
803 803 def iterate():
804 804 it = iter(revs)
805 805 stopiteration = False
806 806 for windowsize in increasingwindows():
807 807 nrevs = []
808 808 for i in range(windowsize):
809 809 rev = next(it, None)
810 810 if rev is None:
811 811 stopiteration = True
812 812 break
813 813 nrevs.append(rev)
814 814 for rev in sorted(nrevs):
815 815 ctx = change(rev)
816 816 prepare(ctx, makefilematcher(ctx))
817 817 for rev in nrevs:
818 818 yield change(rev)
819 819
820 820 if stopiteration:
821 821 break
822 822
823 823 return iterate()
824 824
825 825
826 826 def meaningfulparents(repo, ctx):
827 827 """Return list of meaningful (or all if debug) parentrevs for rev.
828 828
829 829 For merges (two non-nullrev revisions) both parents are meaningful.
830 830 Otherwise the first parent revision is considered meaningful if it
831 831 is not the preceding revision.
832 832 """
833 833 parents = ctx.parents()
834 834 if len(parents) > 1:
835 835 return parents
836 836 if repo.ui.debugflag:
837 837 return [parents[0], repo[nullrev]]
838 838 if parents[0].rev() >= intrev(ctx) - 1:
839 839 return []
840 840 return parents
841 841
842 842
843 843 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
844 844 """Return a function that produced paths for presenting to the user.
845 845
846 846 The returned function takes a repo-relative path and produces a path
847 847 that can be presented in the UI.
848 848
849 849 Depending on the value of ui.relative-paths, either a repo-relative or
850 850 cwd-relative path will be produced.
851 851
852 852 legacyrelativevalue is the value to use if ui.relative-paths=legacy
853 853
854 854 If forcerelativevalue is not None, then that value will be used regardless
855 855 of what ui.relative-paths is set to.
856 856 """
857 857 if forcerelativevalue is not None:
858 858 relative = forcerelativevalue
859 859 else:
860 860 config = repo.ui.config(b'ui', b'relative-paths')
861 861 if config == b'legacy':
862 862 relative = legacyrelativevalue
863 863 else:
864 864 relative = stringutil.parsebool(config)
865 865 if relative is None:
866 866 raise error.ConfigError(
867 867 _(b"ui.relative-paths is not a boolean ('%s')") % config
868 868 )
869 869
870 870 if relative:
871 871 cwd = repo.getcwd()
872 872 if cwd != b'':
873 873 # this branch would work even if cwd == b'' (ie cwd = repo
874 874 # root), but its generality makes the returned function slower
875 875 pathto = repo.pathto
876 876 return lambda f: pathto(f, cwd)
877 877 if repo.ui.configbool(b'ui', b'slash'):
878 878 return lambda f: f
879 879 else:
880 880 return util.localpath
881 881
882 882
883 883 def subdiruipathfn(subpath, uipathfn):
884 884 '''Create a new uipathfn that treats the file as relative to subpath.'''
885 885 return lambda f: uipathfn(posixpath.join(subpath, f))
886 886
887 887
888 888 def anypats(pats, opts):
889 889 """Checks if any patterns, including --include and --exclude were given.
890 890
891 891 Some commands (e.g. addremove) use this condition for deciding whether to
892 892 print absolute or relative paths.
893 893 """
894 894 return bool(pats or opts.get(b'include') or opts.get(b'exclude'))
895 895
896 896
897 897 def expandpats(pats):
898 898 """Expand bare globs when running on windows.
899 899 On posix we assume it has already been done by sh."""
900 900 if not util.expandglobs:
901 901 return list(pats)
902 902 ret = []
903 903 for kindpat in pats:
904 904 kind, pat = matchmod._patsplit(kindpat, None)
905 905 if kind is None:
906 906 try:
907 907 globbed = glob.glob(pat)
908 908 except re.error:
909 909 globbed = [pat]
910 910 if globbed:
911 911 ret.extend(globbed)
912 912 continue
913 913 ret.append(kindpat)
914 914 return ret
915 915
916 916
917 917 def matchandpats(
918 918 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
919 919 ):
920 920 """Return a matcher and the patterns that were used.
921 921 The matcher will warn about bad matches, unless an alternate badfn callback
922 922 is provided."""
923 923 if opts is None:
924 924 opts = {}
925 925 if not globbed and default == b'relpath':
926 926 pats = expandpats(pats or [])
927 927
928 928 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
929 929
930 930 def bad(f, msg):
931 931 ctx.repo().ui.warn(b"%s: %s\n" % (uipathfn(f), msg))
932 932
933 933 if badfn is None:
934 934 badfn = bad
935 935
936 936 m = ctx.match(
937 937 pats,
938 938 opts.get(b'include'),
939 939 opts.get(b'exclude'),
940 940 default,
941 941 listsubrepos=opts.get(b'subrepos'),
942 942 badfn=badfn,
943 943 )
944 944
945 945 if m.always():
946 946 pats = []
947 947 return m, pats
948 948
949 949
950 950 def match(
951 951 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None
952 952 ):
953 953 '''Return a matcher that will warn about bad matches.'''
954 954 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
955 955
956 956
957 957 def matchall(repo):
958 958 '''Return a matcher that will efficiently match everything.'''
959 959 return matchmod.always()
960 960
961 961
962 962 def matchfiles(repo, files, badfn=None):
963 963 '''Return a matcher that will efficiently match exactly these files.'''
964 964 return matchmod.exact(files, badfn=badfn)
965 965
966 966
967 967 def parsefollowlinespattern(repo, rev, pat, msg):
968 968 """Return a file name from `pat` pattern suitable for usage in followlines
969 969 logic.
970 970 """
971 971 if not matchmod.patkind(pat):
972 972 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
973 973 else:
974 974 ctx = repo[rev]
975 975 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
976 976 files = [f for f in ctx if m(f)]
977 977 if len(files) != 1:
978 978 raise error.ParseError(msg)
979 979 return files[0]
980 980
981 981
982 982 def getorigvfs(ui, repo):
983 983 """return a vfs suitable to save 'orig' file
984 984
985 985 return None if no special directory is configured"""
986 986 origbackuppath = ui.config(b'ui', b'origbackuppath')
987 987 if not origbackuppath:
988 988 return None
989 989 return vfs.vfs(repo.wvfs.join(origbackuppath))
990 990
991 991
992 992 def backuppath(ui, repo, filepath):
993 993 """customize where working copy backup files (.orig files) are created
994 994
995 995 Fetch user defined path from config file: [ui] origbackuppath = <path>
996 996 Fall back to default (filepath with .orig suffix) if not specified
997 997
998 998 filepath is repo-relative
999 999
1000 1000 Returns an absolute path
1001 1001 """
1002 1002 origvfs = getorigvfs(ui, repo)
1003 1003 if origvfs is None:
1004 1004 return repo.wjoin(filepath + b".orig")
1005 1005
1006 1006 origbackupdir = origvfs.dirname(filepath)
1007 1007 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
1008 1008 ui.note(_(b'creating directory: %s\n') % origvfs.join(origbackupdir))
1009 1009
1010 1010 # Remove any files that conflict with the backup file's path
1011 1011 for f in reversed(list(pathutil.finddirs(filepath))):
1012 1012 if origvfs.isfileorlink(f):
1013 1013 ui.note(_(b'removing conflicting file: %s\n') % origvfs.join(f))
1014 1014 origvfs.unlink(f)
1015 1015 break
1016 1016
1017 1017 origvfs.makedirs(origbackupdir)
1018 1018
1019 1019 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
1020 1020 ui.note(
1021 1021 _(b'removing conflicting directory: %s\n') % origvfs.join(filepath)
1022 1022 )
1023 1023 origvfs.rmtree(filepath, forcibly=True)
1024 1024
1025 1025 return origvfs.join(filepath)
1026 1026
1027 1027
1028 1028 class _containsnode:
1029 1029 """proxy __contains__(node) to container.__contains__ which accepts revs"""
1030 1030
1031 1031 def __init__(self, repo, revcontainer):
1032 1032 self._torev = repo.changelog.rev
1033 1033 self._revcontains = revcontainer.__contains__
1034 1034
1035 1035 def __contains__(self, node):
1036 1036 return self._revcontains(self._torev(node))
1037 1037
1038 1038
1039 1039 def cleanupnodes(
1040 1040 repo,
1041 1041 replacements,
1042 1042 operation,
1043 1043 moves=None,
1044 1044 metadata=None,
1045 1045 fixphase=False,
1046 1046 targetphase=None,
1047 1047 backup=True,
1048 1048 ):
1049 1049 """do common cleanups when old nodes are replaced by new nodes
1050 1050
1051 1051 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
1052 1052 (we might also want to move working directory parent in the future)
1053 1053
1054 1054 By default, bookmark moves are calculated automatically from 'replacements',
1055 1055 but 'moves' can be used to override that. Also, 'moves' may include
1056 1056 additional bookmark moves that should not have associated obsmarkers.
1057 1057
1058 1058 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
1059 1059 have replacements. operation is a string, like "rebase".
1060 1060
1061 1061 metadata is a dictionary containing metadata to be stored in obsmarker if
1062 1062 obsolescence is enabled.
1063 1063 """
1064 1064 assert fixphase or targetphase is None
1065 1065 if not replacements and not moves:
1066 1066 return
1067 1067
1068 1068 # translate mapping's other forms
1069 1069 if not util.safehasattr(replacements, b'items'):
1070 1070 replacements = {(n,): () for n in replacements}
1071 1071 else:
1072 1072 # upgrading non-tuple "source" keys to tuple ones for BC
1073 1073 repls = {}
1074 1074 for key, value in replacements.items():
1075 1075 if not isinstance(key, tuple):
1076 1076 key = (key,)
1077 1077 repls[key] = value
1078 1078 replacements = repls
1079 1079
1080 1080 # Unfiltered repo is needed since nodes in replacements might be hidden.
1081 1081 unfi = repo.unfiltered()
1082 1082
1083 1083 # Calculate bookmark movements
1084 1084 if moves is None:
1085 1085 moves = {}
1086 1086 for oldnodes, newnodes in replacements.items():
1087 1087 for oldnode in oldnodes:
1088 1088 if oldnode in moves:
1089 1089 continue
1090 1090 if len(newnodes) > 1:
1091 1091 # usually a split, take the one with biggest rev number
1092 1092 newnode = next(unfi.set(b'max(%ln)', newnodes)).node()
1093 1093 elif len(newnodes) == 0:
1094 1094 # move bookmark backwards
1095 1095 allreplaced = []
1096 1096 for rep in replacements:
1097 1097 allreplaced.extend(rep)
1098 1098 roots = list(
1099 1099 unfi.set(b'max((::%n) - %ln)', oldnode, allreplaced)
1100 1100 )
1101 1101 if roots:
1102 1102 newnode = roots[0].node()
1103 1103 else:
1104 1104 newnode = repo.nullid
1105 1105 else:
1106 1106 newnode = newnodes[0]
1107 1107 moves[oldnode] = newnode
1108 1108
1109 1109 allnewnodes = [n for ns in replacements.values() for n in ns]
1110 1110 toretract = {}
1111 1111 toadvance = {}
1112 1112 if fixphase:
1113 1113 precursors = {}
1114 1114 for oldnodes, newnodes in replacements.items():
1115 1115 for oldnode in oldnodes:
1116 1116 for newnode in newnodes:
1117 1117 precursors.setdefault(newnode, []).append(oldnode)
1118 1118
1119 1119 allnewnodes.sort(key=lambda n: unfi[n].rev())
1120 1120 newphases = {}
1121 1121
1122 1122 def phase(ctx):
1123 1123 return newphases.get(ctx.node(), ctx.phase())
1124 1124
1125 1125 for newnode in allnewnodes:
1126 1126 ctx = unfi[newnode]
1127 1127 parentphase = max(phase(p) for p in ctx.parents())
1128 1128 if targetphase is None:
1129 1129 oldphase = max(
1130 1130 unfi[oldnode].phase() for oldnode in precursors[newnode]
1131 1131 )
1132 1132 newphase = max(oldphase, parentphase)
1133 1133 else:
1134 1134 newphase = max(targetphase, parentphase)
1135 1135 newphases[newnode] = newphase
1136 1136 if newphase > ctx.phase():
1137 1137 toretract.setdefault(newphase, []).append(newnode)
1138 1138 elif newphase < ctx.phase():
1139 1139 toadvance.setdefault(newphase, []).append(newnode)
1140 1140
1141 1141 with repo.transaction(b'cleanup') as tr:
1142 1142 # Move bookmarks
1143 1143 bmarks = repo._bookmarks
1144 1144 bmarkchanges = []
1145 1145 for oldnode, newnode in moves.items():
1146 1146 oldbmarks = repo.nodebookmarks(oldnode)
1147 1147 if not oldbmarks:
1148 1148 continue
1149 1149 from . import bookmarks # avoid import cycle
1150 1150
1151 1151 repo.ui.debug(
1152 1152 b'moving bookmarks %r from %s to %s\n'
1153 1153 % (
1154 1154 pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1155 1155 hex(oldnode),
1156 1156 hex(newnode),
1157 1157 )
1158 1158 )
1159 1159 # Delete divergent bookmarks being parents of related newnodes
1160 1160 deleterevs = repo.revs(
1161 1161 b'parents(roots(%ln & (::%n))) - parents(%n)',
1162 1162 allnewnodes,
1163 1163 newnode,
1164 1164 oldnode,
1165 1165 )
1166 1166 deletenodes = _containsnode(repo, deleterevs)
1167 1167 for name in oldbmarks:
1168 1168 bmarkchanges.append((name, newnode))
1169 1169 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1170 1170 bmarkchanges.append((b, None))
1171 1171
1172 1172 if bmarkchanges:
1173 1173 bmarks.applychanges(repo, tr, bmarkchanges)
1174 1174
1175 1175 for phase, nodes in toretract.items():
1176 1176 phases.retractboundary(repo, tr, phase, nodes)
1177 1177 for phase, nodes in toadvance.items():
1178 1178 phases.advanceboundary(repo, tr, phase, nodes)
1179 1179
1180 1180 mayusearchived = repo.ui.config(b'experimental', b'cleanup-as-archived')
1181 1181 # Obsolete or strip nodes
1182 1182 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1183 1183 # If a node is already obsoleted, and we want to obsolete it
1184 1184 # without a successor, skip that obsolete request since it's
1185 1185 # unnecessary. That's the "if s or not isobs(n)" check below.
1186 1186 # Also sort the nodes in topological order; that might be useful for
1187 1187 # some obsstore logic.
1188 1188 # NOTE: the sorting might belong to createmarkers.
1189 1189 torev = unfi.changelog.rev
1190 1190 sortfunc = lambda ns: torev(ns[0][0])
1191 1191 rels = []
1192 1192 for ns, s in sorted(replacements.items(), key=sortfunc):
1193 1193 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1194 1194 rels.append(rel)
1195 1195 if rels:
1196 1196 obsolete.createmarkers(
1197 1197 repo, rels, operation=operation, metadata=metadata
1198 1198 )
1199 1199 elif phases.supportarchived(repo) and mayusearchived:
1200 1200 # this assumes we do not have "unstable" nodes above the cleaned ones
1201 1201 allreplaced = set()
1202 1202 for ns in replacements.keys():
1203 1203 allreplaced.update(ns)
1204 1204 if backup:
1205 1205 from . import repair # avoid import cycle
1206 1206
1207 1207 node = min(allreplaced, key=repo.changelog.rev)
1208 1208 repair.backupbundle(
1209 1209 repo, allreplaced, allreplaced, node, operation
1210 1210 )
1211 1211 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1212 1212 else:
1213 1213 from . import repair # avoid import cycle
1214 1214
1215 1215 tostrip = list(n for ns in replacements for n in ns)
1216 1216 if tostrip:
1217 1217 repair.delayedstrip(
1218 1218 repo.ui, repo, tostrip, operation, backup=backup
1219 1219 )
1220 1220
1221 1221
1222 1222 def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None):
1223 1223 if opts is None:
1224 1224 opts = {}
1225 1225 m = matcher
1226 1226 dry_run = opts.get(b'dry_run')
1227 1227 try:
1228 1228 similarity = float(opts.get(b'similarity') or 0)
1229 1229 except ValueError:
1230 1230 raise error.InputError(_(b'similarity must be a number'))
1231 1231 if similarity < 0 or similarity > 100:
1232 1232 raise error.InputError(_(b'similarity must be between 0 and 100'))
1233 1233 similarity /= 100.0
1234 1234
1235 1235 ret = 0
1236 1236
1237 1237 wctx = repo[None]
1238 1238 for subpath in sorted(wctx.substate):
1239 1239 submatch = matchmod.subdirmatcher(subpath, m)
1240 1240 if opts.get(b'subrepos') or m.exact(subpath) or any(submatch.files()):
1241 1241 sub = wctx.sub(subpath)
1242 1242 subprefix = repo.wvfs.reljoin(prefix, subpath)
1243 1243 subuipathfn = subdiruipathfn(subpath, uipathfn)
1244 1244 try:
1245 1245 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1246 1246 ret = 1
1247 1247 except error.LookupError:
1248 1248 repo.ui.status(
1249 1249 _(b"skipping missing subrepository: %s\n")
1250 1250 % uipathfn(subpath)
1251 1251 )
1252 1252
1253 1253 rejected = []
1254 1254
1255 1255 def badfn(f, msg):
1256 1256 if f in m.files():
1257 1257 m.bad(f, msg)
1258 1258 rejected.append(f)
1259 1259
1260 1260 badmatch = matchmod.badmatch(m, badfn)
1261 1261 added, unknown, deleted, removed, forgotten = _interestingfiles(
1262 1262 repo, badmatch
1263 1263 )
1264 1264
1265 1265 unknownset = set(unknown + forgotten)
1266 1266 toprint = unknownset.copy()
1267 1267 toprint.update(deleted)
1268 1268 for abs in sorted(toprint):
1269 1269 if repo.ui.verbose or not m.exact(abs):
1270 1270 if abs in unknownset:
1271 1271 status = _(b'adding %s\n') % uipathfn(abs)
1272 1272 label = b'ui.addremove.added'
1273 1273 else:
1274 1274 status = _(b'removing %s\n') % uipathfn(abs)
1275 1275 label = b'ui.addremove.removed'
1276 1276 repo.ui.status(status, label=label)
1277 1277
1278 1278 renames = _findrenames(
1279 1279 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1280 1280 )
1281 1281
1282 1282 if not dry_run and (unknown or forgotten or deleted or renames):
1283 1283 if open_tr is not None:
1284 1284 open_tr()
1285 1285 _markchanges(repo, unknown + forgotten, deleted, renames)
1286 1286
1287 1287 for f in rejected:
1288 1288 if f in m.files():
1289 1289 return 1
1290 1290 return ret
1291 1291
1292 1292
1293 1293 def marktouched(repo, files, similarity=0.0):
1294 1294 """Assert that files have somehow been operated upon. files are relative to
1295 1295 the repo root."""
1296 1296 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1297 1297 rejected = []
1298 1298
1299 1299 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1300 1300
1301 1301 if repo.ui.verbose:
1302 1302 unknownset = set(unknown + forgotten)
1303 1303 toprint = unknownset.copy()
1304 1304 toprint.update(deleted)
1305 1305 for abs in sorted(toprint):
1306 1306 if abs in unknownset:
1307 1307 status = _(b'adding %s\n') % abs
1308 1308 else:
1309 1309 status = _(b'removing %s\n') % abs
1310 1310 repo.ui.status(status)
1311 1311
1312 1312 # TODO: We should probably have the caller pass in uipathfn and apply it to
1313 1313 # the messages above too. legacyrelativevalue=True is consistent with how
1314 1314 # it used to work.
1315 1315 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1316 1316 renames = _findrenames(
1317 1317 repo, m, added + unknown, removed + deleted, similarity, uipathfn
1318 1318 )
1319 1319
1320 1320 _markchanges(repo, unknown + forgotten, deleted, renames)
1321 1321
1322 1322 for f in rejected:
1323 1323 if f in m.files():
1324 1324 return 1
1325 1325 return 0
1326 1326
1327 1327
1328 1328 def _interestingfiles(repo, matcher):
1329 1329 """Walk dirstate with matcher, looking for files that addremove would care
1330 1330 about.
1331 1331
1332 1332 This is different from dirstate.status because it doesn't care about
1333 1333 whether files are modified or clean."""
1334 1334 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1335 1335 audit_path = pathutil.pathauditor(repo.root, cached=True)
1336 1336
1337 1337 ctx = repo[None]
1338 1338 dirstate = repo.dirstate
1339 1339 matcher = repo.narrowmatch(matcher, includeexact=True)
1340 1340 walkresults = dirstate.walk(
1341 1341 matcher,
1342 1342 subrepos=sorted(ctx.substate),
1343 1343 unknown=True,
1344 1344 ignored=False,
1345 1345 full=False,
1346 1346 )
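# The walk results below are classified from the dirstate entry plus the
# on-disk stat (st is None when the file is gone):
#   untracked and passing the path audit -> unknown
#   tracked (not removed) but missing    -> deleted
#   marked removed but still on disk     -> forgotten
#   marked removed and missing           -> removed (rename candidates)
#   marked added                         -> added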
1347 1347 for abs, st in walkresults.items():
1348 1348 entry = dirstate.get_entry(abs)
1349 1349 if (not entry.any_tracked) and audit_path.check(abs):
1350 1350 unknown.append(abs)
1351 1351 elif (not entry.removed) and not st:
1352 1352 deleted.append(abs)
1353 1353 elif entry.removed and st:
1354 1354 forgotten.append(abs)
1355 1355 # for finding renames
1356 1356 elif entry.removed and not st:
1357 1357 removed.append(abs)
1358 1358 elif entry.added:
1359 1359 added.append(abs)
1360 1360
1361 1361 return added, unknown, deleted, removed, forgotten
1362 1362
1363 1363
1364 1364 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1365 1365 '''Find renames from removed files to added ones.'''
1366 1366 renames = {}
1367 1367 if similarity > 0:
1368 1368 for old, new, score in similar.findrenames(
1369 1369 repo, added, removed, similarity
1370 1370 ):
1371 1371 if (
1372 1372 repo.ui.verbose
1373 1373 or not matcher.exact(old)
1374 1374 or not matcher.exact(new)
1375 1375 ):
1376 1376 repo.ui.status(
1377 1377 _(
1378 1378 b'recording removal of %s as rename to %s '
1379 1379 b'(%d%% similar)\n'
1380 1380 )
1381 1381 % (uipathfn(old), uipathfn(new), score * 100)
1382 1382 )
1383 1383 renames[new] = old
1384 1384 return renames
1385 1385
1386 1386
1387 1387 def _markchanges(repo, unknown, deleted, renames):
1388 1388 """Marks the files in unknown as added, the files in deleted as removed,
1389 1389 and the files in renames as copied."""
1390 1390 wctx = repo[None]
1391 1391 with repo.wlock():
1392 1392 wctx.forget(deleted)
1393 1393 wctx.add(unknown)
1394 1394 for new, old in renames.items():
1395 1395 wctx.copy(old, new)
1396 1396
1397 1397
1398 1398 def getrenamedfn(repo, endrev=None):
1399 1399 if copiesmod.usechangesetcentricalgo(repo):
1400 1400
1401 1401 def getrenamed(fn, rev):
1402 1402 ctx = repo[rev]
1403 1403 p1copies = ctx.p1copies()
1404 1404 if fn in p1copies:
1405 1405 return p1copies[fn]
1406 1406 p2copies = ctx.p2copies()
1407 1407 if fn in p2copies:
1408 1408 return p2copies[fn]
1409 1409 return None
1410 1410
1411 1411 return getrenamed
1412 1412
1413 1413 rcache = {}
1414 1414 if endrev is None:
1415 1415 endrev = len(repo)
1416 1416
1417 1417 def getrenamed(fn, rev):
1418 1418 """looks up all renames for a file (up to endrev) the first
1419 1419 time the file is given. It indexes on the changerev and only
1420 1420 parses the manifest if linkrev != changerev.
1421 1421 Returns rename info for fn at changerev rev."""
1422 1422 if fn not in rcache:
1423 1423 rcache[fn] = {}
1424 1424 fl = repo.file(fn)
1425 1425 for i in fl:
1426 1426 lr = fl.linkrev(i)
1427 1427 renamed = fl.renamed(fl.node(i))
1428 1428 rcache[fn][lr] = renamed and renamed[0]
1429 1429 if lr >= endrev:
1430 1430 break
1431 1431 if rev in rcache[fn]:
1432 1432 return rcache[fn][rev]
1433 1433
1434 1434 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1435 1435 # filectx logic.
1436 1436 try:
1437 1437 return repo[rev][fn].copysource()
1438 1438 except error.LookupError:
1439 1439 return None
1440 1440
1441 1441 return getrenamed
1442 1442
1443 1443
1444 1444 def getcopiesfn(repo, endrev=None):
1445 1445 if copiesmod.usechangesetcentricalgo(repo):
1446 1446
1447 1447 def copiesfn(ctx):
1448 1448 if ctx.p2copies():
1449 1449 allcopies = ctx.p1copies().copy()
1450 1450 # There should be no overlap
1451 1451 allcopies.update(ctx.p2copies())
1452 1452 return sorted(allcopies.items())
1453 1453 else:
1454 1454 return sorted(ctx.p1copies().items())
1455 1455
1456 1456 else:
1457 1457 getrenamed = getrenamedfn(repo, endrev)
1458 1458
1459 1459 def copiesfn(ctx):
1460 1460 copies = []
1461 1461 for fn in ctx.files():
1462 1462 rename = getrenamed(fn, ctx.rev())
1463 1463 if rename:
1464 1464 copies.append((fn, rename))
1465 1465 return copies
1466 1466
1467 1467 return copiesfn
1468 1468
1469 1469
1470 1470 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1471 1471 """Update the dirstate to reflect the intent of copying src to dst. For
1472 1472 different reasons it might not end with dst being marked as copied from src.
1473 1473 """
1474 1474 origsrc = repo.dirstate.copied(src) or src
1475 1475 if dst == origsrc: # copying back a copy?
1476 1476 entry = repo.dirstate.get_entry(dst)
1477 1477 if (entry.added or not entry.tracked) and not dryrun:
1478 1478 repo.dirstate.set_tracked(dst)
1479 1479 else:
1480 1480 if repo.dirstate.get_entry(origsrc).added and origsrc == src:
1481 1481 if not ui.quiet:
1482 1482 ui.warn(
1483 1483 _(
1484 1484 b"%s has not been committed yet, so no copy "
1485 1485 b"data will be stored for %s.\n"
1486 1486 )
1487 1487 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))
1488 1488 )
1489 1489 if not repo.dirstate.get_entry(dst).tracked and not dryrun:
1490 1490 wctx.add([dst])
1491 1491 elif not dryrun:
1492 1492 wctx.copy(origsrc, dst)
1493 1493
1494 1494
1495 1495 def movedirstate(repo, newctx, match=None):
1496 1496 """Move the dirstate to newctx and adjust it as necessary.
1497 1497
1498 1498 A matcher can be provided as an optimization. It is probably a bug to pass
1499 1499 a matcher that doesn't match all the differences between the parent of the
1500 1500 working copy and newctx.
1501 1501 """
1502 1502 oldctx = repo[b'.']
1503 1503 ds = repo.dirstate
1504 1504 copies = dict(ds.copies())
1505 1505 ds.setparents(newctx.node(), repo.nullid)
1506 1506 s = newctx.status(oldctx, match=match)
1507 1507
1508 1508 for f in s.modified:
1509 1509 ds.update_file_p1(f, p1_tracked=True)
1510 1510
1511 1511 for f in s.added:
1512 1512 ds.update_file_p1(f, p1_tracked=False)
1513 1513
1514 1514 for f in s.removed:
1515 1515 ds.update_file_p1(f, p1_tracked=True)
1516 1516
1517 1517 # Merge old parent and old working dir copies
1518 1518 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1519 1519 oldcopies.update(copies)
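# Remap each copy source one step through oldcopies itself, so that a
# chained copy (where src is itself recorded as copied) resolves to the
# earlier source, expressed relative to newctx.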
1520 1520 copies = {dst: oldcopies.get(src, src) for dst, src in oldcopies.items()}
1521 1521 # Adjust the dirstate copies
1522 1522 for dst, src in copies.items():
1523 1523 if src not in newctx or dst in newctx or not ds.get_entry(dst).added:
1524 1524 src = None
1525 1525 ds.copy(src, dst)
1526 1526 repo._quick_access_changeid_invalidate()
1527 1527
1528 1528
1529 1529 def filterrequirements(requirements):
1530 1530 """filters the requirements into two sets:
1531 1531
1532 1532 wcreq: requirements which should be written in .hg/requires
1533 1533 storereq: requirements which should be written in .hg/store/requires
1534 1534
1535 1535 Returns (wcreq, storereq)
1536 1536 """
1537 1537 if requirementsmod.SHARESAFE_REQUIREMENT in requirements:
1538 1538 wc, store = set(), set()
1539 1539 for r in requirements:
1540 1540 if r in requirementsmod.WORKING_DIR_REQUIREMENTS:
1541 1541 wc.add(r)
1542 1542 else:
1543 1543 store.add(r)
1544 1544 return wc, store
1545 1545 return requirements, None
1546 1546
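# Illustrative behavior (a sketch; it assumes b'share-safe' is listed in
# requirementsmod.WORKING_DIR_REQUIREMENTS, which is what makes the split
# possible at all):
#     filterrequirements({b'share-safe', b'store', b'revlogv1'})
#       -> ({b'share-safe'}, {b'store', b'revlogv1'})
#     filterrequirements({b'store', b'revlogv1'})
#       -> ({b'store', b'revlogv1'}, None)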
1547 1547
1548 1548 def istreemanifest(repo):
1549 1549 """returns whether the repository is using treemanifest or not"""
1550 1550 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements
1551 1551
1552 1552
1553 1553 def writereporequirements(repo, requirements=None):
1554 1554 """writes requirements for the repo
1555 1555
1556 1556 Requirements are written to .hg/requires and .hg/store/requires based
1557 1557 on whether share-safe mode is enabled and which requirements are wdir
1558 1558 requirements and which are store requirements
1559 1559 """
1560 1560 if requirements:
1561 1561 repo.requirements = requirements
1562 1562 wcreq, storereq = filterrequirements(repo.requirements)
1563 1563 if wcreq is not None:
1564 1564 writerequires(repo.vfs, wcreq)
1565 1565 if storereq is not None:
1566 1566 writerequires(repo.svfs, storereq)
1567 1567 elif repo.ui.configbool(b'format', b'usestore'):
1568 1568 # only remove store requires if we are using store
1569 1569 repo.svfs.tryunlink(b'requires')
1570 1570
1571 1571
1572 1572 def writerequires(opener, requirements):
1573 1573 with opener(b'requires', b'w', atomictemp=True) as fp:
1574 1574 for r in sorted(requirements):
1575 1575 fp.write(b"%s\n" % r)
1576 1576
1577 1577
1578 1578 class filecachesubentry:
1579 1579 def __init__(self, path, stat):
1580 1580 self.path = path
1581 1581 self.cachestat = None
1582 1582 self._cacheable = None
1583 1583
1584 1584 if stat:
1585 1585 self.cachestat = filecachesubentry.stat(self.path)
1586 1586
1587 1587 if self.cachestat:
1588 1588 self._cacheable = self.cachestat.cacheable()
1589 1589 else:
1590 1590 # None means we don't know yet
1591 1591 self._cacheable = None
1592 1592
1593 1593 def refresh(self):
1594 1594 if self.cacheable():
1595 1595 self.cachestat = filecachesubentry.stat(self.path)
1596 1596
1597 1597 def cacheable(self):
1598 1598 if self._cacheable is not None:
1599 1599 return self._cacheable
1600 1600
1601 1601 # we don't know yet, assume it is for now
1602 1602 return True
1603 1603
1604 1604 def changed(self):
1605 1605 # no point in going further if we can't cache it
1606 1606 if not self.cacheable():
1607 1607 return True
1608 1608
1609 1609 newstat = filecachesubentry.stat(self.path)
1610 1610
1611 1611 # we may not know if it's cacheable yet, check again now
1612 1612 if newstat and self._cacheable is None:
1613 1613 self._cacheable = newstat.cacheable()
1614 1614
1615 1615 # check again
1616 1616 if not self._cacheable:
1617 1617 return True
1618 1618
1619 1619 if self.cachestat != newstat:
1620 1620 self.cachestat = newstat
1621 1621 return True
1622 1622 else:
1623 1623 return False
1624 1624
1625 1625 @staticmethod
1626 1626 def stat(path):
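# a vanished file falls through to the implicit None return, which
# callers record as "no cached stat"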
1627 1627 try:
1628 1628 return util.cachestat(path)
1629 1629 except FileNotFoundError:
1630 1630 pass
1631 1631
1632 1632
1633 1633 class filecacheentry:
1634 1634 def __init__(self, paths, stat=True):
1635 1635 self._entries = []
1636 1636 for path in paths:
1637 1637 self._entries.append(filecachesubentry(path, stat))
1638 1638
1639 1639 def changed(self):
1640 1640 '''true if any entry has changed'''
1641 1641 for entry in self._entries:
1642 1642 if entry.changed():
1643 1643 return True
1644 1644 return False
1645 1645
1646 1646 def refresh(self):
1647 1647 for entry in self._entries:
1648 1648 entry.refresh()
1649 1649
1650 1650
1651 1651 class filecache:
1652 1652 """A property like decorator that tracks files under .hg/ for updates.
1653 1653
1654 1654 On first access, the files defined as arguments are stat()ed and the
1655 1655 results cached. The decorated function is called. The results are stashed
1656 1656 away in a ``_filecache`` dict on the object whose method is decorated.
1657 1657
1658 1658 On subsequent access, the cached result is returned directly, as it has
1659 1659 been set in the instance dictionary.
1660 1660
1661 1661 On external property set/delete operations, the caller must update the
1662 1662 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1663 1663 instead of directly setting <attr>.
1664 1664
1665 1665 When using the property API, the cached data is always used if available.
1666 1666 No stat() is performed to check if the file has changed.
1667 1667
1668 1668 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1669 1669 can populate an entry before the property's getter is called. In this case,
1670 1670 entries in ``_filecache`` will be used during property operations,
1671 1671 if available. If the underlying file changes, it is up to external callers
1672 1672 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1673 1673 method result as well as possibly calling ``del obj._filecache[attr]`` to
1674 1674 remove the ``filecacheentry``.
1675 1675 """
1676 1676
1677 1677 def __init__(self, *paths):
1678 1678 self.paths = paths
1679 1679
1680 1680 def tracked_paths(self, obj):
1681 1681 return [self.join(obj, path) for path in self.paths]
1682 1682
1683 1683 def join(self, obj, fname):
1684 1684 """Used to compute the runtime path of a cached file.
1685 1685
1686 1686 Users should subclass filecache and provide their own version of this
1687 1687 function to call the appropriate join function on 'obj' (an instance
1688 1688 of the class that its member function was decorated).
1689 1689 """
1690 1690 raise NotImplementedError
1691 1691
1692 1692 def __call__(self, func):
1693 1693 self.func = func
1694 1694 self.sname = func.__name__
1695 1695 self.name = pycompat.sysbytes(self.sname)
1696 1696 return self
1697 1697
1698 1698 def __get__(self, obj, type=None):
1699 1699 # if accessed on the class, return the descriptor itself.
1700 1700 if obj is None:
1701 1701 return self
1702 1702
1703 1703 assert self.sname not in obj.__dict__
1704 1704
1705 1705 entry = obj._filecache.get(self.name)
1706 1706
1707 1707 if entry:
1708 1708 if entry.changed():
1709 1709 entry.obj = self.func(obj)
1710 1710 else:
1711 1711 paths = self.tracked_paths(obj)
1712 1712
1713 1713 # We stat -before- creating the object so our cache doesn't lie if
1714 1714 # a writer modified the file between the time we read and stat
1715 1715 entry = filecacheentry(paths, True)
1716 1716 entry.obj = self.func(obj)
1717 1717
1718 1718 obj._filecache[self.name] = entry
1719 1719
1720 1720 obj.__dict__[self.sname] = entry.obj
1721 1721 return entry.obj
1722 1722
1723 1723 # don't implement __set__(), which would make __dict__ lookup as slow as
1724 1724 # a function call.
1725 1725
1726 1726 def set(self, obj, value):
1727 1727 if self.name not in obj._filecache:
1728 1728 # we add an entry for the missing value because X in __dict__
1729 1729 # implies X in _filecache
1730 1730 paths = self.tracked_paths(obj)
1731 1731 ce = filecacheentry(paths, False)
1732 1732 obj._filecache[self.name] = ce
1733 1733 else:
1734 1734 ce = obj._filecache[self.name]
1735 1735
1736 1736 ce.obj = value # update cached copy
1737 1737 obj.__dict__[self.sname] = value # update copy returned by obj.x
1738 1738
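# A concrete filecache must supply join(); a minimal sketch (similar
# helpers live in mercurial.localrepo):
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
# after which `@repofilecache(b'bookmarks')` caches a property until
# .hg/bookmarks changes on disk.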
1739 1739
1740 1740 def extdatasource(repo, source):
1741 1741 """Gather a map of rev -> value dict from the specified source
1742 1742
1743 1743 A source spec is treated as a URL, with a special-case 'shell:' type
1744 1744 for parsing the output of a shell command.
1745 1745
1746 1746 The data is parsed as a series of newline-separated records where
1747 1747 each record is a revision specifier optionally followed by a space
1748 1748 and a freeform string value. If the revision is known locally, it
1749 1749 is converted to a rev, otherwise the record is skipped.
1750 1750
1751 1751 Note that both key and value are treated as UTF-8 and converted to
1752 1752 the local encoding. This allows uniformity between local and
1753 1753 remote data sources.
1754 1754 """
1755 1755
1756 1756 spec = repo.ui.config(b"extdata", source)
1757 1757 if not spec:
1758 1758 raise error.Abort(_(b"unknown extdata source '%s'") % source)
1759 1759
1760 1760 data = {}
1761 1761 src = proc = None
1762 1762 try:
1763 1763 if spec.startswith(b"shell:"):
1764 1764 # external commands should be run relative to the repo root
1765 1765 cmd = spec[6:]
1766 1766 proc = subprocess.Popen(
1767 1767 procutil.tonativestr(cmd),
1768 1768 shell=True,
1769 1769 bufsize=-1,
1770 1770 close_fds=procutil.closefds,
1771 1771 stdout=subprocess.PIPE,
1772 1772 cwd=procutil.tonativestr(repo.root),
1773 1773 )
1774 1774 src = proc.stdout
1775 1775 else:
1776 1776 # treat as a URL or file
1777 1777 src = url.open(repo.ui, spec)
1778 1778 for l in src:
1779 1779 if b" " in l:
1780 1780 k, v = l.strip().split(b" ", 1)
1781 1781 else:
1782 1782 k, v = l.strip(), b""
1783 1783
1784 1784 k = encoding.tolocal(k)
1785 1785 try:
1786 1786 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1787 1787 except (error.LookupError, error.RepoLookupError, error.InputError):
1788 1788 pass # we ignore data for nodes that don't exist locally
1789 1789 finally:
1790 1790 if proc:
1791 1791 try:
1792 1792 proc.communicate()
1793 1793 except ValueError:
1794 1794 # This happens if we started iterating src and then
1795 1795 # get a parse error on a line. It should be safe to ignore.
1796 1796 pass
1797 1797 if src:
1798 1798 src.close()
1799 1799 if proc and proc.returncode != 0:
1800 1800 raise error.Abort(
1801 1801 _(b"extdata command '%s' failed: %s")
1802 1802 % (cmd, procutil.explainexit(proc.returncode))
1803 1803 )
1804 1804
1805 1805 return data
1806 1806
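# Illustrative configuration (a sketch; the section values are made up):
# with an hgrc containing
#     [extdata]
#     buginfo = shell:cat .hg/buginfo
# and .hg/buginfo holding records such as
#     9da65e3cf370 fixed in 4.2
#     default pending
# extdatasource(repo, b"buginfo") returns {rev: b'fixed in 4.2', ...},
# silently skipping records whose revision is unknown locally.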
1807 1807
1808 1808 class progress:
1809 1809 def __init__(self, ui, updatebar, topic, unit=b"", total=None):
1810 1810 self.ui = ui
1811 1811 self.pos = 0
1812 1812 self.topic = topic
1813 1813 self.unit = unit
1814 1814 self.total = total
1815 1815 self.debug = ui.configbool(b'progress', b'debug')
1816 1816 self._updatebar = updatebar
1817 1817
1818 1818 def __enter__(self):
1819 1819 return self
1820 1820
1821 1821 def __exit__(self, exc_type, exc_value, exc_tb):
1822 1822 self.complete()
1823 1823
1824 1824 def update(self, pos, item=b"", total=None):
1825 1825 assert pos is not None
1826 1826 if total:
1827 1827 self.total = total
1828 1828 self.pos = pos
1829 1829 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1830 1830 if self.debug:
1831 1831 self._printdebug(item)
1832 1832
1833 1833 def increment(self, step=1, item=b"", total=None):
1834 1834 self.update(self.pos + step, item, total)
1835 1835
1836 1836 def complete(self):
1837 1837 self.pos = None
1838 1838 self.unit = b""
1839 1839 self.total = None
1840 1840 self._updatebar(self.topic, self.pos, b"", self.unit, self.total)
1841 1841
1842 1842 def _printdebug(self, item):
1843 1843 unit = b''
1844 1844 if self.unit:
1845 1845 unit = b' ' + self.unit
1846 1846 if item:
1847 1847 item = b' ' + item
1848 1848
1849 1849 if self.total:
1850 1850 pct = 100.0 * self.pos / self.total
1851 1851 self.ui.debug(
1852 1852 b'%s:%s %d/%d%s (%4.2f%%)\n'
1853 1853 % (self.topic, item, self.pos, self.total, unit, pct)
1854 1854 )
1855 1855 else:
1856 1856 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1857 1857
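# Typical usage goes through ui.makeprogress(), which constructs this
# class; a minimal sketch:
#     with ui.makeprogress(b'scanning', unit=b'files', total=n) as prog:
#         for i, f in enumerate(files):
#             prog.increment(item=f)
# The context manager calls complete() on exit, clearing the bar.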
1858 1858
1859 1859 def gdinitconfig(ui):
1860 1860 """helper function to know if a repo should be created as general delta"""
1861 1861 # experimental config: format.generaldelta
1862 1862 return ui.configbool(b'format', b'generaldelta') or ui.configbool(
1863 1863 b'format', b'usegeneraldelta'
1864 1864 )
1865 1865
1866 1866
1867 1867 def gddeltaconfig(ui):
1868 1868 """helper function to know if incoming deltas should be optimized
1869 1869
1870 1870 The `format.generaldelta` config is an old form of the config that also
1871 1871 implies that incoming delta-bases should never be trusted. This function
1872 1872 exists for this purpose.
1873 1873 """
1874 1874 # experimental config: format.generaldelta
1875 1875 return ui.configbool(b'format', b'generaldelta')
1876 1876
1877 1877
1878 1878 class simplekeyvaluefile:
1879 1879 """A simple file with key=value lines
1880 1880
1881 1881 Keys must be alphanumeric and start with a letter; values must not
1882 1882 contain '\n' characters"""
1883 1883
1884 1884 firstlinekey = b'__firstline'
1885 1885
1886 1886 def __init__(self, vfs, path, keys=None):
1887 1887 self.vfs = vfs
1888 1888 self.path = path
1889 1889
1890 1890 def read(self, firstlinenonkeyval=False):
1891 1891 """Read the contents of a simple key-value file
1892 1892
1893 1893 'firstlinenonkeyval' indicates whether the first line of the file should
1894 1894 be treated as a key-value pair or returned fully under the
1895 1895 __firstline key."""
1896 1896 lines = self.vfs.readlines(self.path)
1897 1897 d = {}
1898 1898 if firstlinenonkeyval:
1899 1899 if not lines:
1900 1900 e = _(b"empty simplekeyvalue file")
1901 1901 raise error.CorruptedState(e)
1902 1902 # we don't want to include '\n' in the __firstline
1903 1903 d[self.firstlinekey] = lines[0][:-1]
1904 1904 del lines[0]
1905 1905
1906 1906 try:
1907 1907 # the 'if line.strip()' part prevents us from failing on empty
1908 1908 # lines which only contain '\n' therefore are not skipped
1909 1909 # by 'if line'
1910 1910 updatedict = dict(
1911 1911 line[:-1].split(b'=', 1) for line in lines if line.strip()
1912 1912 )
1913 1913 if self.firstlinekey in updatedict:
1914 1914 e = _(b"%r can't be used as a key")
1915 1915 raise error.CorruptedState(e % self.firstlinekey)
1916 1916 d.update(updatedict)
1917 1917 except ValueError as e:
1918 1918 raise error.CorruptedState(stringutil.forcebytestr(e))
1919 1919 return d
1920 1920
1921 1921 def write(self, data, firstline=None):
1922 1922 """Write key=>value mapping to a file
1923 1923 data is a dict. Keys must be alphanumerical and start with a letter.
1924 1924 Values must not contain newline characters.
1925 1925
1926 1926 If 'firstline' is not None, it is written to file before
1927 1927 everything else, as it is, not in a key=value form"""
1928 1928 lines = []
1929 1929 if firstline is not None:
1930 1930 lines.append(b'%s\n' % firstline)
1931 1931
1932 1932 for k, v in data.items():
1933 1933 if k == self.firstlinekey:
1934 1934 e = b"key name '%s' is reserved" % self.firstlinekey
1935 1935 raise error.ProgrammingError(e)
1936 1936 if not k[0:1].isalpha():
1937 1937 e = b"keys must start with a letter in a key-value file"
1938 1938 raise error.ProgrammingError(e)
1939 1939 if not k.isalnum():
1940 1940 e = b"invalid key name in a simple key-value file"
1941 1941 raise error.ProgrammingError(e)
1942 1942 if b'\n' in v:
1943 1943 e = b"invalid value in a simple key-value file"
1944 1944 raise error.ProgrammingError(e)
1945 1945 lines.append(b"%s=%s\n" % (k, v))
1946 1946 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp:
1947 1947 fp.write(b''.join(lines))
1948 1948
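# On-disk shape (illustrative):
#     write({b'version': b'1', b'exec': b'hg'}, firstline=b'header')
# produces, in dict order,
#     header
#     version=1
#     exec=hg
# and read(firstlinenonkeyval=True) returns
#     {b'__firstline': b'header', b'version': b'1', b'exec': b'hg'}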
1949 1949
1950 1950 _reportobsoletedsource = [
1951 1951 b'debugobsolete',
1952 1952 b'pull',
1953 1953 b'push',
1954 1954 b'serve',
1955 1955 b'unbundle',
1956 1956 ]
1957 1957
1958 1958 _reportnewcssource = [
1959 1959 b'pull',
1960 1960 b'unbundle',
1961 1961 ]
1962 1962
1963 1963
1964 1964 def prefetchfiles(repo, revmatches):
1965 1965 """Invokes the registered file prefetch functions, allowing extensions to
1966 1966 ensure the corresponding files are available locally, before the command
1967 1967 uses them.
1968 1968
1969 1969 Args:
1970 1970 revmatches: a list of (revision, match) tuples to indicate the files to
1971 1971 fetch at each revision. If any of the match elements is None, it matches
1972 1972 all files.
1973 1973 """
1974 1974
1975 1975 def _matcher(m):
1976 1976 if m:
1977 1977 assert isinstance(m, matchmod.basematcher)
1978 1978 # The command itself will complain about files that don't exist, so
1979 1979 # don't duplicate the message.
1980 1980 return matchmod.badmatch(m, lambda fn, msg: None)
1981 1981 else:
1982 1982 return matchall(repo)
1983 1983
1984 1984 revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
1985 1985
1986 1986 fileprefetchhooks(repo, revbadmatches)
1987 1987
1988 1988
1989 1989 # a list of (repo, revs, match) prefetch functions
1990 1990 fileprefetchhooks = util.hooks()
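# Extensions register prefetch callbacks on this hook point; a hedged
# sketch of the shape (the lfs extension registers something similar):
#     def _prefetch(repo, revmatches):
#         for rev, match in revmatches:
#             pass  # fetch the matched files for `rev` ahead of time
#     fileprefetchhooks.add(b'myext', _prefetch)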
1991 1991
1992 1992 # A marker that tells the evolve extension to suppress its own reporting
1993 1993 _reportstroubledchangesets = True
1994 1994
1995 1995
1996 1996 def registersummarycallback(repo, otr, txnname=b'', as_validator=False):
1997 1997 """register a callback to issue a summary after the transaction is closed
1998 1998
1999 1999 If as_validator is true, then the callbacks are registered as transaction
2000 2000 validators instead
2001 2001 """
2002 2002
2003 2003 def txmatch(sources):
2004 2004 return any(txnname.startswith(source) for source in sources)
2005 2005
2006 2006 categories = []
2007 2007
2008 2008 def reportsummary(func):
2009 2009 """decorator for report callbacks."""
2010 2010 # The repoview life cycle is shorter than the one of the actual
2011 2011 # underlying repository. So the filtered object can die before the
2012 2012 # weakref is used, leading to trouble. We keep a reference to the
2013 2013 # unfiltered object and restore the filtering when retrieving the
2014 2014 # repository through the weakref.
2015 2015 filtername = repo.filtername
2016 2016 reporef = weakref.ref(repo.unfiltered())
2017 2017
2018 2018 def wrapped(tr):
2019 2019 repo = reporef()
2020 2020 if filtername:
2021 2021 assert repo is not None # help pytype
2022 2022 repo = repo.filtered(filtername)
2023 2023 func(repo, tr)
2024 2024
2025 2025 newcat = b'%02i-txnreport' % len(categories)
2026 2026 if as_validator:
2027 2027 otr.addvalidator(newcat, wrapped)
2028 2028 else:
2029 2029 otr.addpostclose(newcat, wrapped)
2030 2030 categories.append(newcat)
2031 2031 return wrapped
2032 2032
2033 2033 @reportsummary
2034 2034 def reportchangegroup(repo, tr):
2035 2035 cgchangesets = tr.changes.get(b'changegroup-count-changesets', 0)
2036 2036 cgrevisions = tr.changes.get(b'changegroup-count-revisions', 0)
2037 2037 cgfiles = tr.changes.get(b'changegroup-count-files', 0)
2038 2038 cgheads = tr.changes.get(b'changegroup-count-heads', 0)
2039 2039 if cgchangesets or cgrevisions or cgfiles:
2040 2040 htext = b""
2041 2041 if cgheads:
2042 2042 htext = _(b" (%+d heads)") % cgheads
2043 2043 msg = _(b"added %d changesets with %d changes to %d files%s\n")
2044 2044 if as_validator:
2045 2045 msg = _(b"adding %d changesets with %d changes to %d files%s\n")
2046 2046 assert repo is not None # help pytype
2047 2047 repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))
2048 2048
2049 2049 if txmatch(_reportobsoletedsource):
2050 2050
2051 2051 @reportsummary
2052 2052 def reportobsoleted(repo, tr):
2053 2053 obsoleted = obsutil.getobsoleted(repo, tr)
2054 2054 newmarkers = len(tr.changes.get(b'obsmarkers', ()))
2055 2055 if newmarkers:
2056 2056 repo.ui.status(_(b'%i new obsolescence markers\n') % newmarkers)
2057 2057 if obsoleted:
2058 2058 msg = _(b'obsoleted %i changesets\n')
2059 2059 if as_validator:
2060 2060 msg = _(b'obsoleting %i changesets\n')
2061 2061 repo.ui.status(msg % len(obsoleted))
2062 2062
2063 2063 if obsolete.isenabled(
2064 2064 repo, obsolete.createmarkersopt
2065 2065 ) and repo.ui.configbool(
2066 2066 b'experimental', b'evolution.report-instabilities'
2067 2067 ):
2068 2068 instabilitytypes = [
2069 2069 (b'orphan', b'orphan'),
2070 2070 (b'phase-divergent', b'phasedivergent'),
2071 2071 (b'content-divergent', b'contentdivergent'),
2072 2072 ]
2073 2073
2074 2074 def getinstabilitycounts(repo):
2075 2075 filtered = repo.changelog.filteredrevs
2076 2076 counts = {}
2077 2077 for instability, revset in instabilitytypes:
2078 2078 counts[instability] = len(
2079 2079 set(obsolete.getrevs(repo, revset)) - filtered
2080 2080 )
2081 2081 return counts
2082 2082
2083 2083 oldinstabilitycounts = getinstabilitycounts(repo)
2084 2084
2085 2085 @reportsummary
2086 2086 def reportnewinstabilities(repo, tr):
2087 2087 newinstabilitycounts = getinstabilitycounts(repo)
2088 2088 for instability, revset in instabilitytypes:
2089 2089 delta = (
2090 2090 newinstabilitycounts[instability]
2091 2091 - oldinstabilitycounts[instability]
2092 2092 )
2093 2093 msg = getinstabilitymessage(delta, instability)
2094 2094 if msg:
2095 2095 repo.ui.warn(msg)
2096 2096
2097 2097 if txmatch(_reportnewcssource):
2098 2098
2099 2099 @reportsummary
2100 2100 def reportnewcs(repo, tr):
2101 2101 """Report the range of new revisions pulled/unbundled."""
2102 2102 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2103 2103 unfi = repo.unfiltered()
2104 2104 if origrepolen >= len(unfi):
2105 2105 return
2106 2106
2107 2107 # Compute the bounds of new visible revisions' range.
2108 2108 revs = smartset.spanset(repo, start=origrepolen)
2109 2109 if revs:
2110 2110 minrev, maxrev = repo[revs.min()], repo[revs.max()]
2111 2111
2112 2112 if minrev == maxrev:
2113 2113 revrange = minrev
2114 2114 else:
2115 2115 revrange = b'%s:%s' % (minrev, maxrev)
2116 2116 draft = len(repo.revs(b'%ld and draft()', revs))
2117 2117 secret = len(repo.revs(b'%ld and secret()', revs))
2118 2118 if not (draft or secret):
2119 2119 msg = _(b'new changesets %s\n') % revrange
2120 2120 elif draft and secret:
2121 2121 msg = _(b'new changesets %s (%d drafts, %d secrets)\n')
2122 2122 msg %= (revrange, draft, secret)
2123 2123 elif draft:
2124 2124 msg = _(b'new changesets %s (%d drafts)\n')
2125 2125 msg %= (revrange, draft)
2126 2126 elif secret:
2127 2127 msg = _(b'new changesets %s (%d secrets)\n')
2128 2128 msg %= (revrange, secret)
2129 2129 else:
2130 2130 errormsg = b'entered unreachable condition'
2131 2131 raise error.ProgrammingError(errormsg)
2132 2132 repo.ui.status(msg)
2133 2133
2134 2134 # search new changesets directly pulled as obsolete
2135 2135 duplicates = tr.changes.get(b'revduplicates', ())
2136 2136 obsadded = unfi.revs(
2137 2137 b'(%d: + %ld) and obsolete()', origrepolen, duplicates
2138 2138 )
2139 2139 cl = repo.changelog
2140 2140 extinctadded = [r for r in obsadded if r not in cl]
2141 2141 if extinctadded:
2142 2142 # They are not just obsolete, but obsolete and invisible
2143 2143 # we call them "extinct" internally but the term has not been
2144 2144 # exposed to users.
2145 2145 msg = b'(%d other changesets obsolete on arrival)\n'
2146 2146 repo.ui.status(msg % len(extinctadded))
2147 2147
2148 2148 @reportsummary
2149 2149 def reportphasechanges(repo, tr):
2150 2150 """Report statistics of phase changes for changesets pre-existing
2151 2151 pull/unbundle.
2152 2152 """
2153 2153 origrepolen = tr.changes.get(b'origrepolen', len(repo))
2154 2154 published = []
2155 2155 for revs, (old, new) in tr.changes.get(b'phases', []):
2156 2156 if new != phases.public:
2157 2157 continue
2158 2158 published.extend(rev for rev in revs if rev < origrepolen)
2159 2159 if not published:
2160 2160 return
2161 2161 msg = _(b'%d local changesets published\n')
2162 2162 if as_validator:
2163 2163 msg = _(b'%d local changesets will be published\n')
2164 2164 repo.ui.status(msg % len(published))
2165 2165
2166 2166
2167 2167 def getinstabilitymessage(delta, instability):
2168 2168 """function to return the message to show warning about new instabilities
2169 2169
2170 2170 exists as a separate function so that extension can wrap to show more
2171 2171 information like how to fix instabilities"""
2172 2172 if delta > 0:
2173 2173 return _(b'%i new %s changesets\n') % (delta, instability)
2174 2174
2175 2175
2176 2176 def nodesummaries(repo, nodes, maxnumnodes=4):
2177 2177 if len(nodes) <= maxnumnodes or repo.ui.verbose:
2178 2178 return b' '.join(short(h) for h in nodes)
2179 2179 first = b' '.join(short(h) for h in nodes[:maxnumnodes])
2180 2180 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes)
2181 2181
2182 2182
2183 2183 def enforcesinglehead(repo, tr, desc, accountclosed, filtername):
2184 2184 """check that no named branch has multiple heads"""
2185 2185 if desc in (b'strip', b'repair'):
2186 2186 # skip the logic during strip
2187 2187 return
2188 2188 visible = repo.filtered(filtername)
2189 2189 # possible improvement: we could restrict the check to affected branch
2190 2190 bm = visible.branchmap()
2191 2191 for name in bm:
2192 2192 heads = bm.branchheads(name, closed=accountclosed)
2193 2193 if len(heads) > 1:
2194 2194 msg = _(b'rejecting multiple heads on branch "%s"')
2195 2195 msg %= name
2196 2196 hint = _(b'%d heads: %s')
2197 2197 hint %= (len(heads), nodesummaries(repo, heads))
2198 2198 raise error.Abort(msg, hint=hint)
2199 2199
2200 2200
2201 2201 def wrapconvertsink(sink):
2202 2202 """Allow extensions to wrap the sink returned by convcmd.convertsink()
2203 2203 before it is used, whether or not the convert extension was formally loaded.
2204 2204 """
2205 2205 return sink
2206 2206
2207 2207
2208 2208 def unhidehashlikerevs(repo, specs, hiddentype):
2209 2209 """parse the user specs and unhide changesets whose hash or revision number
2210 2210 is passed.
2211 2211
2212 2212 hiddentype can be: 1) 'warn': warn while unhiding changesets
2213 2213 2) 'nowarn': don't warn while unhiding changesets
2214 2214
2215 2215 returns a repo object with the required changesets unhidden
2216 2216 """
2217 2217 if not specs:
2218 2218 return repo
2219 2219
2220 2220 if not repo.filtername or not repo.ui.configbool(
2221 2221 b'experimental', b'directaccess'
2222 2222 ):
2223 2223 return repo
2224 2224
2225 2225 if repo.filtername not in (b'visible', b'visible-hidden'):
2226 2226 return repo
2227 2227
2228 2228 symbols = set()
2229 2229 for spec in specs:
2230 2230 try:
2231 2231 tree = revsetlang.parse(spec)
2232 2232 except error.ParseError: # will be reported by scmutil.revrange()
2233 2233 continue
2234 2234
2235 2235 symbols.update(revsetlang.gethashlikesymbols(tree))
2236 2236
2237 2237 if not symbols:
2238 2238 return repo
2239 2239
2240 2240 revs = _getrevsfromsymbols(repo, symbols)
2241 2241
2242 2242 if not revs:
2243 2243 return repo
2244 2244
2245 2245 if hiddentype == b'warn':
2246 2246 unfi = repo.unfiltered()
2247 2247 revstr = b", ".join([pycompat.bytestr(unfi[l]) for l in revs])
2248 2248 repo.ui.warn(
2249 2249 _(
2250 2250 b"warning: accessing hidden changesets for write "
2251 2251 b"operation: %s\n"
2252 2252 )
2253 2253 % revstr
2254 2254 )
2255 2255
2256 2256 # we have to use a new filtername to separate branch/tags caches until we
2257 2257 # can disable these caches when revisions are dynamically pinned.
2258 2258 return repo.filtered(b'visible-hidden', revs)
2259 2259
2260 2260
2261 2261 def _getrevsfromsymbols(repo, symbols):
2262 2262 """parse the list of symbols and returns a set of revision numbers of hidden
2263 2263 changesets present in symbols"""
2264 2264 revs = set()
2265 2265 unfi = repo.unfiltered()
2266 2266 unficl = unfi.changelog
2267 2267 cl = repo.changelog
2268 2268 tiprev = len(unficl)
2269 2269 allowrevnums = repo.ui.configbool(b'experimental', b'directaccess.revnums')
2270 2270 for s in symbols:
2271 2271 try:
2272 2272 n = int(s)
2273 2273 if n <= tiprev:
2274 2274 if not allowrevnums:
2275 2275 continue
2276 2276 else:
2277 2277 if n not in cl:
2278 2278 revs.add(n)
2279 2279 continue
2280 2280 except ValueError:
2281 2281 pass
2282 2282
2283 2283 try:
2284 2284 s = resolvehexnodeidprefix(unfi, s)
2285 2285 except (error.LookupError, error.WdirUnsupported):
2286 2286 s = None
2287 2287
2288 2288 if s is not None:
2289 2289 rev = unficl.rev(s)
2290 2290 if rev not in cl:
2291 2291 revs.add(rev)
2292 2292
2293 2293 return revs
2294 2294
2295 2295
2296 2296 def bookmarkrevs(repo, mark):
2297 2297 """Select revisions reachable by a given bookmark
2298 2298
2299 2299 If the bookmarked revision isn't a head, an empty set will be returned.
2300 2300 """
2301 2301 return repo.revs(format_bookmark_revspec(mark))
2302 2302
2303 2303
2304 2304 def format_bookmark_revspec(mark):
2305 2305 """Build a revset expression to select revisions reachable by a given
2306 2306 bookmark"""
2307 2307 mark = b'literal:' + mark
2308 2308 return revsetlang.formatspec(
2309 2309 b"ancestors(bookmark(%s)) - "
2310 2310 b"ancestors(head() and not bookmark(%s)) - "
2311 2311 b"ancestors(bookmark() and not bookmark(%s))",
2312 2312 mark,
2313 2313 mark,
2314 2314 mark,
2315 2315 )
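# For mark=b'foo', the spec roughly expands to
#     ancestors(bookmark('literal:foo'))
#       - ancestors(head() and not bookmark('literal:foo'))
#       - ancestors(bookmark() and not bookmark('literal:foo'))
# i.e. everything reachable from the bookmark but not from other heads or
# from other bookmarks.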
2316
2317
2318 def ismember(ui, username, userlist):
2319 """Check if username is a member of userlist.
2320
2321 If userlist has a single '*' member, all users are considered members.
2322 Can be overridden by extensions to provide more complex authorization
2323 schemes.
2324 """
2325 return userlist == [b'*'] or username in userlist
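# Behavior sketch:
#     >>> ismember(ui, b'alice', [b'*'])
#     True
#     >>> ismember(ui, b'alice', [b'bob', b'carol'])
#     False
# The ui argument is unused here but kept so extensions that override this
# with richer authorization schemes can consult configuration.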