core: migrate uses of hashlib.sha1 to hashutil.sha1...
Augie Fackler
r44517:a61287a9 (branch: default)

The requested changes are too big and the content was truncated; only part of each file's diff is shown below.

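For context, the change in both files below is mechanical: each direct call to `hashlib.sha1` is replaced with `hashutil.sha1` from `mercurial/utils/hashutil.py`. That module is not part of this truncated diff; the following is only a hedged sketch of the kind of thin indirection such a module provides (an assumption, not verbatim from this changeset), whose point is to give a single place where a hardened SHA-1 implementation can be substituted without touching call sites.

```python
# Hypothetical sketch of a hashutil-style indirection module; the real
# mercurial/utils/hashutil.py is not shown in this truncated diff.
from __future__ import absolute_import

import hashlib

try:
    # Prefer a collision-detecting SHA-1 implementation (e.g. a vendored
    # sha1dc module) when one is available in the tree.  This relative
    # import is illustrative only.
    from ..thirdparty import sha1dc

    sha1 = sha1dc.sha1
except ImportError:
    # Fall back to the stock implementation; callers see the same API
    # and produce the same digests either way.
    sha1 = hashlib.sha1
```

With a wrapper like this in place, each call site only swaps its import, as the `_hashlist` hunk in chgserver.py below shows.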
--- a/mercurial/chgserver.py
+++ b/mercurial/chgserver.py
@@ -1,738 +1,738 @@
1 1 # chgserver.py - command server extension for cHg
2 2 #
3 3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """command server extension for cHg
9 9
10 10 'S' channel (read/write)
11 11 propagate ui.system() request to client
12 12
13 13 'attachio' command
14 14 attach client's stdio passed by sendmsg()
15 15
16 16 'chdir' command
17 17 change current directory
18 18
19 19 'setenv' command
20 20 replace os.environ completely
21 21
22 22 'setumask' command (DEPRECATED)
23 23 'setumask2' command
24 24 set umask
25 25
26 26 'validate' command
27 27 reload the config and check if the server is up to date
28 28
29 29 Config
30 30 ------
31 31
32 32 ::
33 33
34 34 [chgserver]
35 35 # how long (in seconds) should an idle chg server exit
36 36 idletimeout = 3600
37 37
38 38 # whether to skip config or env change checks
39 39 skiphash = False
40 40 """
41 41
42 42 from __future__ import absolute_import
43 43
44 import hashlib
45 44 import inspect
46 45 import os
47 46 import re
48 47 import socket
49 48 import stat
50 49 import struct
51 50 import time
52 51
53 52 from .i18n import _
54 53 from .pycompat import (
55 54 getattr,
56 55 setattr,
57 56 )
58 57
59 58 from . import (
60 59 commandserver,
61 60 encoding,
62 61 error,
63 62 extensions,
64 63 node,
65 64 pycompat,
66 65 util,
67 66 )
68 67
69 68 from .utils import (
69 hashutil,
70 70 procutil,
71 71 stringutil,
72 72 )
73 73
74 74
75 75 def _hashlist(items):
76 76 """return sha1 hexdigest for a list"""
77 return node.hex(hashlib.sha1(stringutil.pprint(items)).digest())
77 return node.hex(hashutil.sha1(stringutil.pprint(items)).digest())
78 78
79 79
80 80 # sensitive config sections affecting confighash
81 81 _configsections = [
82 82 b'alias', # affects global state commands.table
83 83 b'eol', # uses setconfig('eol', ...)
84 84 b'extdiff', # uisetup will register new commands
85 85 b'extensions',
86 86 ]
87 87
88 88 _configsectionitems = [
89 89 (b'commands', b'show.aliasprefix'), # show.py reads it in extsetup
90 90 ]
91 91
92 92 # sensitive environment variables affecting confighash
93 93 _envre = re.compile(
94 94 br'''\A(?:
95 95 CHGHG
96 96 |HG(?:DEMANDIMPORT|EMITWARNINGS|MODULEPOLICY|PROF|RCPATH)?
97 97 |HG(?:ENCODING|PLAIN).*
98 98 |LANG(?:UAGE)?
99 99 |LC_.*
100 100 |LD_.*
101 101 |PATH
102 102 |PYTHON.*
103 103 |TERM(?:INFO)?
104 104 |TZ
105 105 )\Z''',
106 106 re.X,
107 107 )
108 108
109 109
110 110 def _confighash(ui):
111 111 """return a quick hash for detecting config/env changes
112 112
113 113 confighash is the hash of sensitive config items and environment variables.
114 114
115 115 for chgserver, it is designed that once confighash changes, the server is
116 116 not qualified to serve its client and should redirect the client to a new
117 117 server. different from mtimehash, confighash change will not mark the
118 118 server outdated and exit since the user can have different configs at the
119 119 same time.
120 120 """
121 121 sectionitems = []
122 122 for section in _configsections:
123 123 sectionitems.append(ui.configitems(section))
124 124 for section, item in _configsectionitems:
125 125 sectionitems.append(ui.config(section, item))
126 126 sectionhash = _hashlist(sectionitems)
127 127 # If $CHGHG is set, the change to $HG should not trigger a new chg server
128 128 if b'CHGHG' in encoding.environ:
129 129 ignored = {b'HG'}
130 130 else:
131 131 ignored = set()
132 132 envitems = [
133 133 (k, v)
134 134 for k, v in pycompat.iteritems(encoding.environ)
135 135 if _envre.match(k) and k not in ignored
136 136 ]
137 137 envhash = _hashlist(sorted(envitems))
138 138 return sectionhash[:6] + envhash[:6]
139 139
140 140
141 141 def _getmtimepaths(ui):
142 142 """get a list of paths that should be checked to detect change
143 143
144 144 The list will include:
145 145 - extensions (will not cover all files for complex extensions)
146 146 - mercurial/__version__.py
147 147 - python binary
148 148 """
149 149 modules = [m for n, m in extensions.extensions(ui)]
150 150 try:
151 151 from . import __version__
152 152
153 153 modules.append(__version__)
154 154 except ImportError:
155 155 pass
156 156 files = []
157 157 if pycompat.sysexecutable:
158 158 files.append(pycompat.sysexecutable)
159 159 for m in modules:
160 160 try:
161 161 files.append(pycompat.fsencode(inspect.getabsfile(m)))
162 162 except TypeError:
163 163 pass
164 164 return sorted(set(files))
165 165
166 166
167 167 def _mtimehash(paths):
168 168 """return a quick hash for detecting file changes
169 169
170 170 mtimehash calls stat on given paths and calculate a hash based on size and
171 171 mtime of each file. mtimehash does not read file content because reading is
172 172 expensive. therefore it's not 100% reliable for detecting content changes.
173 173 it's possible to return different hashes for same file contents.
174 174 it's also possible to return a same hash for different file contents for
175 175 some carefully crafted situation.
176 176
177 177 for chgserver, it is designed that once mtimehash changes, the server is
178 178 considered outdated immediately and should no longer provide service.
179 179
180 180 mtimehash is not included in confighash because we only know the paths of
181 181 extensions after importing them (there is imp.find_module but that faces
182 182 race conditions). We need to calculate confighash without importing.
183 183 """
184 184
185 185 def trystat(path):
186 186 try:
187 187 st = os.stat(path)
188 188 return (st[stat.ST_MTIME], st.st_size)
189 189 except OSError:
190 190 # could be ENOENT, EPERM etc. not fatal in any case
191 191 pass
192 192
193 193 return _hashlist(pycompat.maplist(trystat, paths))[:12]
194 194
195 195
196 196 class hashstate(object):
197 197 """a structure storing confighash, mtimehash, paths used for mtimehash"""
198 198
199 199 def __init__(self, confighash, mtimehash, mtimepaths):
200 200 self.confighash = confighash
201 201 self.mtimehash = mtimehash
202 202 self.mtimepaths = mtimepaths
203 203
204 204 @staticmethod
205 205 def fromui(ui, mtimepaths=None):
206 206 if mtimepaths is None:
207 207 mtimepaths = _getmtimepaths(ui)
208 208 confighash = _confighash(ui)
209 209 mtimehash = _mtimehash(mtimepaths)
210 210 ui.log(
211 211 b'cmdserver',
212 212 b'confighash = %s mtimehash = %s\n',
213 213 confighash,
214 214 mtimehash,
215 215 )
216 216 return hashstate(confighash, mtimehash, mtimepaths)
217 217
218 218
219 219 def _newchgui(srcui, csystem, attachio):
220 220 class chgui(srcui.__class__):
221 221 def __init__(self, src=None):
222 222 super(chgui, self).__init__(src)
223 223 if src:
224 224 self._csystem = getattr(src, '_csystem', csystem)
225 225 else:
226 226 self._csystem = csystem
227 227
228 228 def _runsystem(self, cmd, environ, cwd, out):
229 229 # fallback to the original system method if
230 230 # a. the output stream is not stdout (e.g. stderr, cStringIO),
231 231 # b. or stdout is redirected by protectfinout(),
232 232 # because the chg client is not aware of these situations and
233 233 # will behave differently (i.e. write to stdout).
234 234 if (
235 235 out is not self.fout
236 236 or not util.safehasattr(self.fout, b'fileno')
237 237 or self.fout.fileno() != procutil.stdout.fileno()
238 238 or self._finoutredirected
239 239 ):
240 240 return procutil.system(cmd, environ=environ, cwd=cwd, out=out)
241 241 self.flush()
242 242 return self._csystem(cmd, procutil.shellenviron(environ), cwd)
243 243
244 244 def _runpager(self, cmd, env=None):
245 245 self._csystem(
246 246 cmd,
247 247 procutil.shellenviron(env),
248 248 type=b'pager',
249 249 cmdtable={b'attachio': attachio},
250 250 )
251 251 return True
252 252
253 253 return chgui(srcui)
254 254
255 255
256 256 def _loadnewui(srcui, args, cdebug):
257 257 from . import dispatch # avoid cycle
258 258
259 259 newui = srcui.__class__.load()
260 260 for a in [b'fin', b'fout', b'ferr', b'environ']:
261 261 setattr(newui, a, getattr(srcui, a))
262 262 if util.safehasattr(srcui, b'_csystem'):
263 263 newui._csystem = srcui._csystem
264 264
265 265 # command line args
266 266 options = dispatch._earlyparseopts(newui, args)
267 267 dispatch._parseconfig(newui, options[b'config'])
268 268
269 269 # stolen from tortoisehg.util.copydynamicconfig()
270 270 for section, name, value in srcui.walkconfig():
271 271 source = srcui.configsource(section, name)
272 272 if b':' in source or source == b'--config' or source.startswith(b'$'):
273 273 # path:line or command line, or environ
274 274 continue
275 275 newui.setconfig(section, name, value, source)
276 276
277 277 # load wd and repo config, copied from dispatch.py
278 278 cwd = options[b'cwd']
279 279 cwd = cwd and os.path.realpath(cwd) or None
280 280 rpath = options[b'repository']
281 281 path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
282 282
283 283 extensions.populateui(newui)
284 284 commandserver.setuplogging(newui, fp=cdebug)
285 285 if newui is not newlui:
286 286 extensions.populateui(newlui)
287 287 commandserver.setuplogging(newlui, fp=cdebug)
288 288
289 289 return (newui, newlui)
290 290
291 291
292 292 class channeledsystem(object):
293 293 """Propagate ui.system() request in the following format:
294 294
295 295 payload length (unsigned int),
296 296 type, '\0',
297 297 cmd, '\0',
298 298 cwd, '\0',
299 299 envkey, '=', val, '\0',
300 300 ...
301 301 envkey, '=', val
302 302
303 303 if type == 'system', waits for:
304 304
305 305 exitcode length (unsigned int),
306 306 exitcode (int)
307 307
308 308 if type == 'pager', repetitively waits for a command name ending with '\n'
309 309 and executes it defined by cmdtable, or exits the loop if the command name
310 310 is empty.
311 311 """
312 312
313 313 def __init__(self, in_, out, channel):
314 314 self.in_ = in_
315 315 self.out = out
316 316 self.channel = channel
317 317
318 318 def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
319 319 args = [type, procutil.quotecommand(cmd), os.path.abspath(cwd or b'.')]
320 320 args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ))
321 321 data = b'\0'.join(args)
322 322 self.out.write(struct.pack(b'>cI', self.channel, len(data)))
323 323 self.out.write(data)
324 324 self.out.flush()
325 325
326 326 if type == b'system':
327 327 length = self.in_.read(4)
328 328 (length,) = struct.unpack(b'>I', length)
329 329 if length != 4:
330 330 raise error.Abort(_(b'invalid response'))
331 331 (rc,) = struct.unpack(b'>i', self.in_.read(4))
332 332 return rc
333 333 elif type == b'pager':
334 334 while True:
335 335 cmd = self.in_.readline()[:-1]
336 336 if not cmd:
337 337 break
338 338 if cmdtable and cmd in cmdtable:
339 339 cmdtable[cmd]()
340 340 else:
341 341 raise error.Abort(_(b'unexpected command: %s') % cmd)
342 342 else:
343 343 raise error.ProgrammingError(b'invalid S channel type: %s' % type)
344 344
345 345
346 346 _iochannels = [
347 347 # server.ch, ui.fp, mode
348 348 (b'cin', b'fin', 'rb'),
349 349 (b'cout', b'fout', 'wb'),
350 350 (b'cerr', b'ferr', 'wb'),
351 351 ]
352 352
353 353
354 354 class chgcmdserver(commandserver.server):
355 355 def __init__(
356 356 self, ui, repo, fin, fout, sock, prereposetups, hashstate, baseaddress
357 357 ):
358 358 super(chgcmdserver, self).__init__(
359 359 _newchgui(ui, channeledsystem(fin, fout, b'S'), self.attachio),
360 360 repo,
361 361 fin,
362 362 fout,
363 363 prereposetups,
364 364 )
365 365 self.clientsock = sock
366 366 self._ioattached = False
367 367 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
368 368 self.hashstate = hashstate
369 369 self.baseaddress = baseaddress
370 370 if hashstate is not None:
371 371 self.capabilities = self.capabilities.copy()
372 372 self.capabilities[b'validate'] = chgcmdserver.validate
373 373
374 374 def cleanup(self):
375 375 super(chgcmdserver, self).cleanup()
376 376 # dispatch._runcatch() does not flush outputs if exception is not
377 377 # handled by dispatch._dispatch()
378 378 self.ui.flush()
379 379 self._restoreio()
380 380 self._ioattached = False
381 381
382 382 def attachio(self):
383 383 """Attach to client's stdio passed via unix domain socket; all
384 384 channels except cresult will no longer be used
385 385 """
386 386 # tell client to sendmsg() with 1-byte payload, which makes it
387 387 # distinctive from "attachio\n" command consumed by client.read()
388 388 self.clientsock.sendall(struct.pack(b'>cI', b'I', 1))
389 389 clientfds = util.recvfds(self.clientsock.fileno())
390 390 self.ui.log(b'chgserver', b'received fds: %r\n', clientfds)
391 391
392 392 ui = self.ui
393 393 ui.flush()
394 394 self._saveio()
395 395 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
396 396 assert fd > 0
397 397 fp = getattr(ui, fn)
398 398 os.dup2(fd, fp.fileno())
399 399 os.close(fd)
400 400 if self._ioattached:
401 401 continue
402 402 # reset buffering mode when client is first attached. as we want
403 403 # to see output immediately on pager, the mode stays unchanged
404 404 # when client re-attached. ferr is unchanged because it should
405 405 # be unbuffered no matter if it is a tty or not.
406 406 if fn == b'ferr':
407 407 newfp = fp
408 408 else:
409 409 # make it line buffered explicitly because the default is
410 410 # decided on first write(), where fout could be a pager.
411 411 if fp.isatty():
412 412 bufsize = 1 # line buffered
413 413 else:
414 414 bufsize = -1 # system default
415 415 newfp = os.fdopen(fp.fileno(), mode, bufsize)
416 416 setattr(ui, fn, newfp)
417 417 setattr(self, cn, newfp)
418 418
419 419 self._ioattached = True
420 420 self.cresult.write(struct.pack(b'>i', len(clientfds)))
421 421
422 422 def _saveio(self):
423 423 if self._oldios:
424 424 return
425 425 ui = self.ui
426 426 for cn, fn, _mode in _iochannels:
427 427 ch = getattr(self, cn)
428 428 fp = getattr(ui, fn)
429 429 fd = os.dup(fp.fileno())
430 430 self._oldios.append((ch, fp, fd))
431 431
432 432 def _restoreio(self):
433 433 ui = self.ui
434 434 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
435 435 newfp = getattr(ui, fn)
436 436 # close newfp while it's associated with client; otherwise it
437 437 # would be closed when newfp is deleted
438 438 if newfp is not fp:
439 439 newfp.close()
440 440 # restore original fd: fp is open again
441 441 os.dup2(fd, fp.fileno())
442 442 os.close(fd)
443 443 setattr(self, cn, ch)
444 444 setattr(ui, fn, fp)
445 445 del self._oldios[:]
446 446
447 447 def validate(self):
448 448 """Reload the config and check if the server is up to date
449 449
450 450 Read a list of '\0' separated arguments.
451 451 Write a non-empty list of '\0' separated instruction strings or '\0'
452 452 if the list is empty.
453 453 An instruction string could be either:
454 454 - "unlink $path", the client should unlink the path to stop the
455 455 outdated server.
456 456 - "redirect $path", the client should attempt to connect to $path
457 457 first. If it does not work, start a new server. It implies
458 458 "reconnect".
459 459 - "exit $n", the client should exit directly with code n.
460 460 This may happen if we cannot parse the config.
461 461 - "reconnect", the client should close the connection and
462 462 reconnect.
463 463 If neither "reconnect" nor "redirect" is included in the instruction
464 464 list, the client can continue with this server after completing all
465 465 the instructions.
466 466 """
467 467 from . import dispatch # avoid cycle
468 468
469 469 args = self._readlist()
470 470 try:
471 471 self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
472 472 except error.ParseError as inst:
473 473 dispatch._formatparse(self.ui.warn, inst)
474 474 self.ui.flush()
475 475 self.cresult.write(b'exit 255')
476 476 return
477 477 except error.Abort as inst:
478 478 self.ui.error(_(b"abort: %s\n") % inst)
479 479 if inst.hint:
480 480 self.ui.error(_(b"(%s)\n") % inst.hint)
481 481 self.ui.flush()
482 482 self.cresult.write(b'exit 255')
483 483 return
484 484 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
485 485 insts = []
486 486 if newhash.mtimehash != self.hashstate.mtimehash:
487 487 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
488 488 insts.append(b'unlink %s' % addr)
489 489 # mtimehash is empty if one or more extensions fail to load.
490 490 # to be compatible with hg, still serve the client this time.
491 491 if self.hashstate.mtimehash:
492 492 insts.append(b'reconnect')
493 493 if newhash.confighash != self.hashstate.confighash:
494 494 addr = _hashaddress(self.baseaddress, newhash.confighash)
495 495 insts.append(b'redirect %s' % addr)
496 496 self.ui.log(b'chgserver', b'validate: %s\n', stringutil.pprint(insts))
497 497 self.cresult.write(b'\0'.join(insts) or b'\0')
498 498
499 499 def chdir(self):
500 500 """Change current directory
501 501
502 502 Note that the behavior of --cwd option is bit different from this.
503 503 It does not affect --config parameter.
504 504 """
505 505 path = self._readstr()
506 506 if not path:
507 507 return
508 508 self.ui.log(b'chgserver', b"chdir to '%s'\n", path)
509 509 os.chdir(path)
510 510
511 511 def setumask(self):
512 512 """Change umask (DEPRECATED)"""
513 513 # BUG: this does not follow the message frame structure, but kept for
514 514 # backward compatibility with old chg clients for some time
515 515 self._setumask(self._read(4))
516 516
517 517 def setumask2(self):
518 518 """Change umask"""
519 519 data = self._readstr()
520 520 if len(data) != 4:
521 521 raise ValueError(b'invalid mask length in setumask2 request')
522 522 self._setumask(data)
523 523
524 524 def _setumask(self, data):
525 525 mask = struct.unpack(b'>I', data)[0]
526 526 self.ui.log(b'chgserver', b'setumask %r\n', mask)
527 527 os.umask(mask)
528 528
529 529 def runcommand(self):
530 530 # pager may be attached within the runcommand session, which should
531 531 # be detached at the end of the session. otherwise the pager wouldn't
532 532 # receive EOF.
533 533 globaloldios = self._oldios
534 534 self._oldios = []
535 535 try:
536 536 return super(chgcmdserver, self).runcommand()
537 537 finally:
538 538 self._restoreio()
539 539 self._oldios = globaloldios
540 540
541 541 def setenv(self):
542 542 """Clear and update os.environ
543 543
544 544 Note that not all variables can make an effect on the running process.
545 545 """
546 546 l = self._readlist()
547 547 try:
548 548 newenv = dict(s.split(b'=', 1) for s in l)
549 549 except ValueError:
550 550 raise ValueError(b'unexpected value in setenv request')
551 551 self.ui.log(b'chgserver', b'setenv: %r\n', sorted(newenv.keys()))
552 552
553 553 # Python3 has some logic to "coerce" the C locale to a UTF-8 capable
554 554 # one, and it sets LC_CTYPE in the environment to C.UTF-8 if none of
555 555 # 'LC_CTYPE', 'LC_ALL' or 'LANG' are set (to any value). This can be
556 556 # disabled with PYTHONCOERCECLOCALE=0 in the environment.
557 557 #
558 558 # When fromui is called via _inithashstate, python has already set
559 559 # this, so that's in the environment right when we start up the hg
560 560 # process. Then chg will call us and tell us to set the environment to
561 561 # the one it has; this might NOT have LC_CTYPE, so we'll need to
562 562 # carry-forward the LC_CTYPE that was coerced in these situations.
563 563 #
564 564 # If this is not handled, we will fail config+env validation and fail
565 565 # to start chg. If this is just ignored instead of carried forward, we
566 566 # may have different behavior between chg and non-chg.
567 567 if pycompat.ispy3:
568 568 # Rename for wordwrapping purposes
569 569 oldenv = encoding.environ
570 570 if not any(
571 571 e.get(b'PYTHONCOERCECLOCALE') == b'0' for e in [oldenv, newenv]
572 572 ):
573 573 keys = [b'LC_CTYPE', b'LC_ALL', b'LANG']
574 574 old_keys = [k for k, v in oldenv.items() if k in keys and v]
575 575 new_keys = [k for k, v in newenv.items() if k in keys and v]
576 576 # If the user's environment (from chg) doesn't have ANY of the
577 577 # keys that python looks for, and the environment (from
578 578 # initialization) has ONLY LC_CTYPE and it's set to C.UTF-8,
579 579 # carry it forward.
580 580 if (
581 581 not new_keys
582 582 and old_keys == [b'LC_CTYPE']
583 583 and oldenv[b'LC_CTYPE'] == b'C.UTF-8'
584 584 ):
585 585 newenv[b'LC_CTYPE'] = oldenv[b'LC_CTYPE']
586 586
587 587 encoding.environ.clear()
588 588 encoding.environ.update(newenv)
589 589
590 590 capabilities = commandserver.server.capabilities.copy()
591 591 capabilities.update(
592 592 {
593 593 b'attachio': attachio,
594 594 b'chdir': chdir,
595 595 b'runcommand': runcommand,
596 596 b'setenv': setenv,
597 597 b'setumask': setumask,
598 598 b'setumask2': setumask2,
599 599 }
600 600 )
601 601
602 602 if util.safehasattr(procutil, b'setprocname'):
603 603
604 604 def setprocname(self):
605 605 """Change process title"""
606 606 name = self._readstr()
607 607 self.ui.log(b'chgserver', b'setprocname: %r\n', name)
608 608 procutil.setprocname(name)
609 609
610 610 capabilities[b'setprocname'] = setprocname
611 611
612 612
613 613 def _tempaddress(address):
614 614 return b'%s.%d.tmp' % (address, os.getpid())
615 615
616 616
617 617 def _hashaddress(address, hashstr):
618 618 # if the basename of address contains '.', use only the left part. this
619 619 # makes it possible for the client to pass 'server.tmp$PID' and follow by
620 620 # an atomic rename to avoid locking when spawning new servers.
621 621 dirname, basename = os.path.split(address)
622 622 basename = basename.split(b'.', 1)[0]
623 623 return b'%s-%s' % (os.path.join(dirname, basename), hashstr)
624 624
625 625
626 626 class chgunixservicehandler(object):
627 627 """Set of operations for chg services"""
628 628
629 629 pollinterval = 1 # [sec]
630 630
631 631 def __init__(self, ui):
632 632 self.ui = ui
633 633 self._idletimeout = ui.configint(b'chgserver', b'idletimeout')
634 634 self._lastactive = time.time()
635 635
636 636 def bindsocket(self, sock, address):
637 637 self._inithashstate(address)
638 638 self._checkextensions()
639 639 self._bind(sock)
640 640 self._createsymlink()
641 641 # no "listening at" message should be printed to simulate hg behavior
642 642
643 643 def _inithashstate(self, address):
644 644 self._baseaddress = address
645 645 if self.ui.configbool(b'chgserver', b'skiphash'):
646 646 self._hashstate = None
647 647 self._realaddress = address
648 648 return
649 649 self._hashstate = hashstate.fromui(self.ui)
650 650 self._realaddress = _hashaddress(address, self._hashstate.confighash)
651 651
652 652 def _checkextensions(self):
653 653 if not self._hashstate:
654 654 return
655 655 if extensions.notloaded():
656 656 # one or more extensions failed to load. mtimehash becomes
657 657 # meaningless because we do not know the paths of those extensions.
658 658 # set mtimehash to an illegal hash value to invalidate the server.
659 659 self._hashstate.mtimehash = b''
660 660
661 661 def _bind(self, sock):
662 662 # use a unique temp address so we can stat the file and do ownership
663 663 # check later
664 664 tempaddress = _tempaddress(self._realaddress)
665 665 util.bindunixsocket(sock, tempaddress)
666 666 self._socketstat = os.stat(tempaddress)
667 667 sock.listen(socket.SOMAXCONN)
668 668 # rename will replace the old socket file if exists atomically. the
669 669 # old server will detect ownership change and exit.
670 670 util.rename(tempaddress, self._realaddress)
671 671
672 672 def _createsymlink(self):
673 673 if self._baseaddress == self._realaddress:
674 674 return
675 675 tempaddress = _tempaddress(self._baseaddress)
676 676 os.symlink(os.path.basename(self._realaddress), tempaddress)
677 677 util.rename(tempaddress, self._baseaddress)
678 678
679 679 def _issocketowner(self):
680 680 try:
681 681 st = os.stat(self._realaddress)
682 682 return (
683 683 st.st_ino == self._socketstat.st_ino
684 684 and st[stat.ST_MTIME] == self._socketstat[stat.ST_MTIME]
685 685 )
686 686 except OSError:
687 687 return False
688 688
689 689 def unlinksocket(self, address):
690 690 if not self._issocketowner():
691 691 return
692 692 # it is possible to have a race condition here that we may
693 693 # remove another server's socket file. but that's okay
694 694 # since that server will detect and exit automatically and
695 695 # the client will start a new server on demand.
696 696 util.tryunlink(self._realaddress)
697 697
698 698 def shouldexit(self):
699 699 if not self._issocketowner():
700 700 self.ui.log(
701 701 b'chgserver', b'%s is not owned, exiting.\n', self._realaddress
702 702 )
703 703 return True
704 704 if time.time() - self._lastactive > self._idletimeout:
705 705 self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
706 706 return True
707 707 return False
708 708
709 709 def newconnection(self):
710 710 self._lastactive = time.time()
711 711
712 712 def createcmdserver(self, repo, conn, fin, fout, prereposetups):
713 713 return chgcmdserver(
714 714 self.ui,
715 715 repo,
716 716 fin,
717 717 fout,
718 718 conn,
719 719 prereposetups,
720 720 self._hashstate,
721 721 self._baseaddress,
722 722 )
723 723
724 724
725 725 def chgunixservice(ui, repo, opts):
726 726 # CHGINTERNALMARK is set by chg client. It is an indication of things are
727 727 # started by chg so other code can do things accordingly, like disabling
728 728 # demandimport or detecting chg client started by chg client. When executed
729 729 # here, CHGINTERNALMARK is no longer useful and hence dropped to make
730 730 # environ cleaner.
731 731 if b'CHGINTERNALMARK' in encoding.environ:
732 732 del encoding.environ[b'CHGINTERNALMARK']
733 733
734 734 if repo:
735 735 # one chgserver can serve multiple repos. drop repo information
736 736 ui.setconfig(b'bundle', b'mainreporoot', b'', b'repo')
737 737 h = chgunixservicehandler(ui)
738 738 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
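Before the exchange.py hunks, here is a short standalone illustration of how the two truncated hashes built by `_confighash()` above combine. It is a sketch only: `hashlib.sha1` and `repr()` stand in for the real `hashutil.sha1` and `stringutil.pprint()`, and the sample config/environment items are made up. The digests themselves are unaffected by this commit, since `hashutil.sha1` is a drop-in replacement.

```python
# Standalone sketch of the confighash construction described in _confighash().
import hashlib


def _hashlist(items):
    """hex sha1 of a pretty-printed list, in the spirit of chgserver._hashlist"""
    return hashlib.sha1(repr(items).encode('ascii')).hexdigest()


# stand-ins for the sensitive config sections and environment variables
sectionitems = [[(b'alias', [])], [(b'extensions', [])]]
envitems = sorted({b'PATH': b'/usr/bin', b'LANG': b'C.UTF-8'}.items())

confighash = _hashlist(sectionitems)[:6] + _hashlist(envitems)[:6]
print(confighash)  # 12 hex chars; a change in either part redirects the client
```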
--- a/mercurial/exchange.py
+++ b/mercurial/exchange.py
@@ -1,3098 +1,3100 @@
1 1 # exchange.py - utility to exchange data between repos.
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 import hashlib
12 11
13 12 from .i18n import _
14 13 from .node import (
15 14 hex,
16 15 nullid,
17 16 nullrev,
18 17 )
19 18 from .thirdparty import attr
20 19 from . import (
21 20 bookmarks as bookmod,
22 21 bundle2,
23 22 changegroup,
24 23 discovery,
25 24 error,
26 25 exchangev2,
27 26 lock as lockmod,
28 27 logexchange,
29 28 narrowspec,
30 29 obsolete,
31 30 obsutil,
32 31 phases,
33 32 pushkey,
34 33 pycompat,
35 34 scmutil,
36 35 sslutil,
37 36 streamclone,
38 37 url as urlmod,
39 38 util,
40 39 wireprototypes,
41 40 )
42 41 from .interfaces import repository
43 from .utils import stringutil
42 from .utils import (
43 hashutil,
44 stringutil,
45 )
44 46
45 47 urlerr = util.urlerr
46 48 urlreq = util.urlreq
47 49
48 50 _NARROWACL_SECTION = b'narrowacl'
49 51
50 52 # Maps bundle version human names to changegroup versions.
51 53 _bundlespeccgversions = {
52 54 b'v1': b'01',
53 55 b'v2': b'02',
54 56 b'packed1': b's1',
55 57 b'bundle2': b'02', # legacy
56 58 }
57 59
58 60 # Maps bundle version with content opts to choose which part to bundle
59 61 _bundlespeccontentopts = {
60 62 b'v1': {
61 63 b'changegroup': True,
62 64 b'cg.version': b'01',
63 65 b'obsolescence': False,
64 66 b'phases': False,
65 67 b'tagsfnodescache': False,
66 68 b'revbranchcache': False,
67 69 },
68 70 b'v2': {
69 71 b'changegroup': True,
70 72 b'cg.version': b'02',
71 73 b'obsolescence': False,
72 74 b'phases': False,
73 75 b'tagsfnodescache': True,
74 76 b'revbranchcache': True,
75 77 },
76 78 b'packed1': {b'cg.version': b's1'},
77 79 }
78 80 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
79 81
80 82 _bundlespecvariants = {
81 83 b"streamv2": {
82 84 b"changegroup": False,
83 85 b"streamv2": True,
84 86 b"tagsfnodescache": False,
85 87 b"revbranchcache": False,
86 88 }
87 89 }
88 90
89 91 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
90 92 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
91 93
92 94
93 95 @attr.s
94 96 class bundlespec(object):
95 97 compression = attr.ib()
96 98 wirecompression = attr.ib()
97 99 version = attr.ib()
98 100 wireversion = attr.ib()
99 101 params = attr.ib()
100 102 contentopts = attr.ib()
101 103
102 104
103 105 def parsebundlespec(repo, spec, strict=True):
104 106 """Parse a bundle string specification into parts.
105 107
106 108 Bundle specifications denote a well-defined bundle/exchange format.
107 109 The content of a given specification should not change over time in
108 110 order to ensure that bundles produced by a newer version of Mercurial are
109 111 readable from an older version.
110 112
111 113 The string currently has the form:
112 114
113 115 <compression>-<type>[;<parameter0>[;<parameter1>]]
114 116
115 117 Where <compression> is one of the supported compression formats
116 118 and <type> is (currently) a version string. A ";" can follow the type and
117 119 all text afterwards is interpreted as URI encoded, ";" delimited key=value
118 120 pairs.
119 121
120 122 If ``strict`` is True (the default) <compression> is required. Otherwise,
121 123 it is optional.
122 124
123 125 Returns a bundlespec object of (compression, version, parameters).
124 126 Compression will be ``None`` if not in strict mode and a compression isn't
125 127 defined.
126 128
127 129 An ``InvalidBundleSpecification`` is raised when the specification is
128 130 not syntactically well formed.
129 131
130 132 An ``UnsupportedBundleSpecification`` is raised when the compression or
131 133 bundle type/version is not recognized.
132 134
133 135 Note: this function will likely eventually return a more complex data
134 136 structure, including bundle2 part information.
135 137 """
136 138
137 139 def parseparams(s):
138 140 if b';' not in s:
139 141 return s, {}
140 142
141 143 params = {}
142 144 version, paramstr = s.split(b';', 1)
143 145
144 146 for p in paramstr.split(b';'):
145 147 if b'=' not in p:
146 148 raise error.InvalidBundleSpecification(
147 149 _(
148 150 b'invalid bundle specification: '
149 151 b'missing "=" in parameter: %s'
150 152 )
151 153 % p
152 154 )
153 155
154 156 key, value = p.split(b'=', 1)
155 157 key = urlreq.unquote(key)
156 158 value = urlreq.unquote(value)
157 159 params[key] = value
158 160
159 161 return version, params
160 162
161 163 if strict and b'-' not in spec:
162 164 raise error.InvalidBundleSpecification(
163 165 _(
164 166 b'invalid bundle specification; '
165 167 b'must be prefixed with compression: %s'
166 168 )
167 169 % spec
168 170 )
169 171
170 172 if b'-' in spec:
171 173 compression, version = spec.split(b'-', 1)
172 174
173 175 if compression not in util.compengines.supportedbundlenames:
174 176 raise error.UnsupportedBundleSpecification(
175 177 _(b'%s compression is not supported') % compression
176 178 )
177 179
178 180 version, params = parseparams(version)
179 181
180 182 if version not in _bundlespeccgversions:
181 183 raise error.UnsupportedBundleSpecification(
182 184 _(b'%s is not a recognized bundle version') % version
183 185 )
184 186 else:
185 187 # Value could be just the compression or just the version, in which
186 188 # case some defaults are assumed (but only when not in strict mode).
187 189 assert not strict
188 190
189 191 spec, params = parseparams(spec)
190 192
191 193 if spec in util.compengines.supportedbundlenames:
192 194 compression = spec
193 195 version = b'v1'
194 196 # Generaldelta repos require v2.
195 197 if b'generaldelta' in repo.requirements:
196 198 version = b'v2'
197 199 # Modern compression engines require v2.
198 200 if compression not in _bundlespecv1compengines:
199 201 version = b'v2'
200 202 elif spec in _bundlespeccgversions:
201 203 if spec == b'packed1':
202 204 compression = b'none'
203 205 else:
204 206 compression = b'bzip2'
205 207 version = spec
206 208 else:
207 209 raise error.UnsupportedBundleSpecification(
208 210 _(b'%s is not a recognized bundle specification') % spec
209 211 )
210 212
211 213 # Bundle version 1 only supports a known set of compression engines.
212 214 if version == b'v1' and compression not in _bundlespecv1compengines:
213 215 raise error.UnsupportedBundleSpecification(
214 216 _(b'compression engine %s is not supported on v1 bundles')
215 217 % compression
216 218 )
217 219
218 220 # The specification for packed1 can optionally declare the data formats
219 221 # required to apply it. If we see this metadata, compare against what the
220 222 # repo supports and error if the bundle isn't compatible.
221 223 if version == b'packed1' and b'requirements' in params:
222 224 requirements = set(params[b'requirements'].split(b','))
223 225 missingreqs = requirements - repo.supportedformats
224 226 if missingreqs:
225 227 raise error.UnsupportedBundleSpecification(
226 228 _(b'missing support for repository features: %s')
227 229 % b', '.join(sorted(missingreqs))
228 230 )
229 231
230 232 # Compute contentopts based on the version
231 233 contentopts = _bundlespeccontentopts.get(version, {}).copy()
232 234
233 235 # Process the variants
234 236 if b"stream" in params and params[b"stream"] == b"v2":
235 237 variant = _bundlespecvariants[b"streamv2"]
236 238 contentopts.update(variant)
237 239
238 240 engine = util.compengines.forbundlename(compression)
239 241 compression, wirecompression = engine.bundletype()
240 242 wireversion = _bundlespeccgversions[version]
241 243
242 244 return bundlespec(
243 245 compression, wirecompression, version, wireversion, params, contentopts
244 246 )
245 247
246 248
247 249 def readbundle(ui, fh, fname, vfs=None):
248 250 header = changegroup.readexactly(fh, 4)
249 251
250 252 alg = None
251 253 if not fname:
252 254 fname = b"stream"
253 255 if not header.startswith(b'HG') and header.startswith(b'\0'):
254 256 fh = changegroup.headerlessfixup(fh, header)
255 257 header = b"HG10"
256 258 alg = b'UN'
257 259 elif vfs:
258 260 fname = vfs.join(fname)
259 261
260 262 magic, version = header[0:2], header[2:4]
261 263
262 264 if magic != b'HG':
263 265 raise error.Abort(_(b'%s: not a Mercurial bundle') % fname)
264 266 if version == b'10':
265 267 if alg is None:
266 268 alg = changegroup.readexactly(fh, 2)
267 269 return changegroup.cg1unpacker(fh, alg)
268 270 elif version.startswith(b'2'):
269 271 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
270 272 elif version == b'S1':
271 273 return streamclone.streamcloneapplier(fh)
272 274 else:
273 275 raise error.Abort(
274 276 _(b'%s: unknown bundle version %s') % (fname, version)
275 277 )
276 278
277 279
278 280 def getbundlespec(ui, fh):
279 281 """Infer the bundlespec from a bundle file handle.
280 282
281 283 The input file handle is seeked and the original seek position is not
282 284 restored.
283 285 """
284 286
285 287 def speccompression(alg):
286 288 try:
287 289 return util.compengines.forbundletype(alg).bundletype()[0]
288 290 except KeyError:
289 291 return None
290 292
291 293 b = readbundle(ui, fh, None)
292 294 if isinstance(b, changegroup.cg1unpacker):
293 295 alg = b._type
294 296 if alg == b'_truncatedBZ':
295 297 alg = b'BZ'
296 298 comp = speccompression(alg)
297 299 if not comp:
298 300 raise error.Abort(_(b'unknown compression algorithm: %s') % alg)
299 301 return b'%s-v1' % comp
300 302 elif isinstance(b, bundle2.unbundle20):
301 303 if b'Compression' in b.params:
302 304 comp = speccompression(b.params[b'Compression'])
303 305 if not comp:
304 306 raise error.Abort(
305 307 _(b'unknown compression algorithm: %s') % comp
306 308 )
307 309 else:
308 310 comp = b'none'
309 311
310 312 version = None
311 313 for part in b.iterparts():
312 314 if part.type == b'changegroup':
313 315 version = part.params[b'version']
314 316 if version in (b'01', b'02'):
315 317 version = b'v2'
316 318 else:
317 319 raise error.Abort(
318 320 _(
319 321 b'changegroup version %s does not have '
320 322 b'a known bundlespec'
321 323 )
322 324 % version,
323 325 hint=_(b'try upgrading your Mercurial client'),
324 326 )
325 327 elif part.type == b'stream2' and version is None:
326 328 # A stream2 part requires to be part of a v2 bundle
327 329 requirements = urlreq.unquote(part.params[b'requirements'])
328 330 splitted = requirements.split()
329 331 params = bundle2._formatrequirementsparams(splitted)
330 332 return b'none-v2;stream=v2;%s' % params
331 333
332 334 if not version:
333 335 raise error.Abort(
334 336 _(b'could not identify changegroup version in bundle')
335 337 )
336 338
337 339 return b'%s-%s' % (comp, version)
338 340 elif isinstance(b, streamclone.streamcloneapplier):
339 341 requirements = streamclone.readbundle1header(fh)[2]
340 342 formatted = bundle2._formatrequirementsparams(requirements)
341 343 return b'none-packed1;%s' % formatted
342 344 else:
343 345 raise error.Abort(_(b'unknown bundle type: %s') % b)
344 346
345 347
346 348 def _computeoutgoing(repo, heads, common):
347 349 """Computes which revs are outgoing given a set of common
348 350 and a set of heads.
349 351
350 352 This is a separate function so extensions can have access to
351 353 the logic.
352 354
353 355 Returns a discovery.outgoing object.
354 356 """
355 357 cl = repo.changelog
356 358 if common:
357 359 hasnode = cl.hasnode
358 360 common = [n for n in common if hasnode(n)]
359 361 else:
360 362 common = [nullid]
361 363 if not heads:
362 364 heads = cl.heads()
363 365 return discovery.outgoing(repo, common, heads)
364 366
365 367
366 368 def _checkpublish(pushop):
367 369 repo = pushop.repo
368 370 ui = repo.ui
369 371 behavior = ui.config(b'experimental', b'auto-publish')
370 372 if pushop.publish or behavior not in (b'warn', b'confirm', b'abort'):
371 373 return
372 374 remotephases = listkeys(pushop.remote, b'phases')
373 375 if not remotephases.get(b'publishing', False):
374 376 return
375 377
376 378 if pushop.revs is None:
377 379 published = repo.filtered(b'served').revs(b'not public()')
378 380 else:
379 381 published = repo.revs(b'::%ln - public()', pushop.revs)
380 382 if published:
381 383 if behavior == b'warn':
382 384 ui.warn(
383 385 _(b'%i changesets about to be published\n') % len(published)
384 386 )
385 387 elif behavior == b'confirm':
386 388 if ui.promptchoice(
387 389 _(b'push and publish %i changesets (yn)?$$ &Yes $$ &No')
388 390 % len(published)
389 391 ):
390 392 raise error.Abort(_(b'user quit'))
391 393 elif behavior == b'abort':
392 394 msg = _(b'push would publish %i changesets') % len(published)
393 395 hint = _(
394 396 b"use --publish or adjust 'experimental.auto-publish'"
395 397 b" config"
396 398 )
397 399 raise error.Abort(msg, hint=hint)
398 400
399 401
400 402 def _forcebundle1(op):
401 403 """return true if a pull/push must use bundle1
402 404
403 405 This function is used to allow testing of the older bundle version"""
404 406 ui = op.repo.ui
405 407 # The goal is this config is to allow developer to choose the bundle
406 408 # version used during exchanged. This is especially handy during test.
407 409 # Value is a list of bundle version to be picked from, highest version
408 410 # should be used.
409 411 #
410 412 # developer config: devel.legacy.exchange
411 413 exchange = ui.configlist(b'devel', b'legacy.exchange')
412 414 forcebundle1 = b'bundle2' not in exchange and b'bundle1' in exchange
413 415 return forcebundle1 or not op.remote.capable(b'bundle2')
414 416
415 417
416 418 class pushoperation(object):
417 419 """A object that represent a single push operation
418 420
419 421 Its purpose is to carry push related state and very common operations.
420 422
421 423 A new pushoperation should be created at the beginning of each push and
422 424 discarded afterward.
423 425 """
424 426
425 427 def __init__(
426 428 self,
427 429 repo,
428 430 remote,
429 431 force=False,
430 432 revs=None,
431 433 newbranch=False,
432 434 bookmarks=(),
433 435 publish=False,
434 436 pushvars=None,
435 437 ):
436 438 # repo we push from
437 439 self.repo = repo
438 440 self.ui = repo.ui
439 441 # repo we push to
440 442 self.remote = remote
441 443 # force option provided
442 444 self.force = force
443 445 # revs to be pushed (None is "all")
444 446 self.revs = revs
445 447 # bookmark explicitly pushed
446 448 self.bookmarks = bookmarks
447 449 # allow push of new branch
448 450 self.newbranch = newbranch
449 451 # step already performed
450 452 # (used to check what steps have been already performed through bundle2)
451 453 self.stepsdone = set()
452 454 # Integer version of the changegroup push result
453 455 # - None means nothing to push
454 456 # - 0 means HTTP error
455 457 # - 1 means we pushed and remote head count is unchanged *or*
456 458 # we have outgoing changesets but refused to push
457 459 # - other values as described by addchangegroup()
458 460 self.cgresult = None
459 461 # Boolean value for the bookmark push
460 462 self.bkresult = None
461 463 # discover.outgoing object (contains common and outgoing data)
462 464 self.outgoing = None
463 465 # all remote topological heads before the push
464 466 self.remoteheads = None
465 467 # Details of the remote branch pre and post push
466 468 #
467 469 # mapping: {'branch': ([remoteheads],
468 470 # [newheads],
469 471 # [unsyncedheads],
470 472 # [discardedheads])}
471 473 # - branch: the branch name
472 474 # - remoteheads: the list of remote heads known locally
473 475 # None if the branch is new
474 476 # - newheads: the new remote heads (known locally) with outgoing pushed
475 477 # - unsyncedheads: the list of remote heads unknown locally.
476 478 # - discardedheads: the list of remote heads made obsolete by the push
477 479 self.pushbranchmap = None
478 480 # testable as a boolean indicating if any nodes are missing locally.
479 481 self.incoming = None
480 482 # summary of the remote phase situation
481 483 self.remotephases = None
482 484 # phases changes that must be pushed along side the changesets
483 485 self.outdatedphases = None
484 486 # phases changes that must be pushed if changeset push fails
485 487 self.fallbackoutdatedphases = None
486 488 # outgoing obsmarkers
487 489 self.outobsmarkers = set()
488 490 # outgoing bookmarks, list of (bm, oldnode | '', newnode | '')
489 491 self.outbookmarks = []
490 492 # transaction manager
491 493 self.trmanager = None
492 494 # map { pushkey partid -> callback handling failure}
493 495 # used to handle exception from mandatory pushkey part failure
494 496 self.pkfailcb = {}
495 497 # an iterable of pushvars or None
496 498 self.pushvars = pushvars
497 499 # publish pushed changesets
498 500 self.publish = publish
499 501
500 502 @util.propertycache
501 503 def futureheads(self):
502 504 """future remote heads if the changeset push succeeds"""
503 505 return self.outgoing.missingheads
504 506
505 507 @util.propertycache
506 508 def fallbackheads(self):
507 509 """future remote heads if the changeset push fails"""
508 510 if self.revs is None:
509 511 # not target to push, all common are relevant
510 512 return self.outgoing.commonheads
511 513 unfi = self.repo.unfiltered()
512 514 # I want cheads = heads(::missingheads and ::commonheads)
513 515 # (missingheads is revs with secret changeset filtered out)
514 516 #
515 517 # This can be expressed as:
516 518 # cheads = ( (missingheads and ::commonheads)
517 519 # + (commonheads and ::missingheads))"
518 520 # )
519 521 #
520 522 # while trying to push we already computed the following:
521 523 # common = (::commonheads)
522 524 # missing = ((commonheads::missingheads) - commonheads)
523 525 #
524 526 # We can pick:
525 527 # * missingheads part of common (::commonheads)
526 528 common = self.outgoing.common
527 529 rev = self.repo.changelog.index.rev
528 530 cheads = [node for node in self.revs if rev(node) in common]
529 531 # and
530 532 # * commonheads parents on missing
531 533 revset = unfi.set(
532 534 b'%ln and parents(roots(%ln))',
533 535 self.outgoing.commonheads,
534 536 self.outgoing.missing,
535 537 )
536 538 cheads.extend(c.node() for c in revset)
537 539 return cheads
538 540
539 541 @property
540 542 def commonheads(self):
541 543 """set of all common heads after changeset bundle push"""
542 544 if self.cgresult:
543 545 return self.futureheads
544 546 else:
545 547 return self.fallbackheads
546 548
547 549
548 550 # mapping of message used when pushing bookmark
549 551 bookmsgmap = {
550 552 b'update': (
551 553 _(b"updating bookmark %s\n"),
552 554 _(b'updating bookmark %s failed!\n'),
553 555 ),
554 556 b'export': (
555 557 _(b"exporting bookmark %s\n"),
556 558 _(b'exporting bookmark %s failed!\n'),
557 559 ),
558 560 b'delete': (
559 561 _(b"deleting remote bookmark %s\n"),
560 562 _(b'deleting remote bookmark %s failed!\n'),
561 563 ),
562 564 }
563 565
564 566
565 567 def push(
566 568 repo,
567 569 remote,
568 570 force=False,
569 571 revs=None,
570 572 newbranch=False,
571 573 bookmarks=(),
572 574 publish=False,
573 575 opargs=None,
574 576 ):
575 577 '''Push outgoing changesets (limited by revs) from a local
576 578 repository to remote. Return an integer:
577 579 - None means nothing to push
578 580 - 0 means HTTP error
579 581 - 1 means we pushed and remote head count is unchanged *or*
580 582 we have outgoing changesets but refused to push
581 583 - other values as described by addchangegroup()
582 584 '''
583 585 if opargs is None:
584 586 opargs = {}
585 587 pushop = pushoperation(
586 588 repo,
587 589 remote,
588 590 force,
589 591 revs,
590 592 newbranch,
591 593 bookmarks,
592 594 publish,
593 595 **pycompat.strkwargs(opargs)
594 596 )
595 597 if pushop.remote.local():
596 598 missing = (
597 599 set(pushop.repo.requirements) - pushop.remote.local().supported
598 600 )
599 601 if missing:
600 602 msg = _(
601 603 b"required features are not"
602 604 b" supported in the destination:"
603 605 b" %s"
604 606 ) % (b', '.join(sorted(missing)))
605 607 raise error.Abort(msg)
606 608
607 609 if not pushop.remote.canpush():
608 610 raise error.Abort(_(b"destination does not support push"))
609 611
610 612 if not pushop.remote.capable(b'unbundle'):
611 613 raise error.Abort(
612 614 _(
613 615 b'cannot push: destination does not support the '
614 616 b'unbundle wire protocol command'
615 617 )
616 618 )
617 619
618 620 # get lock as we might write phase data
619 621 wlock = lock = None
620 622 try:
621 623 # bundle2 push may receive a reply bundle touching bookmarks
622 624 # requiring the wlock. Take it now to ensure proper ordering.
623 625 maypushback = pushop.ui.configbool(b'experimental', b'bundle2.pushback')
624 626 if (
625 627 (not _forcebundle1(pushop))
626 628 and maypushback
627 629 and not bookmod.bookmarksinstore(repo)
628 630 ):
629 631 wlock = pushop.repo.wlock()
630 632 lock = pushop.repo.lock()
631 633 pushop.trmanager = transactionmanager(
632 634 pushop.repo, b'push-response', pushop.remote.url()
633 635 )
634 636 except error.LockUnavailable as err:
635 637 # source repo cannot be locked.
636 638 # We do not abort the push, but just disable the local phase
637 639 # synchronisation.
638 640 msg = b'cannot lock source repository: %s\n' % stringutil.forcebytestr(
639 641 err
640 642 )
641 643 pushop.ui.debug(msg)
642 644
643 645 with wlock or util.nullcontextmanager():
644 646 with lock or util.nullcontextmanager():
645 647 with pushop.trmanager or util.nullcontextmanager():
646 648 pushop.repo.checkpush(pushop)
647 649 _checkpublish(pushop)
648 650 _pushdiscovery(pushop)
649 651 if not pushop.force:
650 652 _checksubrepostate(pushop)
651 653 if not _forcebundle1(pushop):
652 654 _pushbundle2(pushop)
653 655 _pushchangeset(pushop)
654 656 _pushsyncphase(pushop)
655 657 _pushobsolete(pushop)
656 658 _pushbookmark(pushop)
657 659
658 660 if repo.ui.configbool(b'experimental', b'remotenames'):
659 661 logexchange.pullremotenames(repo, remote)
660 662
661 663 return pushop
662 664
663 665
664 666 # list of steps to perform discovery before push
665 667 pushdiscoveryorder = []
666 668
667 669 # Mapping between step name and function
668 670 #
669 671 # This exists to help extensions wrap steps if necessary
670 672 pushdiscoverymapping = {}
671 673
672 674
673 675 def pushdiscovery(stepname):
674 676 """decorator for function performing discovery before push
675 677
676 678 The function is added to the step -> function mapping and appended to the
677 679 list of steps. Beware that decorated function will be added in order (this
678 680 may matter).
679 681
680 682 You can only use this decorator for a new step, if you want to wrap a step
681 683 from an extension, change the pushdiscovery dictionary directly."""
682 684
683 685 def dec(func):
684 686 assert stepname not in pushdiscoverymapping
685 687 pushdiscoverymapping[stepname] = func
686 688 pushdiscoveryorder.append(stepname)
687 689 return func
688 690
689 691 return dec
690 692
691 693
692 694 def _pushdiscovery(pushop):
693 695 """Run all discovery steps"""
694 696 for stepname in pushdiscoveryorder:
695 697 step = pushdiscoverymapping[stepname]
696 698 step(pushop)
697 699
698 700
699 701 def _checksubrepostate(pushop):
700 702 """Ensure all outgoing referenced subrepo revisions are present locally"""
701 703 for n in pushop.outgoing.missing:
702 704 ctx = pushop.repo[n]
703 705
704 706 if b'.hgsub' in ctx.manifest() and b'.hgsubstate' in ctx.files():
705 707 for subpath in sorted(ctx.substate):
706 708 sub = ctx.sub(subpath)
707 709 sub.verify(onpush=True)
708 710
709 711
710 712 @pushdiscovery(b'changeset')
711 713 def _pushdiscoverychangeset(pushop):
712 714 """discover the changeset that need to be pushed"""
713 715 fci = discovery.findcommonincoming
714 716 if pushop.revs:
715 717 commoninc = fci(
716 718 pushop.repo,
717 719 pushop.remote,
718 720 force=pushop.force,
719 721 ancestorsof=pushop.revs,
720 722 )
721 723 else:
722 724 commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
723 725 common, inc, remoteheads = commoninc
724 726 fco = discovery.findcommonoutgoing
725 727 outgoing = fco(
726 728 pushop.repo,
727 729 pushop.remote,
728 730 onlyheads=pushop.revs,
729 731 commoninc=commoninc,
730 732 force=pushop.force,
731 733 )
732 734 pushop.outgoing = outgoing
733 735 pushop.remoteheads = remoteheads
734 736 pushop.incoming = inc
735 737
736 738
737 739 @pushdiscovery(b'phase')
738 740 def _pushdiscoveryphase(pushop):
739 741 """discover the phase that needs to be pushed
740 742
741 743 (computed for both success and failure case for changesets push)"""
742 744 outgoing = pushop.outgoing
743 745 unfi = pushop.repo.unfiltered()
744 746 remotephases = listkeys(pushop.remote, b'phases')
745 747
746 748 if (
747 749 pushop.ui.configbool(b'ui', b'_usedassubrepo')
748 750 and remotephases # server supports phases
749 751 and not pushop.outgoing.missing # no changesets to be pushed
750 752 and remotephases.get(b'publishing', False)
751 753 ):
752 754 # When:
753 755 # - this is a subrepo push
754 756 # - and remote support phase
755 757 # - and no changeset are to be pushed
756 758 # - and remote is publishing
757 759 # We may be in issue 3781 case!
758 760 # We drop the possible phase synchronisation done by
759 761 # courtesy to publish changesets possibly locally draft
760 762 # on the remote.
761 763 pushop.outdatedphases = []
762 764 pushop.fallbackoutdatedphases = []
763 765 return
764 766
765 767 pushop.remotephases = phases.remotephasessummary(
766 768 pushop.repo, pushop.fallbackheads, remotephases
767 769 )
768 770 droots = pushop.remotephases.draftroots
769 771
770 772 extracond = b''
771 773 if not pushop.remotephases.publishing:
772 774 extracond = b' and public()'
773 775 revset = b'heads((%%ln::%%ln) %s)' % extracond
774 776 # Get the list of all revs draft on remote by public here.
775 777 # XXX Beware that revset break if droots is not strictly
776 778 # XXX root we may want to ensure it is but it is costly
777 779 fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
778 780 if not pushop.remotephases.publishing and pushop.publish:
779 781 future = list(
780 782 unfi.set(
781 783 b'%ln and (not public() or %ln::)', pushop.futureheads, droots
782 784 )
783 785 )
784 786 elif not outgoing.missing:
785 787 future = fallback
786 788 else:
787 789 # adds changeset we are going to push as draft
788 790 #
789 791 # should not be necessary for publishing server, but because of an
790 792 # issue fixed in xxxxx we have to do it anyway.
791 793 fdroots = list(
792 794 unfi.set(b'roots(%ln + %ln::)', outgoing.missing, droots)
793 795 )
794 796 fdroots = [f.node() for f in fdroots]
795 797 future = list(unfi.set(revset, fdroots, pushop.futureheads))
796 798 pushop.outdatedphases = future
797 799 pushop.fallbackoutdatedphases = fallback
798 800
799 801
800 802 @pushdiscovery(b'obsmarker')
801 803 def _pushdiscoveryobsmarkers(pushop):
802 804 if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
803 805 return
804 806
805 807 if not pushop.repo.obsstore:
806 808 return
807 809
808 810 if b'obsolete' not in listkeys(pushop.remote, b'namespaces'):
809 811 return
810 812
811 813 repo = pushop.repo
812 814 # very naive computation, that can be quite expensive on big repo.
813 815 # However: evolution is currently slow on them anyway.
814 816 nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
815 817 pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
816 818
817 819
818 820 @pushdiscovery(b'bookmarks')
819 821 def _pushdiscoverybookmarks(pushop):
820 822 ui = pushop.ui
821 823 repo = pushop.repo.unfiltered()
822 824 remote = pushop.remote
823 825 ui.debug(b"checking for updated bookmarks\n")
824 826 ancestors = ()
825 827 if pushop.revs:
826 828 revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
827 829 ancestors = repo.changelog.ancestors(revnums, inclusive=True)
828 830
829 831 remotebookmark = bookmod.unhexlifybookmarks(listkeys(remote, b'bookmarks'))
830 832
831 833 explicit = {
832 834 repo._bookmarks.expandname(bookmark) for bookmark in pushop.bookmarks
833 835 }
834 836
835 837 comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)
836 838 return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
837 839
838 840
839 841 def _processcompared(pushop, pushed, explicit, remotebms, comp):
840 842 """take decision on bookmarks to push to the remote repo
841 843
842 844 Exists to help extensions alter this behavior.
843 845 """
844 846 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
845 847
846 848 repo = pushop.repo
847 849
848 850 for b, scid, dcid in advsrc:
849 851 if b in explicit:
850 852 explicit.remove(b)
851 853 if not pushed or repo[scid].rev() in pushed:
852 854 pushop.outbookmarks.append((b, dcid, scid))
853 855 # search added bookmark
854 856 for b, scid, dcid in addsrc:
855 857 if b in explicit:
856 858 explicit.remove(b)
857 859 pushop.outbookmarks.append((b, b'', scid))
858 860 # search for overwritten bookmark
859 861 for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
860 862 if b in explicit:
861 863 explicit.remove(b)
862 864 pushop.outbookmarks.append((b, dcid, scid))
863 865 # search for bookmark to delete
864 866 for b, scid, dcid in adddst:
865 867 if b in explicit:
866 868 explicit.remove(b)
867 869 # treat as "deleted locally"
868 870 pushop.outbookmarks.append((b, dcid, b''))
869 871 # identical bookmarks shouldn't get reported
870 872 for b, scid, dcid in same:
871 873 if b in explicit:
872 874 explicit.remove(b)
873 875
874 876 if explicit:
875 877 explicit = sorted(explicit)
876 878 # we should probably list all of them
877 879 pushop.ui.warn(
878 880 _(
879 881 b'bookmark %s does not exist on the local '
880 882 b'or remote repository!\n'
881 883 )
882 884 % explicit[0]
883 885 )
884 886 pushop.bkresult = 2
885 887
886 888 pushop.outbookmarks.sort()
887 889
888 890
889 891 def _pushcheckoutgoing(pushop):
890 892 outgoing = pushop.outgoing
891 893 unfi = pushop.repo.unfiltered()
892 894 if not outgoing.missing:
893 895 # nothing to push
894 896 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
895 897 return False
896 898 # something to push
897 899 if not pushop.force:
898 900 # if repo.obsstore == False --> no obsolete
899 901 # then, save the iteration
900 902 if unfi.obsstore:
901 903 # these messages are defined here for 80-char line length reasons
902 904 mso = _(b"push includes obsolete changeset: %s!")
903 905 mspd = _(b"push includes phase-divergent changeset: %s!")
904 906 mscd = _(b"push includes content-divergent changeset: %s!")
905 907 mst = {
906 908 b"orphan": _(b"push includes orphan changeset: %s!"),
907 909 b"phase-divergent": mspd,
908 910 b"content-divergent": mscd,
909 911 }
910 912 # If we are going to push and there is at least one
911 913 # obsolete or unstable changeset in missing, at least
912 914 # one of the missing heads will be obsolete or
913 915 # unstable. So checking only the heads is ok.
914 916 for node in outgoing.missingheads:
915 917 ctx = unfi[node]
916 918 if ctx.obsolete():
917 919 raise error.Abort(mso % ctx)
918 920 elif ctx.isunstable():
919 921 # TODO print more than one instability in the abort
920 922 # message
921 923 raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
922 924
923 925 discovery.checkheads(pushop)
924 926 return True
925 927
926 928
927 929 # List of names of steps to perform for an outgoing bundle2, order matters.
928 930 b2partsgenorder = []
929 931
930 932 # Mapping between step name and function
931 933 #
932 934 # This exists to help extensions wrap steps if necessary
933 935 b2partsgenmapping = {}
934 936
935 937
936 938 def b2partsgenerator(stepname, idx=None):
937 939 """decorator for function generating bundle2 part
938 940
939 941 The function is added to the step -> function mapping and appended to the
940 942 list of steps. Beware that decorated functions will be added in order
941 943 (this may matter).
942 944
943 945 You can only use this decorator for new steps; if you want to wrap a step
944 946 from an extension, modify the b2partsgenmapping dictionary directly."""
945 947
946 948 def dec(func):
947 949 assert stepname not in b2partsgenmapping
948 950 b2partsgenmapping[stepname] = func
949 951 if idx is None:
950 952 b2partsgenorder.append(stepname)
951 953 else:
952 954 b2partsgenorder.insert(idx, stepname)
953 955 return func
954 956
955 957 return dec
956 958
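# Illustrative sketch only: how an out-of-tree extension could register an
# extra part generator through the decorator above. The part name
# b'myext-extra' and its payload are hypothetical.
#
#     @b2partsgenerator(b'myext-extra')
#     def _pushb2myextextra(pushop, bundler):
#         if b'myext-extra' in pushop.stepsdone:
#             return
#         pushop.stepsdone.add(b'myext-extra')
#         bundler.newpart(b'myext-extra', data=b'payload')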
957 959
958 960 def _pushb2ctxcheckheads(pushop, bundler):
959 961 """Generate race condition checking parts
960 962
961 963 Exists as an independent function to aid extensions
962 964 """
963 965 # * 'force' does not check for push races,
964 966 # * if we don't push anything, there is nothing to check.
965 967 if not pushop.force and pushop.outgoing.missingheads:
966 968 allowunrelated = b'related' in bundler.capabilities.get(
967 969 b'checkheads', ()
968 970 )
969 971 emptyremote = pushop.pushbranchmap is None
970 972 if not allowunrelated or emptyremote:
971 973 bundler.newpart(b'check:heads', data=iter(pushop.remoteheads))
972 974 else:
973 975 affected = set()
974 976 for branch, heads in pycompat.iteritems(pushop.pushbranchmap):
975 977 remoteheads, newheads, unsyncedheads, discardedheads = heads
976 978 if remoteheads is not None:
977 979 remote = set(remoteheads)
978 980 affected |= set(discardedheads) & remote
979 981 affected |= remote - set(newheads)
980 982 if affected:
981 983 data = iter(sorted(affected))
982 984 bundler.newpart(b'check:updated-heads', data=data)
983 985
984 986
985 987 def _pushing(pushop):
986 988 """return True if we are pushing anything"""
987 989 return bool(
988 990 pushop.outgoing.missing
989 991 or pushop.outdatedphases
990 992 or pushop.outobsmarkers
991 993 or pushop.outbookmarks
992 994 )
993 995
994 996
995 997 @b2partsgenerator(b'check-bookmarks')
996 998 def _pushb2checkbookmarks(pushop, bundler):
997 999 """insert bookmark move checking"""
998 1000 if not _pushing(pushop) or pushop.force:
999 1001 return
1000 1002 b2caps = bundle2.bundle2caps(pushop.remote)
1001 1003 hasbookmarkcheck = b'bookmarks' in b2caps
1002 1004 if not (pushop.outbookmarks and hasbookmarkcheck):
1003 1005 return
1004 1006 data = []
1005 1007 for book, old, new in pushop.outbookmarks:
1006 1008 data.append((book, old))
1007 1009 checkdata = bookmod.binaryencode(data)
1008 1010 bundler.newpart(b'check:bookmarks', data=checkdata)
1009 1011
1010 1012
1011 1013 @b2partsgenerator(b'check-phases')
1012 1014 def _pushb2checkphases(pushop, bundler):
1013 1015 """insert phase move checking"""
1014 1016 if not _pushing(pushop) or pushop.force:
1015 1017 return
1016 1018 b2caps = bundle2.bundle2caps(pushop.remote)
1017 1019 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1018 1020 if pushop.remotephases is not None and hasphaseheads:
1019 1021 # check that the remote phase has not changed
1020 1022 checks = [[] for p in phases.allphases]
1021 1023 checks[phases.public].extend(pushop.remotephases.publicheads)
1022 1024 checks[phases.draft].extend(pushop.remotephases.draftroots)
1023 1025 if any(checks):
1024 1026 for nodes in checks:
1025 1027 nodes.sort()
1026 1028 checkdata = phases.binaryencode(checks)
1027 1029 bundler.newpart(b'check:phases', data=checkdata)
1028 1030
1029 1031
1030 1032 @b2partsgenerator(b'changeset')
1031 1033 def _pushb2ctx(pushop, bundler):
1032 1034 """handle changegroup push through bundle2
1033 1035
1034 1036 addchangegroup result is stored in the ``pushop.cgresult`` attribute.
1035 1037 """
1036 1038 if b'changesets' in pushop.stepsdone:
1037 1039 return
1038 1040 pushop.stepsdone.add(b'changesets')
1039 1041 # Send known heads to the server for race detection.
1040 1042 if not _pushcheckoutgoing(pushop):
1041 1043 return
1042 1044 pushop.repo.prepushoutgoinghooks(pushop)
1043 1045
1044 1046 _pushb2ctxcheckheads(pushop, bundler)
1045 1047
1046 1048 b2caps = bundle2.bundle2caps(pushop.remote)
1047 1049 version = b'01'
1048 1050 cgversions = b2caps.get(b'changegroup')
1049 1051 if cgversions: # 3.1 and 3.2 ship with an empty value
1050 1052 cgversions = [
1051 1053 v
1052 1054 for v in cgversions
1053 1055 if v in changegroup.supportedoutgoingversions(pushop.repo)
1054 1056 ]
1055 1057 if not cgversions:
1056 1058 raise error.Abort(_(b'no common changegroup version'))
1057 1059 version = max(cgversions)
1058 1060 cgstream = changegroup.makestream(
1059 1061 pushop.repo, pushop.outgoing, version, b'push'
1060 1062 )
1061 1063 cgpart = bundler.newpart(b'changegroup', data=cgstream)
1062 1064 if cgversions:
1063 1065 cgpart.addparam(b'version', version)
1064 1066 if b'treemanifest' in pushop.repo.requirements:
1065 1067 cgpart.addparam(b'treemanifest', b'1')
1066 1068 if b'exp-sidedata-flag' in pushop.repo.requirements:
1067 1069 cgpart.addparam(b'exp-sidedata', b'1')
1068 1070
1069 1071 def handlereply(op):
1070 1072 """extract addchangegroup returns from server reply"""
1071 1073 cgreplies = op.records.getreplies(cgpart.id)
1072 1074 assert len(cgreplies[b'changegroup']) == 1
1073 1075 pushop.cgresult = cgreplies[b'changegroup'][0][b'return']
1074 1076
1075 1077 return handlereply
1076 1078
1077 1079
1078 1080 @b2partsgenerator(b'phase')
1079 1081 def _pushb2phases(pushop, bundler):
1080 1082 """handle phase push through bundle2"""
1081 1083 if b'phases' in pushop.stepsdone:
1082 1084 return
1083 1085 b2caps = bundle2.bundle2caps(pushop.remote)
1084 1086 ui = pushop.repo.ui
1085 1087
1086 1088 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1087 1089 haspushkey = b'pushkey' in b2caps
1088 1090 hasphaseheads = b'heads' in b2caps.get(b'phases', ())
1089 1091
1090 1092 if hasphaseheads and not legacyphase:
1091 1093 return _pushb2phaseheads(pushop, bundler)
1092 1094 elif haspushkey:
1093 1095 return _pushb2phasespushkey(pushop, bundler)
1094 1096
1095 1097
1096 1098 def _pushb2phaseheads(pushop, bundler):
1097 1099 """push phase information through a bundle2 - binary part"""
1098 1100 pushop.stepsdone.add(b'phases')
1099 1101 if pushop.outdatedphases:
1100 1102 updates = [[] for p in phases.allphases]
1101 1103 updates[0].extend(h.node() for h in pushop.outdatedphases)
1102 1104 phasedata = phases.binaryencode(updates)
1103 1105 bundler.newpart(b'phase-heads', data=phasedata)
1104 1106
1105 1107
1106 1108 def _pushb2phasespushkey(pushop, bundler):
1107 1109 """push phase information through a bundle2 - pushkey part"""
1108 1110 pushop.stepsdone.add(b'phases')
1109 1111 part2node = []
1110 1112
1111 1113 def handlefailure(pushop, exc):
1112 1114 targetid = int(exc.partid)
1113 1115 for partid, node in part2node:
1114 1116 if partid == targetid:
1115 1117 raise error.Abort(_(b'updating %s to public failed') % node)
1116 1118
1117 1119 enc = pushkey.encode
1118 1120 for newremotehead in pushop.outdatedphases:
1119 1121 part = bundler.newpart(b'pushkey')
1120 1122 part.addparam(b'namespace', enc(b'phases'))
1121 1123 part.addparam(b'key', enc(newremotehead.hex()))
1122 1124 part.addparam(b'old', enc(b'%d' % phases.draft))
1123 1125 part.addparam(b'new', enc(b'%d' % phases.public))
1124 1126 part2node.append((part.id, newremotehead))
1125 1127 pushop.pkfailcb[part.id] = handlefailure
1126 1128
1127 1129 def handlereply(op):
1128 1130 for partid, node in part2node:
1129 1131 partrep = op.records.getreplies(partid)
1130 1132 results = partrep[b'pushkey']
1131 1133 assert len(results) <= 1
1132 1134 msg = None
1133 1135 if not results:
1134 1136 msg = _(b'server ignored update of %s to public!\n') % node
1135 1137 elif not int(results[0][b'return']):
1136 1138 msg = _(b'updating %s to public failed!\n') % node
1137 1139 if msg is not None:
1138 1140 pushop.ui.warn(msg)
1139 1141
1140 1142 return handlereply
1141 1143
1142 1144
1143 1145 @b2partsgenerator(b'obsmarkers')
1144 1146 def _pushb2obsmarkers(pushop, bundler):
1145 1147 if b'obsmarkers' in pushop.stepsdone:
1146 1148 return
1147 1149 remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
1148 1150 if obsolete.commonversion(remoteversions) is None:
1149 1151 return
1150 1152 pushop.stepsdone.add(b'obsmarkers')
1151 1153 if pushop.outobsmarkers:
1152 1154 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1153 1155 bundle2.buildobsmarkerspart(bundler, markers)
1154 1156
1155 1157
1156 1158 @b2partsgenerator(b'bookmarks')
1157 1159 def _pushb2bookmarks(pushop, bundler):
1158 1160 """handle bookmark push through bundle2"""
1159 1161 if b'bookmarks' in pushop.stepsdone:
1160 1162 return
1161 1163 b2caps = bundle2.bundle2caps(pushop.remote)
1162 1164
1163 1165 legacy = pushop.repo.ui.configlist(b'devel', b'legacy.exchange')
1164 1166 legacybooks = b'bookmarks' in legacy
1165 1167
1166 1168 if not legacybooks and b'bookmarks' in b2caps:
1167 1169 return _pushb2bookmarkspart(pushop, bundler)
1168 1170 elif b'pushkey' in b2caps:
1169 1171 return _pushb2bookmarkspushkey(pushop, bundler)
1170 1172
1171 1173
1172 1174 def _bmaction(old, new):
1173 1175 """small utility for bookmark pushing"""
1174 1176 if not old:
1175 1177 return b'export'
1176 1178 elif not new:
1177 1179 return b'delete'
1178 1180 return b'update'
1179 1181
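# Illustrative behaviour of _bmaction (node values are hypothetical):
#
#     _bmaction(b'', b'abc1')     -> b'export'  (no old value: new bookmark)
#     _bmaction(b'abc1', b'')     -> b'delete'  (no new value: bookmark removed)
#     _bmaction(b'abc1', b'def2') -> b'update'  (both present: bookmark moved)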
1180 1182
1181 1183 def _abortonsecretctx(pushop, node, b):
1182 1184 """abort if a given bookmark points to a secret changeset"""
1183 1185 if node and pushop.repo[node].phase() == phases.secret:
1184 1186 raise error.Abort(
1185 1187 _(b'cannot push bookmark %s as it points to a secret changeset') % b
1186 1188 )
1187 1189
1188 1190
1189 1191 def _pushb2bookmarkspart(pushop, bundler):
1190 1192 pushop.stepsdone.add(b'bookmarks')
1191 1193 if not pushop.outbookmarks:
1192 1194 return
1193 1195
1194 1196 allactions = []
1195 1197 data = []
1196 1198 for book, old, new in pushop.outbookmarks:
1197 1199 _abortonsecretctx(pushop, new, book)
1198 1200 data.append((book, new))
1199 1201 allactions.append((book, _bmaction(old, new)))
1200 1202 checkdata = bookmod.binaryencode(data)
1201 1203 bundler.newpart(b'bookmarks', data=checkdata)
1202 1204
1203 1205 def handlereply(op):
1204 1206 ui = pushop.ui
1205 1207 # if success
1206 1208 for book, action in allactions:
1207 1209 ui.status(bookmsgmap[action][0] % book)
1208 1210
1209 1211 return handlereply
1210 1212
1211 1213
1212 1214 def _pushb2bookmarkspushkey(pushop, bundler):
1213 1215 pushop.stepsdone.add(b'bookmarks')
1214 1216 part2book = []
1215 1217 enc = pushkey.encode
1216 1218
1217 1219 def handlefailure(pushop, exc):
1218 1220 targetid = int(exc.partid)
1219 1221 for partid, book, action in part2book:
1220 1222 if partid == targetid:
1221 1223 raise error.Abort(bookmsgmap[action][1].rstrip() % book)
1222 1224 # we should not be called for parts we did not generate
1223 1225 assert False
1224 1226
1225 1227 for book, old, new in pushop.outbookmarks:
1226 1228 _abortonsecretctx(pushop, new, book)
1227 1229 part = bundler.newpart(b'pushkey')
1228 1230 part.addparam(b'namespace', enc(b'bookmarks'))
1229 1231 part.addparam(b'key', enc(book))
1230 1232 part.addparam(b'old', enc(hex(old)))
1231 1233 part.addparam(b'new', enc(hex(new)))
1232 1234 action = b'update'
1233 1235 if not old:
1234 1236 action = b'export'
1235 1237 elif not new:
1236 1238 action = b'delete'
1237 1239 part2book.append((part.id, book, action))
1238 1240 pushop.pkfailcb[part.id] = handlefailure
1239 1241
1240 1242 def handlereply(op):
1241 1243 ui = pushop.ui
1242 1244 for partid, book, action in part2book:
1243 1245 partrep = op.records.getreplies(partid)
1244 1246 results = partrep[b'pushkey']
1245 1247 assert len(results) <= 1
1246 1248 if not results:
1247 1249 pushop.ui.warn(_(b'server ignored bookmark %s update\n') % book)
1248 1250 else:
1249 1251 ret = int(results[0][b'return'])
1250 1252 if ret:
1251 1253 ui.status(bookmsgmap[action][0] % book)
1252 1254 else:
1253 1255 ui.warn(bookmsgmap[action][1] % book)
1254 1256 if pushop.bkresult is not None:
1255 1257 pushop.bkresult = 1
1256 1258
1257 1259 return handlereply
1258 1260
1259 1261
1260 1262 @b2partsgenerator(b'pushvars', idx=0)
1261 1263 def _getbundlesendvars(pushop, bundler):
1262 1264 '''send shellvars via bundle2'''
1263 1265 pushvars = pushop.pushvars
1264 1266 if pushvars:
1265 1267 shellvars = {}
1266 1268 for raw in pushvars:
1267 1269 if b'=' not in raw:
1268 1270 msg = (
1269 1271 b"unable to parse variable '%s', should follow "
1270 1272 b"'KEY=VALUE' or 'KEY=' format"
1271 1273 )
1272 1274 raise error.Abort(msg % raw)
1273 1275 k, v = raw.split(b'=', 1)
1274 1276 shellvars[k] = v
1275 1277
1276 1278 part = bundler.newpart(b'pushvars')
1277 1279
1278 1280 for key, value in pycompat.iteritems(shellvars):
1279 1281 part.addparam(key, value, mandatory=False)
1280 1282
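# Illustrative sketch of the parsing above, assuming a push invoked as
# `hg push --pushvars "DEBUG=1" --pushvars "REASON="` (values hypothetical):
# each raw string is split on the first '=', yielding
# shellvars == {b'DEBUG': b'1', b'REASON': b''}, and every pair is attached
# to the 'pushvars' part as an advisory (mandatory=False) parameter.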
1281 1283
1282 1284 def _pushbundle2(pushop):
1283 1285 """push data to the remote using bundle2
1284 1286
1285 1287 The only currently supported type of data is changegroup but this will
1286 1288 evolve in the future."""
1287 1289 bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
1288 1290 pushback = pushop.trmanager and pushop.ui.configbool(
1289 1291 b'experimental', b'bundle2.pushback'
1290 1292 )
1291 1293
1292 1294 # create reply capability
1293 1295 capsblob = bundle2.encodecaps(
1294 1296 bundle2.getrepocaps(pushop.repo, allowpushback=pushback, role=b'client')
1295 1297 )
1296 1298 bundler.newpart(b'replycaps', data=capsblob)
1297 1299 replyhandlers = []
1298 1300 for partgenname in b2partsgenorder:
1299 1301 partgen = b2partsgenmapping[partgenname]
1300 1302 ret = partgen(pushop, bundler)
1301 1303 if callable(ret):
1302 1304 replyhandlers.append(ret)
1303 1305 # do not push if nothing to push
1304 1306 if bundler.nbparts <= 1:
1305 1307 return
1306 1308 stream = util.chunkbuffer(bundler.getchunks())
1307 1309 try:
1308 1310 try:
1309 1311 with pushop.remote.commandexecutor() as e:
1310 1312 reply = e.callcommand(
1311 1313 b'unbundle',
1312 1314 {
1313 1315 b'bundle': stream,
1314 1316 b'heads': [b'force'],
1315 1317 b'url': pushop.remote.url(),
1316 1318 },
1317 1319 ).result()
1318 1320 except error.BundleValueError as exc:
1319 1321 raise error.Abort(_(b'missing support for %s') % exc)
1320 1322 try:
1321 1323 trgetter = None
1322 1324 if pushback:
1323 1325 trgetter = pushop.trmanager.transaction
1324 1326 op = bundle2.processbundle(pushop.repo, reply, trgetter)
1325 1327 except error.BundleValueError as exc:
1326 1328 raise error.Abort(_(b'missing support for %s') % exc)
1327 1329 except bundle2.AbortFromPart as exc:
1328 1330 pushop.ui.status(_(b'remote: %s\n') % exc)
1329 1331 if exc.hint is not None:
1330 1332 pushop.ui.status(_(b'remote: %s\n') % (b'(%s)' % exc.hint))
1331 1333 raise error.Abort(_(b'push failed on remote'))
1332 1334 except error.PushkeyFailed as exc:
1333 1335 partid = int(exc.partid)
1334 1336 if partid not in pushop.pkfailcb:
1335 1337 raise
1336 1338 pushop.pkfailcb[partid](pushop, exc)
1337 1339 for rephand in replyhandlers:
1338 1340 rephand(op)
1339 1341
1340 1342
1341 1343 def _pushchangeset(pushop):
1342 1344 """Make the actual push of changeset bundle to remote repo"""
1343 1345 if b'changesets' in pushop.stepsdone:
1344 1346 return
1345 1347 pushop.stepsdone.add(b'changesets')
1346 1348 if not _pushcheckoutgoing(pushop):
1347 1349 return
1348 1350
1349 1351 # Should have verified this in push().
1350 1352 assert pushop.remote.capable(b'unbundle')
1351 1353
1352 1354 pushop.repo.prepushoutgoinghooks(pushop)
1353 1355 outgoing = pushop.outgoing
1354 1356 # TODO: get bundlecaps from remote
1355 1357 bundlecaps = None
1356 1358 # create a changegroup from local
1357 1359 if pushop.revs is None and not (
1358 1360 outgoing.excluded or pushop.repo.changelog.filteredrevs
1359 1361 ):
1360 1362 # push everything,
1361 1363 # use the fast path, no race possible on push
1362 1364 cg = changegroup.makechangegroup(
1363 1365 pushop.repo,
1364 1366 outgoing,
1365 1367 b'01',
1366 1368 b'push',
1367 1369 fastpath=True,
1368 1370 bundlecaps=bundlecaps,
1369 1371 )
1370 1372 else:
1371 1373 cg = changegroup.makechangegroup(
1372 1374 pushop.repo, outgoing, b'01', b'push', bundlecaps=bundlecaps
1373 1375 )
1374 1376
1375 1377 # apply changegroup to remote
1376 1378 # local repo finds heads on server, finds out what
1377 1379 # revs it must push. once revs transferred, if server
1378 1380 # finds it has different heads (someone else won
1379 1381 # commit/push race), server aborts.
1380 1382 if pushop.force:
1381 1383 remoteheads = [b'force']
1382 1384 else:
1383 1385 remoteheads = pushop.remoteheads
1384 1386 # ssh: return remote's addchangegroup()
1385 1387 # http: return remote's addchangegroup() or 0 for error
1386 1388 pushop.cgresult = pushop.remote.unbundle(cg, remoteheads, pushop.repo.url())
1387 1389
1388 1390
1389 1391 def _pushsyncphase(pushop):
1390 1392 """synchronise phase information locally and remotely"""
1391 1393 cheads = pushop.commonheads
1392 1394 # even when we don't push, exchanging phase data is useful
1393 1395 remotephases = listkeys(pushop.remote, b'phases')
1394 1396 if (
1395 1397 pushop.ui.configbool(b'ui', b'_usedassubrepo')
1396 1398 and remotephases # server supports phases
1397 1399 and pushop.cgresult is None # nothing was pushed
1398 1400 and remotephases.get(b'publishing', False)
1399 1401 ):
1400 1402 # When:
1401 1403 # - this is a subrepo push
1402 1404 # - and the remote supports phases
1403 1405 # - and no changeset was pushed
1404 1406 # - and remote is publishing
1405 1407 # We may be in issue 3871 case!
1406 1408 # We drop the possible phase synchronisation done by
1407 1409 # courtesy to publish changesets possibly locally draft
1408 1410 # on the remote.
1409 1411 remotephases = {b'publishing': b'True'}
1410 1412 if not remotephases: # old server or public only reply from non-publishing
1411 1413 _localphasemove(pushop, cheads)
1412 1414 # don't push any phase data as there is nothing to push
1413 1415 else:
1414 1416 ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
1415 1417 pheads, droots = ana
1416 1418 ### Apply remote phase on local
1417 1419 if remotephases.get(b'publishing', False):
1418 1420 _localphasemove(pushop, cheads)
1419 1421 else: # publish = False
1420 1422 _localphasemove(pushop, pheads)
1421 1423 _localphasemove(pushop, cheads, phases.draft)
1422 1424 ### Apply local phase on remote
1423 1425
1424 1426 if pushop.cgresult:
1425 1427 if b'phases' in pushop.stepsdone:
1426 1428 # phases already pushed though bundle2
1427 1429 return
1428 1430 outdated = pushop.outdatedphases
1429 1431 else:
1430 1432 outdated = pushop.fallbackoutdatedphases
1431 1433
1432 1434 pushop.stepsdone.add(b'phases')
1433 1435
1434 1436 # filter heads already turned public by the push
1435 1437 outdated = [c for c in outdated if c.node() not in pheads]
1436 1438 # fallback to independent pushkey command
1437 1439 for newremotehead in outdated:
1438 1440 with pushop.remote.commandexecutor() as e:
1439 1441 r = e.callcommand(
1440 1442 b'pushkey',
1441 1443 {
1442 1444 b'namespace': b'phases',
1443 1445 b'key': newremotehead.hex(),
1444 1446 b'old': b'%d' % phases.draft,
1445 1447 b'new': b'%d' % phases.public,
1446 1448 },
1447 1449 ).result()
1448 1450
1449 1451 if not r:
1450 1452 pushop.ui.warn(
1451 1453 _(b'updating %s to public failed!\n') % newremotehead
1452 1454 )
1453 1455
1454 1456
1455 1457 def _localphasemove(pushop, nodes, phase=phases.public):
1456 1458 """move <nodes> to <phase> in the local source repo"""
1457 1459 if pushop.trmanager:
1458 1460 phases.advanceboundary(
1459 1461 pushop.repo, pushop.trmanager.transaction(), phase, nodes
1460 1462 )
1461 1463 else:
1462 1464 # repo is not locked, do not change any phases!
1463 1465 # Informs the user that phases should have been moved when
1464 1466 # applicable.
1465 1467 actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
1466 1468 phasestr = phases.phasenames[phase]
1467 1469 if actualmoves:
1468 1470 pushop.ui.status(
1469 1471 _(
1470 1472 b'cannot lock source repo, skipping '
1471 1473 b'local %s phase update\n'
1472 1474 )
1473 1475 % phasestr
1474 1476 )
1475 1477
1476 1478
1477 1479 def _pushobsolete(pushop):
1478 1480 """utility function to push obsolete markers to a remote"""
1479 1481 if b'obsmarkers' in pushop.stepsdone:
1480 1482 return
1481 1483 repo = pushop.repo
1482 1484 remote = pushop.remote
1483 1485 pushop.stepsdone.add(b'obsmarkers')
1484 1486 if pushop.outobsmarkers:
1485 1487 pushop.ui.debug(b'try to push obsolete markers to remote\n')
1486 1488 rslts = []
1487 1489 markers = obsutil.sortedmarkers(pushop.outobsmarkers)
1488 1490 remotedata = obsolete._pushkeyescape(markers)
1489 1491 for key in sorted(remotedata, reverse=True):
1490 1492 # reverse sort to ensure we end with dump0
1491 1493 data = remotedata[key]
1492 1494 rslts.append(remote.pushkey(b'obsolete', key, b'', data))
1493 1495 if [r for r in rslts if not r]:
1494 1496 msg = _(b'failed to push some obsolete markers!\n')
1495 1497 repo.ui.warn(msg)
1496 1498
1497 1499
1498 1500 def _pushbookmark(pushop):
1499 1501 """Update bookmark position on remote"""
1500 1502 if pushop.cgresult == 0 or b'bookmarks' in pushop.stepsdone:
1501 1503 return
1502 1504 pushop.stepsdone.add(b'bookmarks')
1503 1505 ui = pushop.ui
1504 1506 remote = pushop.remote
1505 1507
1506 1508 for b, old, new in pushop.outbookmarks:
1507 1509 action = b'update'
1508 1510 if not old:
1509 1511 action = b'export'
1510 1512 elif not new:
1511 1513 action = b'delete'
1512 1514
1513 1515 with remote.commandexecutor() as e:
1514 1516 r = e.callcommand(
1515 1517 b'pushkey',
1516 1518 {
1517 1519 b'namespace': b'bookmarks',
1518 1520 b'key': b,
1519 1521 b'old': hex(old),
1520 1522 b'new': hex(new),
1521 1523 },
1522 1524 ).result()
1523 1525
1524 1526 if r:
1525 1527 ui.status(bookmsgmap[action][0] % b)
1526 1528 else:
1527 1529 ui.warn(bookmsgmap[action][1] % b)
1528 1530 # discovery can have set the value from an invalid entry
1529 1531 if pushop.bkresult is not None:
1530 1532 pushop.bkresult = 1
1531 1533
1532 1534
1533 1535 class pulloperation(object):
1534 1536 """A object that represent a single pull operation
1535 1537
1536 1538 Its purpose is to carry pull-related state and very common operations.
1537 1539
1538 1540 A new one should be created at the beginning of each pull and discarded
1539 1541 afterward.
1540 1542 """
1541 1543
1542 1544 def __init__(
1543 1545 self,
1544 1546 repo,
1545 1547 remote,
1546 1548 heads=None,
1547 1549 force=False,
1548 1550 bookmarks=(),
1549 1551 remotebookmarks=None,
1550 1552 streamclonerequested=None,
1551 1553 includepats=None,
1552 1554 excludepats=None,
1553 1555 depth=None,
1554 1556 ):
1555 1557 # repo we pull into
1556 1558 self.repo = repo
1557 1559 # repo we pull from
1558 1560 self.remote = remote
1559 1561 # revision we try to pull (None is "all")
1560 1562 self.heads = heads
1561 1563 # bookmarks pulled explicitly
1562 1564 self.explicitbookmarks = [
1563 1565 repo._bookmarks.expandname(bookmark) for bookmark in bookmarks
1564 1566 ]
1565 1567 # do we force pull?
1566 1568 self.force = force
1567 1569 # whether a streaming clone was requested
1568 1570 self.streamclonerequested = streamclonerequested
1569 1571 # transaction manager
1570 1572 self.trmanager = None
1571 1573 # set of common changesets between local and remote before pull
1572 1574 self.common = None
1573 1575 # set of pulled heads
1574 1576 self.rheads = None
1575 1577 # list of missing changesets to fetch remotely
1576 1578 self.fetch = None
1577 1579 # remote bookmarks data
1578 1580 self.remotebookmarks = remotebookmarks
1579 1581 # result of changegroup pulling (used as return code by pull)
1580 1582 self.cgresult = None
1581 1583 # list of step already done
1582 1584 self.stepsdone = set()
1583 1585 # Whether we attempted a clone from pre-generated bundles.
1584 1586 self.clonebundleattempted = False
1585 1587 # Set of file patterns to include.
1586 1588 self.includepats = includepats
1587 1589 # Set of file patterns to exclude.
1588 1590 self.excludepats = excludepats
1589 1591 # Number of ancestor changesets to pull from each pulled head.
1590 1592 self.depth = depth
1591 1593
1592 1594 @util.propertycache
1593 1595 def pulledsubset(self):
1594 1596 """heads of the set of changeset target by the pull"""
1595 1597 # compute target subset
1596 1598 if self.heads is None:
1597 1599 # We pulled everything possible
1598 1600 # sync on everything common
1599 1601 c = set(self.common)
1600 1602 ret = list(self.common)
1601 1603 for n in self.rheads:
1602 1604 if n not in c:
1603 1605 ret.append(n)
1604 1606 return ret
1605 1607 else:
1606 1608 # We pulled a specific subset
1607 1609 # sync on this subset
1608 1610 return self.heads
1609 1611
1610 1612 @util.propertycache
1611 1613 def canusebundle2(self):
1612 1614 return not _forcebundle1(self)
1613 1615
1614 1616 @util.propertycache
1615 1617 def remotebundle2caps(self):
1616 1618 return bundle2.bundle2caps(self.remote)
1617 1619
1618 1620 def gettransaction(self):
1619 1621 # deprecated; talk to trmanager directly
1620 1622 return self.trmanager.transaction()
1621 1623
1622 1624
1623 1625 class transactionmanager(util.transactional):
1624 1626 """An object to manage the life cycle of a transaction
1625 1627
1626 1628 It creates the transaction on demand and calls the appropriate hooks when
1627 1629 closing the transaction."""
1628 1630
1629 1631 def __init__(self, repo, source, url):
1630 1632 self.repo = repo
1631 1633 self.source = source
1632 1634 self.url = url
1633 1635 self._tr = None
1634 1636
1635 1637 def transaction(self):
1636 1638 """Return an open transaction object, constructing if necessary"""
1637 1639 if not self._tr:
1638 1640 trname = b'%s\n%s' % (self.source, util.hidepassword(self.url))
1639 1641 self._tr = self.repo.transaction(trname)
1640 1642 self._tr.hookargs[b'source'] = self.source
1641 1643 self._tr.hookargs[b'url'] = self.url
1642 1644 return self._tr
1643 1645
1644 1646 def close(self):
1645 1647 """close transaction if created"""
1646 1648 if self._tr is not None:
1647 1649 self._tr.close()
1648 1650
1649 1651 def release(self):
1650 1652 """release transaction if created"""
1651 1653 if self._tr is not None:
1652 1654 self._tr.release()
1653 1655
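# Illustrative sketch of how this class is used; util.transactional provides
# the context-manager protocol relied on by pull() below
# (`with wlock, repo.lock(), pullop.trmanager:`):
#
#     tm = transactionmanager(repo, b'pull', remote.url())
#     with tm:
#         tr = tm.transaction()   # created lazily on first use
#         ...                     # apply incoming data under `tr`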
1654 1656
1655 1657 def listkeys(remote, namespace):
1656 1658 with remote.commandexecutor() as e:
1657 1659 return e.callcommand(b'listkeys', {b'namespace': namespace}).result()
1658 1660
1659 1661
1660 1662 def _fullpullbundle2(repo, pullop):
1661 1663 # The server may send a partial reply, i.e. when inlining
1662 1664 # pre-computed bundles. In that case, update the common
1663 1665 # set based on the results and pull another bundle.
1664 1666 #
1665 1667 # There are two indicators that the process is finished:
1666 1668 # - no changeset has been added, or
1667 1669 # - all remote heads are known locally.
1668 1670 # The head check must use the unfiltered view as obsoletion
1669 1671 # markers can hide heads.
1670 1672 unfi = repo.unfiltered()
1671 1673 unficl = unfi.changelog
1672 1674
1673 1675 def headsofdiff(h1, h2):
1674 1676 """Returns heads(h1 % h2)"""
1675 1677 res = unfi.set(b'heads(%ln %% %ln)', h1, h2)
1676 1678 return set(ctx.node() for ctx in res)
1677 1679
1678 1680 def headsofunion(h1, h2):
1679 1681 """Returns heads((h1 + h2) - null)"""
1680 1682 res = unfi.set(b'heads((%ln + %ln - null))', h1, h2)
1681 1683 return set(ctx.node() for ctx in res)
1682 1684
1683 1685 while True:
1684 1686 old_heads = unficl.heads()
1685 1687 clstart = len(unficl)
1686 1688 _pullbundle2(pullop)
1687 1689 if repository.NARROW_REQUIREMENT in repo.requirements:
1688 1690 # XXX narrow clones filter the heads on the server side during
1689 1691 # XXX getbundle and result in partial replies as well.
1690 1692 # XXX Disable pull bundles in this case as a band aid to avoid
1691 1693 # XXX extra round trips.
1692 1694 break
1693 1695 if clstart == len(unficl):
1694 1696 break
1695 1697 if all(unficl.hasnode(n) for n in pullop.rheads):
1696 1698 break
1697 1699 new_heads = headsofdiff(unficl.heads(), old_heads)
1698 1700 pullop.common = headsofunion(new_heads, pullop.common)
1699 1701 pullop.rheads = set(pullop.rheads) - pullop.common
1700 1702
1701 1703
1702 1704 def pull(
1703 1705 repo,
1704 1706 remote,
1705 1707 heads=None,
1706 1708 force=False,
1707 1709 bookmarks=(),
1708 1710 opargs=None,
1709 1711 streamclonerequested=None,
1710 1712 includepats=None,
1711 1713 excludepats=None,
1712 1714 depth=None,
1713 1715 ):
1714 1716 """Fetch repository data from a remote.
1715 1717
1716 1718 This is the main function used to retrieve data from a remote repository.
1717 1719
1718 1720 ``repo`` is the local repository to clone into.
1719 1721 ``remote`` is a peer instance.
1720 1722 ``heads`` is an iterable of revisions we want to pull. ``None`` (the
1721 1723 default) means to pull everything from the remote.
1722 1724 ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
1723 1725 default, all remote bookmarks are pulled.
1724 1726 ``opargs`` are additional keyword arguments to pass to ``pulloperation``
1725 1727 initialization.
1726 1728 ``streamclonerequested`` is a boolean indicating whether a "streaming
1727 1729 clone" is requested. A "streaming clone" is essentially a raw file copy
1728 1730 of revlogs from the server. This only works when the local repository is
1729 1731 empty. The default value of ``None`` means to respect the server
1730 1732 configuration for preferring stream clones.
1731 1733 ``includepats`` and ``excludepats`` define explicit file patterns to
1732 1734 include and exclude in storage, respectively. If not defined, narrow
1733 1735 patterns from the repo instance are used, if available.
1734 1736 ``depth`` is an integer indicating the DAG depth of history we're
1735 1737 interested in. If defined, for each revision specified in ``heads``, we
1736 1738 will fetch up to this many of its ancestors and data associated with them.
1737 1739
1738 1740 Returns the ``pulloperation`` created for this pull.
1739 1741 """
1740 1742 if opargs is None:
1741 1743 opargs = {}
1742 1744
1743 1745 # We allow the narrow patterns to be passed in explicitly to provide more
1744 1746 # flexibility for API consumers.
1745 1747 if includepats or excludepats:
1746 1748 includepats = includepats or set()
1747 1749 excludepats = excludepats or set()
1748 1750 else:
1749 1751 includepats, excludepats = repo.narrowpats
1750 1752
1751 1753 narrowspec.validatepatterns(includepats)
1752 1754 narrowspec.validatepatterns(excludepats)
1753 1755
1754 1756 pullop = pulloperation(
1755 1757 repo,
1756 1758 remote,
1757 1759 heads,
1758 1760 force,
1759 1761 bookmarks=bookmarks,
1760 1762 streamclonerequested=streamclonerequested,
1761 1763 includepats=includepats,
1762 1764 excludepats=excludepats,
1763 1765 depth=depth,
1764 1766 **pycompat.strkwargs(opargs)
1765 1767 )
1766 1768
1767 1769 peerlocal = pullop.remote.local()
1768 1770 if peerlocal:
1769 1771 missing = set(peerlocal.requirements) - pullop.repo.supported
1770 1772 if missing:
1771 1773 msg = _(
1772 1774 b"required features are not"
1773 1775 b" supported in the destination:"
1774 1776 b" %s"
1775 1777 ) % (b', '.join(sorted(missing)))
1776 1778 raise error.Abort(msg)
1777 1779
1778 1780 pullop.trmanager = transactionmanager(repo, b'pull', remote.url())
1779 1781 wlock = util.nullcontextmanager()
1780 1782 if not bookmod.bookmarksinstore(repo):
1781 1783 wlock = repo.wlock()
1782 1784 with wlock, repo.lock(), pullop.trmanager:
1783 1785 # Use the modern wire protocol, if available.
1784 1786 if remote.capable(b'command-changesetdata'):
1785 1787 exchangev2.pull(pullop)
1786 1788 else:
1787 1789 # This should ideally be in _pullbundle2(). However, it needs to run
1788 1790 # before discovery to avoid extra work.
1789 1791 _maybeapplyclonebundle(pullop)
1790 1792 streamclone.maybeperformlegacystreamclone(pullop)
1791 1793 _pulldiscovery(pullop)
1792 1794 if pullop.canusebundle2:
1793 1795 _fullpullbundle2(repo, pullop)
1794 1796 _pullchangeset(pullop)
1795 1797 _pullphase(pullop)
1796 1798 _pullbookmarks(pullop)
1797 1799 _pullobsolete(pullop)
1798 1800
1799 1801 # storing remotenames
1800 1802 if repo.ui.configbool(b'experimental', b'remotenames'):
1801 1803 logexchange.pullremotenames(repo, remote)
1802 1804
1803 1805 return pullop
1804 1806
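# Illustrative sketch of a caller, based on the signature documented above
# (the peer construction via mercurial.hg.peer and the source URL are
# assumptions, not part of this module):
#
#     other = hg.peer(repo, {}, source_url)
#     pullop = pull(repo, other, heads=None, bookmarks=(b'@',))
#     # pullop.cgresult carries the changegroup result (used as return code)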
1805 1807
1806 1808 # list of steps to perform discovery before pull
1807 1809 pulldiscoveryorder = []
1808 1810
1809 1811 # Mapping between step name and function
1810 1812 #
1811 1813 # This exists to help extensions wrap steps if necessary
1812 1814 pulldiscoverymapping = {}
1813 1815
1814 1816
1815 1817 def pulldiscovery(stepname):
1816 1818 """decorator for function performing discovery before pull
1817 1819
1818 1820 The function is added to the step -> function mapping and appended to the
1819 1821 list of steps. Beware that decorated function will be added in order (this
1820 1822 may matter).
1821 1823
1822 1824 You can only use this decorator for a new step; if you want to wrap a step
1823 1825 from an extension, modify the pulldiscoverymapping dictionary directly."""
1824 1826
1825 1827 def dec(func):
1826 1828 assert stepname not in pulldiscoverymapping
1827 1829 pulldiscoverymapping[stepname] = func
1828 1830 pulldiscoveryorder.append(stepname)
1829 1831 return func
1830 1832
1831 1833 return dec
1832 1834
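# Illustrative sketch only: registering an extra discovery step from an
# extension (the step name b'myext:probe' is hypothetical):
#
#     @pulldiscovery(b'myext:probe')
#     def _pulldiscoveryprobe(pullop):
#         pullop.repo.ui.debug(b'probing remote before pull\n')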
1833 1835
1834 1836 def _pulldiscovery(pullop):
1835 1837 """Run all discovery steps"""
1836 1838 for stepname in pulldiscoveryorder:
1837 1839 step = pulldiscoverymapping[stepname]
1838 1840 step(pullop)
1839 1841
1840 1842
1841 1843 @pulldiscovery(b'b1:bookmarks')
1842 1844 def _pullbookmarkbundle1(pullop):
1843 1845 """fetch bookmark data in bundle1 case
1844 1846
1845 1847 If not using bundle2, we have to fetch bookmarks before changeset
1846 1848 discovery to reduce the chance and impact of race conditions."""
1847 1849 if pullop.remotebookmarks is not None:
1848 1850 return
1849 1851 if pullop.canusebundle2 and b'listkeys' in pullop.remotebundle2caps:
1850 1852 # all known bundle2 servers now support listkeys, but let's be nice with
1851 1853 # new implementations.
1852 1854 return
1853 1855 books = listkeys(pullop.remote, b'bookmarks')
1854 1856 pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
1855 1857
1856 1858
1857 1859 @pulldiscovery(b'changegroup')
1858 1860 def _pulldiscoverychangegroup(pullop):
1859 1861 """discovery phase for the pull
1860 1862
1861 1863 Currently handles changeset discovery only; it will change to handle all
1862 1864 discovery at some point."""
1863 1865 tmp = discovery.findcommonincoming(
1864 1866 pullop.repo, pullop.remote, heads=pullop.heads, force=pullop.force
1865 1867 )
1866 1868 common, fetch, rheads = tmp
1867 1869 has_node = pullop.repo.unfiltered().changelog.index.has_node
1868 1870 if fetch and rheads:
1869 1871 # If a remote head is filtered locally, put it back in common.
1870 1872 #
1871 1873 # This is a hackish solution to catch most of the "common but locally
1872 1874 # hidden" situations. We do not perform discovery on the unfiltered
1873 1875 # repository because it ends up doing a pathological number of round
1874 1876 # trips for a huge number of changesets we do not care about.
1875 1877 #
1876 1878 # If a set of such "common but filtered" changesets exists on the server
1877 1879 # but does not include a remote head, we'll not be able to detect it,
1878 1880 scommon = set(common)
1879 1881 for n in rheads:
1880 1882 if has_node(n):
1881 1883 if n not in scommon:
1882 1884 common.append(n)
1883 1885 if set(rheads).issubset(set(common)):
1884 1886 fetch = []
1885 1887 pullop.common = common
1886 1888 pullop.fetch = fetch
1887 1889 pullop.rheads = rheads
1888 1890
1889 1891
1890 1892 def _pullbundle2(pullop):
1891 1893 """pull data using bundle2
1892 1894
1893 1895 For now, the only supported data are changegroup."""
1894 1896 kwargs = {b'bundlecaps': caps20to10(pullop.repo, role=b'client')}
1895 1897
1896 1898 # make ui easier to access
1897 1899 ui = pullop.repo.ui
1898 1900
1899 1901 # At the moment we don't do stream clones over bundle2. If that is
1900 1902 # implemented then here's where the check for that will go.
1901 1903 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1902 1904
1903 1905 # declare pull parameters
1904 1906 kwargs[b'common'] = pullop.common
1905 1907 kwargs[b'heads'] = pullop.heads or pullop.rheads
1906 1908
1907 1909 # check that the server supports narrow, then add includepats and excludepats
1908 1910 servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
1909 1911 if servernarrow and pullop.includepats:
1910 1912 kwargs[b'includepats'] = pullop.includepats
1911 1913 if servernarrow and pullop.excludepats:
1912 1914 kwargs[b'excludepats'] = pullop.excludepats
1913 1915
1914 1916 if streaming:
1915 1917 kwargs[b'cg'] = False
1916 1918 kwargs[b'stream'] = True
1917 1919 pullop.stepsdone.add(b'changegroup')
1918 1920 pullop.stepsdone.add(b'phases')
1919 1921
1920 1922 else:
1921 1923 # pulling changegroup
1922 1924 pullop.stepsdone.add(b'changegroup')
1923 1925
1924 1926 kwargs[b'cg'] = pullop.fetch
1925 1927
1926 1928 legacyphase = b'phases' in ui.configlist(b'devel', b'legacy.exchange')
1927 1929 hasbinaryphase = b'heads' in pullop.remotebundle2caps.get(b'phases', ())
1928 1930 if not legacyphase and hasbinaryphase:
1929 1931 kwargs[b'phases'] = True
1930 1932 pullop.stepsdone.add(b'phases')
1931 1933
1932 1934 if b'listkeys' in pullop.remotebundle2caps:
1933 1935 if b'phases' not in pullop.stepsdone:
1934 1936 kwargs[b'listkeys'] = [b'phases']
1935 1937
1936 1938 bookmarksrequested = False
1937 1939 legacybookmark = b'bookmarks' in ui.configlist(b'devel', b'legacy.exchange')
1938 1940 hasbinarybook = b'bookmarks' in pullop.remotebundle2caps
1939 1941
1940 1942 if pullop.remotebookmarks is not None:
1941 1943 pullop.stepsdone.add(b'request-bookmarks')
1942 1944
1943 1945 if (
1944 1946 b'request-bookmarks' not in pullop.stepsdone
1945 1947 and pullop.remotebookmarks is None
1946 1948 and not legacybookmark
1947 1949 and hasbinarybook
1948 1950 ):
1949 1951 kwargs[b'bookmarks'] = True
1950 1952 bookmarksrequested = True
1951 1953
1952 1954 if b'listkeys' in pullop.remotebundle2caps:
1953 1955 if b'request-bookmarks' not in pullop.stepsdone:
1954 1956 # make sure to always include bookmark data when migrating
1955 1957 # `hg incoming --bundle` to using this function.
1956 1958 pullop.stepsdone.add(b'request-bookmarks')
1957 1959 kwargs.setdefault(b'listkeys', []).append(b'bookmarks')
1958 1960
1959 1961 # If this is a full pull / clone and the server supports the clone bundles
1960 1962 # feature, tell the server whether we attempted a clone bundle. The
1961 1963 # presence of this flag indicates the client supports clone bundles. This
1962 1964 # will enable the server to treat clients that support clone bundles
1963 1965 # differently from those that don't.
1964 1966 if (
1965 1967 pullop.remote.capable(b'clonebundles')
1966 1968 and pullop.heads is None
1967 1969 and list(pullop.common) == [nullid]
1968 1970 ):
1969 1971 kwargs[b'cbattempted'] = pullop.clonebundleattempted
1970 1972
1971 1973 if streaming:
1972 1974 pullop.repo.ui.status(_(b'streaming all changes\n'))
1973 1975 elif not pullop.fetch:
1974 1976 pullop.repo.ui.status(_(b"no changes found\n"))
1975 1977 pullop.cgresult = 0
1976 1978 else:
1977 1979 if pullop.heads is None and list(pullop.common) == [nullid]:
1978 1980 pullop.repo.ui.status(_(b"requesting all changes\n"))
1979 1981 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1980 1982 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1981 1983 if obsolete.commonversion(remoteversions) is not None:
1982 1984 kwargs[b'obsmarkers'] = True
1983 1985 pullop.stepsdone.add(b'obsmarkers')
1984 1986 _pullbundle2extraprepare(pullop, kwargs)
1985 1987
1986 1988 with pullop.remote.commandexecutor() as e:
1987 1989 args = dict(kwargs)
1988 1990 args[b'source'] = b'pull'
1989 1991 bundle = e.callcommand(b'getbundle', args).result()
1990 1992
1991 1993 try:
1992 1994 op = bundle2.bundleoperation(
1993 1995 pullop.repo, pullop.gettransaction, source=b'pull'
1994 1996 )
1995 1997 op.modes[b'bookmarks'] = b'records'
1996 1998 bundle2.processbundle(pullop.repo, bundle, op=op)
1997 1999 except bundle2.AbortFromPart as exc:
1998 2000 pullop.repo.ui.status(_(b'remote: abort: %s\n') % exc)
1999 2001 raise error.Abort(_(b'pull failed on remote'), hint=exc.hint)
2000 2002 except error.BundleValueError as exc:
2001 2003 raise error.Abort(_(b'missing support for %s') % exc)
2002 2004
2003 2005 if pullop.fetch:
2004 2006 pullop.cgresult = bundle2.combinechangegroupresults(op)
2005 2007
2006 2008 # processing phases change
2007 2009 for namespace, value in op.records[b'listkeys']:
2008 2010 if namespace == b'phases':
2009 2011 _pullapplyphases(pullop, value)
2010 2012
2011 2013 # processing bookmark update
2012 2014 if bookmarksrequested:
2013 2015 books = {}
2014 2016 for record in op.records[b'bookmarks']:
2015 2017 books[record[b'bookmark']] = record[b"node"]
2016 2018 pullop.remotebookmarks = books
2017 2019 else:
2018 2020 for namespace, value in op.records[b'listkeys']:
2019 2021 if namespace == b'bookmarks':
2020 2022 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
2021 2023
2022 2024 # bookmark data were either already there or pulled in the bundle
2023 2025 if pullop.remotebookmarks is not None:
2024 2026 _pullbookmarks(pullop)
2025 2027
2026 2028
2027 2029 def _pullbundle2extraprepare(pullop, kwargs):
2028 2030 """hook function so that extensions can extend the getbundle call"""
2029 2031
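# Illustrative sketch of an extension taking advantage of this hook via
# extensions.wrapfunction (the extra argument name b'myext' is hypothetical):
#
#     def _extraprepare(orig, pullop, kwargs):
#         kwargs[b'myext'] = True
#         return orig(pullop, kwargs)
#
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare', _extraprepare)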
2030 2032
2031 2033 def _pullchangeset(pullop):
2032 2034 """pull changeset from unbundle into the local repo"""
2033 2035 # We delay opening the transaction as late as possible so we
2034 2036 # don't open a transaction for nothing and don't break future useful
2035 2037 # rollback calls
2036 2038 if b'changegroup' in pullop.stepsdone:
2037 2039 return
2038 2040 pullop.stepsdone.add(b'changegroup')
2039 2041 if not pullop.fetch:
2040 2042 pullop.repo.ui.status(_(b"no changes found\n"))
2041 2043 pullop.cgresult = 0
2042 2044 return
2043 2045 tr = pullop.gettransaction()
2044 2046 if pullop.heads is None and list(pullop.common) == [nullid]:
2045 2047 pullop.repo.ui.status(_(b"requesting all changes\n"))
2046 2048 elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
2047 2049 # issue1320, avoid a race if remote changed after discovery
2048 2050 pullop.heads = pullop.rheads
2049 2051
2050 2052 if pullop.remote.capable(b'getbundle'):
2051 2053 # TODO: get bundlecaps from remote
2052 2054 cg = pullop.remote.getbundle(
2053 2055 b'pull', common=pullop.common, heads=pullop.heads or pullop.rheads
2054 2056 )
2055 2057 elif pullop.heads is None:
2056 2058 with pullop.remote.commandexecutor() as e:
2057 2059 cg = e.callcommand(
2058 2060 b'changegroup', {b'nodes': pullop.fetch, b'source': b'pull',}
2059 2061 ).result()
2060 2062
2061 2063 elif not pullop.remote.capable(b'changegroupsubset'):
2062 2064 raise error.Abort(
2063 2065 _(
2064 2066 b"partial pull cannot be done because "
2065 2067 b"other repository doesn't support "
2066 2068 b"changegroupsubset."
2067 2069 )
2068 2070 )
2069 2071 else:
2070 2072 with pullop.remote.commandexecutor() as e:
2071 2073 cg = e.callcommand(
2072 2074 b'changegroupsubset',
2073 2075 {
2074 2076 b'bases': pullop.fetch,
2075 2077 b'heads': pullop.heads,
2076 2078 b'source': b'pull',
2077 2079 },
2078 2080 ).result()
2079 2081
2080 2082 bundleop = bundle2.applybundle(
2081 2083 pullop.repo, cg, tr, b'pull', pullop.remote.url()
2082 2084 )
2083 2085 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
2084 2086
2085 2087
2086 2088 def _pullphase(pullop):
2087 2089 # Get remote phases data from remote
2088 2090 if b'phases' in pullop.stepsdone:
2089 2091 return
2090 2092 remotephases = listkeys(pullop.remote, b'phases')
2091 2093 _pullapplyphases(pullop, remotephases)
2092 2094
2093 2095
2094 2096 def _pullapplyphases(pullop, remotephases):
2095 2097 """apply phase movement from observed remote state"""
2096 2098 if b'phases' in pullop.stepsdone:
2097 2099 return
2098 2100 pullop.stepsdone.add(b'phases')
2099 2101 publishing = bool(remotephases.get(b'publishing', False))
2100 2102 if remotephases and not publishing:
2101 2103 # remote is new and non-publishing
2102 2104 pheads, _dr = phases.analyzeremotephases(
2103 2105 pullop.repo, pullop.pulledsubset, remotephases
2104 2106 )
2105 2107 dheads = pullop.pulledsubset
2106 2108 else:
2107 2109 # Remote is old or publishing: all common changesets
2108 2110 # should be seen as public
2109 2111 pheads = pullop.pulledsubset
2110 2112 dheads = []
2111 2113 unfi = pullop.repo.unfiltered()
2112 2114 phase = unfi._phasecache.phase
2113 2115 rev = unfi.changelog.index.get_rev
2114 2116 public = phases.public
2115 2117 draft = phases.draft
2116 2118
2117 2119 # exclude changesets already public locally and update the others
2118 2120 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
2119 2121 if pheads:
2120 2122 tr = pullop.gettransaction()
2121 2123 phases.advanceboundary(pullop.repo, tr, public, pheads)
2122 2124
2123 2125 # exclude changesets already draft locally and update the others
2124 2126 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
2125 2127 if dheads:
2126 2128 tr = pullop.gettransaction()
2127 2129 phases.advanceboundary(pullop.repo, tr, draft, dheads)
2128 2130
2129 2131
2130 2132 def _pullbookmarks(pullop):
2131 2133 """process the remote bookmark information to update the local one"""
2132 2134 if b'bookmarks' in pullop.stepsdone:
2133 2135 return
2134 2136 pullop.stepsdone.add(b'bookmarks')
2135 2137 repo = pullop.repo
2136 2138 remotebookmarks = pullop.remotebookmarks
2137 2139 bookmod.updatefromremote(
2138 2140 repo.ui,
2139 2141 repo,
2140 2142 remotebookmarks,
2141 2143 pullop.remote.url(),
2142 2144 pullop.gettransaction,
2143 2145 explicit=pullop.explicitbookmarks,
2144 2146 )
2145 2147
2146 2148
2147 2149 def _pullobsolete(pullop):
2148 2150 """utility function to pull obsolete markers from a remote
2149 2151
2150 2152 The `gettransaction` is a function that returns the pull transaction, creating
2151 2153 one if necessary. We return the transaction to inform the calling code that
2152 2154 a new transaction has been created (when applicable).
2153 2155
2154 2156 Exists mostly to allow overriding for experimentation purposes"""
2155 2157 if b'obsmarkers' in pullop.stepsdone:
2156 2158 return
2157 2159 pullop.stepsdone.add(b'obsmarkers')
2158 2160 tr = None
2159 2161 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
2160 2162 pullop.repo.ui.debug(b'fetching remote obsolete markers\n')
2161 2163 remoteobs = listkeys(pullop.remote, b'obsolete')
2162 2164 if b'dump0' in remoteobs:
2163 2165 tr = pullop.gettransaction()
2164 2166 markers = []
2165 2167 for key in sorted(remoteobs, reverse=True):
2166 2168 if key.startswith(b'dump'):
2167 2169 data = util.b85decode(remoteobs[key])
2168 2170 version, newmarks = obsolete._readmarkers(data)
2169 2171 markers += newmarks
2170 2172 if markers:
2171 2173 pullop.repo.obsstore.add(tr, markers)
2172 2174 pullop.repo.invalidatevolatilesets()
2173 2175 return tr
2174 2176
2175 2177
2176 2178 def applynarrowacl(repo, kwargs):
2177 2179 """Apply narrow fetch access control.
2178 2180
2179 2181 This massages the named arguments for getbundle wire protocol commands
2180 2182 so requested data is filtered through access control rules.
2181 2183 """
2182 2184 ui = repo.ui
2183 2185 # TODO this assumes existence of HTTP and is a layering violation.
2184 2186 username = ui.shortuser(ui.environ.get(b'REMOTE_USER') or ui.username())
2185 2187 user_includes = ui.configlist(
2186 2188 _NARROWACL_SECTION,
2187 2189 username + b'.includes',
2188 2190 ui.configlist(_NARROWACL_SECTION, b'default.includes'),
2189 2191 )
2190 2192 user_excludes = ui.configlist(
2191 2193 _NARROWACL_SECTION,
2192 2194 username + b'.excludes',
2193 2195 ui.configlist(_NARROWACL_SECTION, b'default.excludes'),
2194 2196 )
2195 2197 if not user_includes:
2196 2198 raise error.Abort(
2197 2199 _(b"%s configuration for user %s is empty")
2198 2200 % (_NARROWACL_SECTION, username)
2199 2201 )
2200 2202
2201 2203 user_includes = [
2202 2204 b'path:.' if p == b'*' else b'path:' + p for p in user_includes
2203 2205 ]
2204 2206 user_excludes = [
2205 2207 b'path:.' if p == b'*' else b'path:' + p for p in user_excludes
2206 2208 ]
2207 2209
2208 2210 req_includes = set(kwargs.get('includepats', []))
2209 2211 req_excludes = set(kwargs.get('excludepats', []))
2210 2212
2211 2213 req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
2212 2214 req_includes, req_excludes, user_includes, user_excludes
2213 2215 )
2214 2216
2215 2217 if invalid_includes:
2216 2218 raise error.Abort(
2217 2219 _(b"The following includes are not accessible for %s: %s")
2218 2220 % (username, stringutil.pprint(invalid_includes))
2219 2221 )
2220 2222
2221 2223 new_args = {}
2222 2224 new_args.update(kwargs)
2223 2225 new_args['narrow'] = True
2224 2226 new_args['narrow_acl'] = True
2225 2227 new_args['includepats'] = req_includes
2226 2228 if req_excludes:
2227 2229 new_args['excludepats'] = req_excludes
2228 2230
2229 2231 return new_args
2230 2232
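# Illustrative configuration consumed by applynarrowacl (the section name is
# whatever _NARROWACL_SECTION is set to; user names and paths are
# hypothetical):
#
#     [<_NARROWACL_SECTION>]
#     default.includes = *
#     alice.includes = src/ docs/
#     alice.excludes = src/secret/
#
# A '*' include becomes 'path:.', other entries get a 'path:' prefix, and the
# requested include/exclude patterns are then restricted to what the ACL
# allows via narrowspec.restrictpatterns().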
2231 2233
2232 2234 def _computeellipsis(repo, common, heads, known, match, depth=None):
2233 2235 """Compute the shape of a narrowed DAG.
2234 2236
2235 2237 Args:
2236 2238 repo: The repository we're transferring.
2237 2239 common: The roots of the DAG range we're transferring.
2238 2240 May be just [nullid], which means all ancestors of heads.
2239 2241 heads: The heads of the DAG range we're transferring.
2240 2242 match: The narrowmatcher that allows us to identify relevant changes.
2241 2243 depth: If not None, only consider nodes to be full nodes if they are at
2242 2244 most depth changesets away from one of heads.
2243 2245
2244 2246 Returns:
2245 2247 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
2246 2248
2247 2249 visitnodes: The list of nodes (either full or ellipsis) which
2248 2250 need to be sent to the client.
2249 2251 relevant_nodes: The set of changelog nodes which change a file inside
2250 2252 the narrowspec. The client needs these as non-ellipsis nodes.
2251 2253 ellipsisroots: A dict of {rev: parents} that is used in
2252 2254 narrowchangegroup to produce ellipsis nodes with the
2253 2255 correct parents.
2254 2256 """
2255 2257 cl = repo.changelog
2256 2258 mfl = repo.manifestlog
2257 2259
2258 2260 clrev = cl.rev
2259 2261
2260 2262 commonrevs = {clrev(n) for n in common} | {nullrev}
2261 2263 headsrevs = {clrev(n) for n in heads}
2262 2264
2263 2265 if depth:
2264 2266 revdepth = {h: 0 for h in headsrevs}
2265 2267
2266 2268 ellipsisheads = collections.defaultdict(set)
2267 2269 ellipsisroots = collections.defaultdict(set)
2268 2270
2269 2271 def addroot(head, curchange):
2270 2272 """Add a root to an ellipsis head, splitting heads with 3 roots."""
2271 2273 ellipsisroots[head].add(curchange)
2272 2274 # Recursively split ellipsis heads with 3 roots by finding the
2273 2275 # roots' youngest common descendant which is an elided merge commit.
2274 2276 # That descendant takes 2 of the 3 roots as its own, and becomes a
2275 2277 # root of the head.
2276 2278 while len(ellipsisroots[head]) > 2:
2277 2279 child, roots = splithead(head)
2278 2280 splitroots(head, child, roots)
2279 2281 head = child # Recurse in case we just added a 3rd root
2280 2282
2281 2283 def splitroots(head, child, roots):
2282 2284 ellipsisroots[head].difference_update(roots)
2283 2285 ellipsisroots[head].add(child)
2284 2286 ellipsisroots[child].update(roots)
2285 2287 ellipsisroots[child].discard(child)
2286 2288
2287 2289 def splithead(head):
2288 2290 r1, r2, r3 = sorted(ellipsisroots[head])
2289 2291 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
2290 2292 mid = repo.revs(
2291 2293 b'sort(merge() & %d::%d & %d::%d, -rev)', nr1, head, nr2, head
2292 2294 )
2293 2295 for j in mid:
2294 2296 if j == nr2:
2295 2297 return nr2, (nr1, nr2)
2296 2298 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
2297 2299 return j, (nr1, nr2)
2298 2300 raise error.Abort(
2299 2301 _(
2300 2302 b'Failed to split up ellipsis node! head: %d, '
2301 2303 b'roots: %d %d %d'
2302 2304 )
2303 2305 % (head, r1, r2, r3)
2304 2306 )
2305 2307
2306 2308 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
2307 2309 visit = reversed(missing)
2308 2310 relevant_nodes = set()
2309 2311 visitnodes = [cl.node(m) for m in missing]
2310 2312 required = set(headsrevs) | known
2311 2313 for rev in visit:
2312 2314 clrev = cl.changelogrevision(rev)
2313 2315 ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
2314 2316 if depth is not None:
2315 2317 curdepth = revdepth[rev]
2316 2318 for p in ps:
2317 2319 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
2318 2320 needed = False
2319 2321 shallow_enough = depth is None or revdepth[rev] <= depth
2320 2322 if shallow_enough:
2321 2323 curmf = mfl[clrev.manifest].read()
2322 2324 if ps:
2323 2325 # We choose to not trust the changed files list in
2324 2326 # changesets because it's not always correct. TODO: could
2325 2327 # we trust it for the non-merge case?
2326 2328 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
2327 2329 needed = bool(curmf.diff(p1mf, match))
2328 2330 if not needed and len(ps) > 1:
2329 2331 # For merge changes, the list of changed files is not
2330 2332 # helpful, since we need to emit the merge if a file
2331 2333 # in the narrow spec has changed on either side of the
2332 2334 # merge. As a result, we do a manifest diff to check.
2333 2335 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
2334 2336 needed = bool(curmf.diff(p2mf, match))
2335 2337 else:
2336 2338 # For a root node, we need to include the node if any
2337 2339 # files in the node match the narrowspec.
2338 2340 needed = any(curmf.walk(match))
2339 2341
2340 2342 if needed:
2341 2343 for head in ellipsisheads[rev]:
2342 2344 addroot(head, rev)
2343 2345 for p in ps:
2344 2346 required.add(p)
2345 2347 relevant_nodes.add(cl.node(rev))
2346 2348 else:
2347 2349 if not ps:
2348 2350 ps = [nullrev]
2349 2351 if rev in required:
2350 2352 for head in ellipsisheads[rev]:
2351 2353 addroot(head, rev)
2352 2354 for p in ps:
2353 2355 ellipsisheads[p].add(rev)
2354 2356 else:
2355 2357 for p in ps:
2356 2358 ellipsisheads[p] |= ellipsisheads[rev]
2357 2359
2358 2360 # add common changesets as roots of their reachable ellipsis heads
2359 2361 for c in commonrevs:
2360 2362 for head in ellipsisheads[c]:
2361 2363 addroot(head, c)
2362 2364 return visitnodes, relevant_nodes, ellipsisroots
2363 2365
2364 2366
2365 2367 def caps20to10(repo, role):
2366 2368 """return a set with appropriate options to use bundle20 during getbundle"""
2367 2369 caps = {b'HG20'}
2368 2370 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
2369 2371 caps.add(b'bundle2=' + urlreq.quote(capsblob))
2370 2372 return caps
2371 2373
2372 2374
2373 2375 # List of names of steps to perform for a bundle2 for getbundle, order matters.
2374 2376 getbundle2partsorder = []
2375 2377
2376 2378 # Mapping between step name and function
2377 2379 #
2378 2380 # This exists to help extensions wrap steps if necessary
2379 2381 getbundle2partsmapping = {}
2380 2382
2381 2383
2382 2384 def getbundle2partsgenerator(stepname, idx=None):
2383 2385 """decorator for function generating bundle2 part for getbundle
2384 2386
2385 2387 The function is added to the step -> function mapping and appended to the
2386 2388 list of steps. Beware that decorated functions will be added in order
2387 2389 (this may matter).
2388 2390
2389 2391 You can only use this decorator for new steps; to wrap a step from an
2390 2392 extension, modify the getbundle2partsmapping dictionary directly."""
2391 2393
2392 2394 def dec(func):
2393 2395 assert stepname not in getbundle2partsmapping
2394 2396 getbundle2partsmapping[stepname] = func
2395 2397 if idx is None:
2396 2398 getbundle2partsorder.append(stepname)
2397 2399 else:
2398 2400 getbundle2partsorder.insert(idx, stepname)
2399 2401 return func
2400 2402
2401 2403 return dec
2402 2404
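# A hedged illustration of how this decorator is meant to be used: a new step
# registers a function that emits an extra part. The part name b'exp-example'
# and its payload are assumptions made for this example only.
@getbundle2partsgenerator(b'exp-example')
def _getbundleexamplepart(
    bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
):
    """add a small advisory part carrying the repository root (illustrative)"""
    if not b2caps:
        return
    bundler.newpart(b'output', data=repo.root, mandatory=False)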
2403 2405
2404 2406 def bundle2requested(bundlecaps):
2405 2407 if bundlecaps is not None:
2406 2408 return any(cap.startswith(b'HG2') for cap in bundlecaps)
2407 2409 return False
2408 2410
2409 2411
2410 2412 def getbundlechunks(
2411 2413 repo, source, heads=None, common=None, bundlecaps=None, **kwargs
2412 2414 ):
2413 2415 """Return chunks constituting a bundle's raw data.
2414 2416
2415 2417 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
2416 2418 passed.
2417 2419
2418 2420 Returns a 2-tuple of a dict with metadata about the generated bundle
2419 2421 and an iterator over raw chunks (of varying sizes).
2420 2422 """
2421 2423 kwargs = pycompat.byteskwargs(kwargs)
2422 2424 info = {}
2423 2425 usebundle2 = bundle2requested(bundlecaps)
2424 2426 # bundle10 case
2425 2427 if not usebundle2:
2426 2428 if bundlecaps and not kwargs.get(b'cg', True):
2427 2429 raise ValueError(
2428 2430 _(b'request for bundle10 must include changegroup')
2429 2431 )
2430 2432
2431 2433 if kwargs:
2432 2434 raise ValueError(
2433 2435 _(b'unsupported getbundle arguments: %s')
2434 2436 % b', '.join(sorted(kwargs.keys()))
2435 2437 )
2436 2438 outgoing = _computeoutgoing(repo, heads, common)
2437 2439 info[b'bundleversion'] = 1
2438 2440 return (
2439 2441 info,
2440 2442 changegroup.makestream(
2441 2443 repo, outgoing, b'01', source, bundlecaps=bundlecaps
2442 2444 ),
2443 2445 )
2444 2446
2445 2447 # bundle20 case
2446 2448 info[b'bundleversion'] = 2
2447 2449 b2caps = {}
2448 2450 for bcaps in bundlecaps:
2449 2451 if bcaps.startswith(b'bundle2='):
2450 2452 blob = urlreq.unquote(bcaps[len(b'bundle2=') :])
2451 2453 b2caps.update(bundle2.decodecaps(blob))
2452 2454 bundler = bundle2.bundle20(repo.ui, b2caps)
2453 2455
2454 2456 kwargs[b'heads'] = heads
2455 2457 kwargs[b'common'] = common
2456 2458
2457 2459 for name in getbundle2partsorder:
2458 2460 func = getbundle2partsmapping[name]
2459 2461 func(
2460 2462 bundler,
2461 2463 repo,
2462 2464 source,
2463 2465 bundlecaps=bundlecaps,
2464 2466 b2caps=b2caps,
2465 2467 **pycompat.strkwargs(kwargs)
2466 2468 )
2467 2469
2468 2470 info[b'prefercompressed'] = bundler.prefercompressed
2469 2471
2470 2472 return info, bundler.getchunks()
2471 2473
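# A hedged usage sketch: a caller (for instance the wire protocol layer)
# consumes the returned iterator of raw chunks and concatenates them into a
# bundle. The helper name, 'somerepo' and the destination path are assumptions
# made for illustration only.
def _writebundletofile(somerepo, path):
    info, chunks = getbundlechunks(somerepo, b'serve', heads=None, common=None)
    with open(path, 'wb') as fh:
        for chunk in chunks:
            fh.write(chunk)
    return info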
2472 2474
2473 2475 @getbundle2partsgenerator(b'stream2')
2474 2476 def _getbundlestream2(bundler, repo, *args, **kwargs):
2475 2477 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
2476 2478
2477 2479
2478 2480 @getbundle2partsgenerator(b'changegroup')
2479 2481 def _getbundlechangegrouppart(
2480 2482 bundler,
2481 2483 repo,
2482 2484 source,
2483 2485 bundlecaps=None,
2484 2486 b2caps=None,
2485 2487 heads=None,
2486 2488 common=None,
2487 2489 **kwargs
2488 2490 ):
2489 2491 """add a changegroup part to the requested bundle"""
2490 2492 if not kwargs.get('cg', True) or not b2caps:
2491 2493 return
2492 2494
2493 2495 version = b'01'
2494 2496 cgversions = b2caps.get(b'changegroup')
2495 2497 if cgversions: # 3.1 and 3.2 ship with an empty value
2496 2498 cgversions = [
2497 2499 v
2498 2500 for v in cgversions
2499 2501 if v in changegroup.supportedoutgoingversions(repo)
2500 2502 ]
2501 2503 if not cgversions:
2502 2504 raise error.Abort(_(b'no common changegroup version'))
2503 2505 version = max(cgversions)
2504 2506
2505 2507 outgoing = _computeoutgoing(repo, heads, common)
2506 2508 if not outgoing.missing:
2507 2509 return
2508 2510
2509 2511 if kwargs.get('narrow', False):
2510 2512 include = sorted(filter(bool, kwargs.get('includepats', [])))
2511 2513 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
2512 2514 matcher = narrowspec.match(repo.root, include=include, exclude=exclude)
2513 2515 else:
2514 2516 matcher = None
2515 2517
2516 2518 cgstream = changegroup.makestream(
2517 2519 repo, outgoing, version, source, bundlecaps=bundlecaps, matcher=matcher
2518 2520 )
2519 2521
2520 2522 part = bundler.newpart(b'changegroup', data=cgstream)
2521 2523 if cgversions:
2522 2524 part.addparam(b'version', version)
2523 2525
2524 2526 part.addparam(b'nbchanges', b'%d' % len(outgoing.missing), mandatory=False)
2525 2527
2526 2528 if b'treemanifest' in repo.requirements:
2527 2529 part.addparam(b'treemanifest', b'1')
2528 2530
2529 2531 if b'exp-sidedata-flag' in repo.requirements:
2530 2532 part.addparam(b'exp-sidedata', b'1')
2531 2533
2532 2534 if (
2533 2535 kwargs.get('narrow', False)
2534 2536 and kwargs.get('narrow_acl', False)
2535 2537 and (include or exclude)
2536 2538 ):
2537 2539 # this is mandatory because otherwise ACL clients won't work
2538 2540 narrowspecpart = bundler.newpart(b'Narrow:responsespec')
2539 2541 narrowspecpart.data = b'%s\0%s' % (
2540 2542 b'\n'.join(include),
2541 2543 b'\n'.join(exclude),
2542 2544 )
2543 2545
2544 2546
2545 2547 @getbundle2partsgenerator(b'bookmarks')
2546 2548 def _getbundlebookmarkpart(
2547 2549 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2548 2550 ):
2549 2551 """add a bookmark part to the requested bundle"""
2550 2552 if not kwargs.get('bookmarks', False):
2551 2553 return
2552 2554 if not b2caps or b'bookmarks' not in b2caps:
2553 2555 raise error.Abort(_(b'no common bookmarks exchange method'))
2554 2556 books = bookmod.listbinbookmarks(repo)
2555 2557 data = bookmod.binaryencode(books)
2556 2558 if data:
2557 2559 bundler.newpart(b'bookmarks', data=data)
2558 2560
2559 2561
2560 2562 @getbundle2partsgenerator(b'listkeys')
2561 2563 def _getbundlelistkeysparts(
2562 2564 bundler, repo, source, bundlecaps=None, b2caps=None, **kwargs
2563 2565 ):
2564 2566 """add parts containing listkeys namespaces to the requested bundle"""
2565 2567 listkeys = kwargs.get('listkeys', ())
2566 2568 for namespace in listkeys:
2567 2569 part = bundler.newpart(b'listkeys')
2568 2570 part.addparam(b'namespace', namespace)
2569 2571 keys = repo.listkeys(namespace).items()
2570 2572 part.data = pushkey.encodekeys(keys)
2571 2573
2572 2574
2573 2575 @getbundle2partsgenerator(b'obsmarkers')
2574 2576 def _getbundleobsmarkerpart(
2575 2577 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2576 2578 ):
2577 2579 """add an obsolescence markers part to the requested bundle"""
2578 2580 if kwargs.get('obsmarkers', False):
2579 2581 if heads is None:
2580 2582 heads = repo.heads()
2581 2583 subset = [c.node() for c in repo.set(b'::%ln', heads)]
2582 2584 markers = repo.obsstore.relevantmarkers(subset)
2583 2585 markers = obsutil.sortedmarkers(markers)
2584 2586 bundle2.buildobsmarkerspart(bundler, markers)
2585 2587
2586 2588
2587 2589 @getbundle2partsgenerator(b'phases')
2588 2590 def _getbundlephasespart(
2589 2591 bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, **kwargs
2590 2592 ):
2591 2593 """add phase heads part to the requested bundle"""
2592 2594 if kwargs.get('phases', False):
2593 2595 if not b2caps or b'heads' not in b2caps.get(b'phases'):
2594 2596 raise error.Abort(_(b'no common phases exchange method'))
2595 2597 if heads is None:
2596 2598 heads = repo.heads()
2597 2599
2598 2600 headsbyphase = collections.defaultdict(set)
2599 2601 if repo.publishing():
2600 2602 headsbyphase[phases.public] = heads
2601 2603 else:
2602 2604 # find the appropriate heads to move
2603 2605
2604 2606 phase = repo._phasecache.phase
2605 2607 node = repo.changelog.node
2606 2608 rev = repo.changelog.rev
2607 2609 for h in heads:
2608 2610 headsbyphase[phase(repo, rev(h))].add(h)
2609 2611 seenphases = list(headsbyphase.keys())
2610 2612
2611 2613 # We do not handle anything but public and draft phases for now
2612 2614 if seenphases:
2613 2615 assert max(seenphases) <= phases.draft
2614 2616
2615 2617 # if client is pulling non-public changesets, we need to find
2616 2618 # intermediate public heads.
2617 2619 draftheads = headsbyphase.get(phases.draft, set())
2618 2620 if draftheads:
2619 2621 publicheads = headsbyphase.get(phases.public, set())
2620 2622
2621 2623 revset = b'heads(only(%ln, %ln) and public())'
2622 2624 extraheads = repo.revs(revset, draftheads, publicheads)
2623 2625 for r in extraheads:
2624 2626 headsbyphase[phases.public].add(node(r))
2625 2627
2626 2628 # transform data in a format used by the encoding function
2627 2629 phasemapping = []
2628 2630 for phase in phases.allphases:
2629 2631 phasemapping.append(sorted(headsbyphase[phase]))
2630 2632
2631 2633 # generate the actual part
2632 2634 phasedata = phases.binaryencode(phasemapping)
2633 2635 bundler.newpart(b'phase-heads', data=phasedata)
2634 2636
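# A hedged illustration of the encoding above: for a repository with one
# public head P and one draft head D (nodes invented for the example),
# headsbyphase ends up as {public: {P}, draft: {D}}, and phasemapping becomes
# one sorted list per entry of phases.allphases, roughly [[P], [D], [], ...],
# before being packed by phases.binaryencode().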
2635 2637
2636 2638 @getbundle2partsgenerator(b'hgtagsfnodes')
2637 2639 def _getbundletagsfnodes(
2638 2640 bundler,
2639 2641 repo,
2640 2642 source,
2641 2643 bundlecaps=None,
2642 2644 b2caps=None,
2643 2645 heads=None,
2644 2646 common=None,
2645 2647 **kwargs
2646 2648 ):
2647 2649 """Transfer the .hgtags filenodes mapping.
2648 2650
2649 2651 Only values for heads in this bundle will be transferred.
2650 2652
2651 2653 The part data consists of pairs of 20 byte changeset node and .hgtags
2652 2654 filenodes raw values.
2653 2655 """
2654 2656 # Don't send unless:
2655 2657 # - changesets are being exchanged,
2656 2658 # - the client supports it.
2657 2659 if not b2caps or not (kwargs.get('cg', True) and b'hgtagsfnodes' in b2caps):
2658 2660 return
2659 2661
2660 2662 outgoing = _computeoutgoing(repo, heads, common)
2661 2663 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2662 2664
2663 2665
2664 2666 @getbundle2partsgenerator(b'cache:rev-branch-cache')
2665 2667 def _getbundlerevbranchcache(
2666 2668 bundler,
2667 2669 repo,
2668 2670 source,
2669 2671 bundlecaps=None,
2670 2672 b2caps=None,
2671 2673 heads=None,
2672 2674 common=None,
2673 2675 **kwargs
2674 2676 ):
2675 2677 """Transfer the rev-branch-cache mapping
2676 2678
2677 2679 The payload is a series of data related to each branch
2678 2680
2679 2681 1) branch name length
2680 2682 2) number of open heads
2681 2683 3) number of closed heads
2682 2684 4) open heads nodes
2683 2685 5) closed heads nodes
2684 2686 """
2685 2687 # Don't send unless:
2686 2688 # - changesets are being exchanged,
2687 2689 # - the client supports it.
2688 2690 # - narrow bundle isn't in play (not currently compatible).
2689 2691 if (
2690 2692 not kwargs.get('cg', True)
2691 2693 or not b2caps
2692 2694 or b'rev-branch-cache' not in b2caps
2693 2695 or kwargs.get('narrow', False)
2694 2696 or repo.ui.has_section(_NARROWACL_SECTION)
2695 2697 ):
2696 2698 return
2697 2699
2698 2700 outgoing = _computeoutgoing(repo, heads, common)
2699 2701 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2700 2702
2701 2703
2702 2704 def check_heads(repo, their_heads, context):
2703 2705 """check if the heads of a repo have been modified
2704 2706
2705 2707 Used by peer for unbundling.
2706 2708 """
2707 2709 heads = repo.heads()
2708 heads_hash = hashlib.sha1(b''.join(sorted(heads))).digest()
2710 heads_hash = hashutil.sha1(b''.join(sorted(heads))).digest()
2709 2711 if not (
2710 2712 their_heads == [b'force']
2711 2713 or their_heads == heads
2712 2714 or their_heads == [b'hashed', heads_hash]
2713 2715 ):
2714 2716 # someone else committed/pushed/unbundled while we
2715 2717 # were transferring data
2716 2718 raise error.PushRaced(
2717 2719 b'repository changed while %s - please try again' % context
2718 2720 )
2719 2721
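# A hedged sketch of the client-side counterpart of check_heads(): instead of
# sending its full view of the remote heads, a peer may send the compact
# [b'hashed', <sha1>] form, computed exactly as above (using the module-level
# hashutil import). The helper and its 'remoteheads' parameter are assumptions
# for illustration, not an API defined here.
def _hashedheadsarg(remoteheads):
    """return the ['hashed', sha1] form accepted by check_heads"""
    return [b'hashed', hashutil.sha1(b''.join(sorted(remoteheads))).digest()]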
2720 2722
2721 2723 def unbundle(repo, cg, heads, source, url):
2722 2724 """Apply a bundle to a repo.
2723 2725
2724 2726 this function makes sure the repo is locked during the application and has a
2725 2727 mechanism to check that no push race occurred between the creation of the
2726 2728 bundle and its application.
2727 2729
2728 2730 If the push was raced, a PushRaced exception is raised."""
2729 2731 r = 0
2730 2732 # need a transaction when processing a bundle2 stream
2731 2733 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2732 2734 lockandtr = [None, None, None]
2733 2735 recordout = None
2734 2736 # quick fix for output mismatch with bundle2 in 3.4
2735 2737 captureoutput = repo.ui.configbool(
2736 2738 b'experimental', b'bundle2-output-capture'
2737 2739 )
2738 2740 if url.startswith(b'remote:http:') or url.startswith(b'remote:https:'):
2739 2741 captureoutput = True
2740 2742 try:
2741 2743 # note: outside bundle1, 'heads' is expected to be empty and this
2742 2744 # 'check_heads' call will be a no-op
2743 2745 check_heads(repo, heads, b'uploading changes')
2744 2746 # push can proceed
2745 2747 if not isinstance(cg, bundle2.unbundle20):
2746 2748 # legacy case: bundle1 (changegroup 01)
2747 2749 txnname = b"\n".join([source, util.hidepassword(url)])
2748 2750 with repo.lock(), repo.transaction(txnname) as tr:
2749 2751 op = bundle2.applybundle(repo, cg, tr, source, url)
2750 2752 r = bundle2.combinechangegroupresults(op)
2751 2753 else:
2752 2754 r = None
2753 2755 try:
2754 2756
2755 2757 def gettransaction():
2756 2758 if not lockandtr[2]:
2757 2759 if not bookmod.bookmarksinstore(repo):
2758 2760 lockandtr[0] = repo.wlock()
2759 2761 lockandtr[1] = repo.lock()
2760 2762 lockandtr[2] = repo.transaction(source)
2761 2763 lockandtr[2].hookargs[b'source'] = source
2762 2764 lockandtr[2].hookargs[b'url'] = url
2763 2765 lockandtr[2].hookargs[b'bundle2'] = b'1'
2764 2766 return lockandtr[2]
2765 2767
2766 2768 # Do greedy locking by default until we're satisfied with lazy
2767 2769 # locking.
2768 2770 if not repo.ui.configbool(
2769 2771 b'experimental', b'bundle2lazylocking'
2770 2772 ):
2771 2773 gettransaction()
2772 2774
2773 2775 op = bundle2.bundleoperation(
2774 2776 repo,
2775 2777 gettransaction,
2776 2778 captureoutput=captureoutput,
2777 2779 source=b'push',
2778 2780 )
2779 2781 try:
2780 2782 op = bundle2.processbundle(repo, cg, op=op)
2781 2783 finally:
2782 2784 r = op.reply
2783 2785 if captureoutput and r is not None:
2784 2786 repo.ui.pushbuffer(error=True, subproc=True)
2785 2787
2786 2788 def recordout(output):
2787 2789 r.newpart(b'output', data=output, mandatory=False)
2788 2790
2789 2791 if lockandtr[2] is not None:
2790 2792 lockandtr[2].close()
2791 2793 except BaseException as exc:
2792 2794 exc.duringunbundle2 = True
2793 2795 if captureoutput and r is not None:
2794 2796 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2795 2797
2796 2798 def recordout(output):
2797 2799 part = bundle2.bundlepart(
2798 2800 b'output', data=output, mandatory=False
2799 2801 )
2800 2802 parts.append(part)
2801 2803
2802 2804 raise
2803 2805 finally:
2804 2806 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2805 2807 if recordout is not None:
2806 2808 recordout(repo.ui.popbuffer())
2807 2809 return r
2808 2810
2809 2811
2810 2812 def _maybeapplyclonebundle(pullop):
2811 2813 """Apply a clone bundle from a remote, if possible."""
2812 2814
2813 2815 repo = pullop.repo
2814 2816 remote = pullop.remote
2815 2817
2816 2818 if not repo.ui.configbool(b'ui', b'clonebundles'):
2817 2819 return
2818 2820
2819 2821 # Only run if local repo is empty.
2820 2822 if len(repo):
2821 2823 return
2822 2824
2823 2825 if pullop.heads:
2824 2826 return
2825 2827
2826 2828 if not remote.capable(b'clonebundles'):
2827 2829 return
2828 2830
2829 2831 with remote.commandexecutor() as e:
2830 2832 res = e.callcommand(b'clonebundles', {}).result()
2831 2833
2832 2834 # If we call the wire protocol command, that's good enough to record the
2833 2835 # attempt.
2834 2836 pullop.clonebundleattempted = True
2835 2837
2836 2838 entries = parseclonebundlesmanifest(repo, res)
2837 2839 if not entries:
2838 2840 repo.ui.note(
2839 2841 _(
2840 2842 b'no clone bundles available on remote; '
2841 2843 b'falling back to regular clone\n'
2842 2844 )
2843 2845 )
2844 2846 return
2845 2847
2846 2848 entries = filterclonebundleentries(
2847 2849 repo, entries, streamclonerequested=pullop.streamclonerequested
2848 2850 )
2849 2851
2850 2852 if not entries:
2851 2853 # There is a thundering herd concern here. However, if a server
2852 2854 # operator doesn't advertise bundles appropriate for its clients,
2853 2855 # they deserve what's coming. Furthermore, from a client's
2854 2856 # perspective, no automatic fallback would mean not being able to
2855 2857 # clone!
2856 2858 repo.ui.warn(
2857 2859 _(
2858 2860 b'no compatible clone bundles available on server; '
2859 2861 b'falling back to regular clone\n'
2860 2862 )
2861 2863 )
2862 2864 repo.ui.warn(
2863 2865 _(b'(you may want to report this to the server operator)\n')
2864 2866 )
2865 2867 return
2866 2868
2867 2869 entries = sortclonebundleentries(repo.ui, entries)
2868 2870
2869 2871 url = entries[0][b'URL']
2870 2872 repo.ui.status(_(b'applying clone bundle from %s\n') % url)
2871 2873 if trypullbundlefromurl(repo.ui, repo, url):
2872 2874 repo.ui.status(_(b'finished applying clone bundle\n'))
2873 2875 # Bundle failed.
2874 2876 #
2875 2877 # We abort by default to avoid the thundering herd of
2876 2878 # clients flooding a server that was expecting expensive
2877 2879 # clone load to be offloaded.
2878 2880 elif repo.ui.configbool(b'ui', b'clonebundlefallback'):
2879 2881 repo.ui.warn(_(b'falling back to normal clone\n'))
2880 2882 else:
2881 2883 raise error.Abort(
2882 2884 _(b'error applying bundle'),
2883 2885 hint=_(
2884 2886 b'if this error persists, consider contacting '
2885 2887 b'the server operator or disable clone '
2886 2888 b'bundles via '
2887 2889 b'"--config ui.clonebundles=false"'
2888 2890 ),
2889 2891 )
2890 2892
2891 2893
2892 2894 def parseclonebundlesmanifest(repo, s):
2893 2895 """Parses the raw text of a clone bundles manifest.
2894 2896
2895 2897 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2896 2898 to the URL; the other keys are the attributes for the entry.
2897 2899 """
2898 2900 m = []
2899 2901 for line in s.splitlines():
2900 2902 fields = line.split()
2901 2903 if not fields:
2902 2904 continue
2903 2905 attrs = {b'URL': fields[0]}
2904 2906 for rawattr in fields[1:]:
2905 2907 key, value = rawattr.split(b'=', 1)
2906 2908 key = urlreq.unquote(key)
2907 2909 value = urlreq.unquote(value)
2908 2910 attrs[key] = value
2909 2911
2910 2912 # Parse BUNDLESPEC into components. This makes client-side
2911 2913 # preferences easier to specify since you can prefer a single
2912 2914 # component of the BUNDLESPEC.
2913 2915 if key == b'BUNDLESPEC':
2914 2916 try:
2915 2917 bundlespec = parsebundlespec(repo, value)
2916 2918 attrs[b'COMPRESSION'] = bundlespec.compression
2917 2919 attrs[b'VERSION'] = bundlespec.version
2918 2920 except error.InvalidBundleSpecification:
2919 2921 pass
2920 2922 except error.UnsupportedBundleSpecification:
2921 2923 pass
2922 2924
2923 2925 m.append(attrs)
2924 2926
2925 2927 return m
2926 2928
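# A hedged example of the manifest format (URL and attribute values invented
# for illustration). A line such as
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# is parsed into roughly:
#
#   {b'URL': b'https://example.com/full.hg',
#    b'BUNDLESPEC': b'gzip-v2',
#    b'COMPRESSION': b'gzip',
#    b'VERSION': b'v2',
#    b'REQUIRESNI': b'true'}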
2927 2929
2928 2930 def isstreamclonespec(bundlespec):
2929 2931 # Stream clone v1
2930 2932 if bundlespec.wirecompression == b'UN' and bundlespec.wireversion == b's1':
2931 2933 return True
2932 2934
2933 2935 # Stream clone v2
2934 2936 if (
2935 2937 bundlespec.wirecompression == b'UN'
2936 2938 and bundlespec.wireversion == b'02'
2937 2939 and bundlespec.contentopts.get(b'streamv2')
2938 2940 ):
2939 2941 return True
2940 2942
2941 2943 return False
2942 2944
2943 2945
2944 2946 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2945 2947 """Remove incompatible clone bundle manifest entries.
2946 2948
2947 2949 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2948 2950 and returns a new list consisting of only the entries that this client
2949 2951 should be able to apply.
2950 2952
2951 2953 There is no guarantee we'll be able to apply all returned entries because
2952 2954 the metadata we use to filter on may be missing or wrong.
2953 2955 """
2954 2956 newentries = []
2955 2957 for entry in entries:
2956 2958 spec = entry.get(b'BUNDLESPEC')
2957 2959 if spec:
2958 2960 try:
2959 2961 bundlespec = parsebundlespec(repo, spec, strict=True)
2960 2962
2961 2963 # If a stream clone was requested, filter out non-streamclone
2962 2964 # entries.
2963 2965 if streamclonerequested and not isstreamclonespec(bundlespec):
2964 2966 repo.ui.debug(
2965 2967 b'filtering %s because not a stream clone\n'
2966 2968 % entry[b'URL']
2967 2969 )
2968 2970 continue
2969 2971
2970 2972 except error.InvalidBundleSpecification as e:
2971 2973 repo.ui.debug(stringutil.forcebytestr(e) + b'\n')
2972 2974 continue
2973 2975 except error.UnsupportedBundleSpecification as e:
2974 2976 repo.ui.debug(
2975 2977 b'filtering %s because unsupported bundle '
2976 2978 b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
2977 2979 )
2978 2980 continue
2979 2981 # If we don't have a spec and requested a stream clone, we don't know
2980 2982 # what the entry is so don't attempt to apply it.
2981 2983 elif streamclonerequested:
2982 2984 repo.ui.debug(
2983 2985 b'filtering %s because cannot determine if a stream '
2984 2986 b'clone bundle\n' % entry[b'URL']
2985 2987 )
2986 2988 continue
2987 2989
2988 2990 if b'REQUIRESNI' in entry and not sslutil.hassni:
2989 2991 repo.ui.debug(
2990 2992 b'filtering %s because SNI not supported\n' % entry[b'URL']
2991 2993 )
2992 2994 continue
2993 2995
2994 2996 newentries.append(entry)
2995 2997
2996 2998 return newentries
2997 2999
2998 3000
2999 3001 class clonebundleentry(object):
3000 3002 """Represents an item in a clone bundles manifest.
3001 3003
3002 3004 This rich class is needed to support sorting since sorted() in Python 3
3003 3005 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
3004 3006 won't work.
3005 3007 """
3006 3008
3007 3009 def __init__(self, value, prefers):
3008 3010 self.value = value
3009 3011 self.prefers = prefers
3010 3012
3011 3013 def _cmp(self, other):
3012 3014 for prefkey, prefvalue in self.prefers:
3013 3015 avalue = self.value.get(prefkey)
3014 3016 bvalue = other.value.get(prefkey)
3015 3017
3016 3018 # Special case: b lacks the attribute and a matches the preference exactly.
3017 3019 if avalue is not None and bvalue is None and avalue == prefvalue:
3018 3020 return -1
3019 3021
3020 3022 # Special case: a lacks the attribute and b matches the preference exactly.
3021 3023 if bvalue is not None and avalue is None and bvalue == prefvalue:
3022 3024 return 1
3023 3025
3024 3026 # We can't compare unless attribute present on both.
3025 3027 if avalue is None or bvalue is None:
3026 3028 continue
3027 3029
3028 3030 # Same values should fall back to next attribute.
3029 3031 if avalue == bvalue:
3030 3032 continue
3031 3033
3032 3034 # Exact matches come first.
3033 3035 if avalue == prefvalue:
3034 3036 return -1
3035 3037 if bvalue == prefvalue:
3036 3038 return 1
3037 3039
3038 3040 # Fall back to next attribute.
3039 3041 continue
3040 3042
3041 3043 # If we got here we couldn't sort by attributes and prefers. Fall
3042 3044 # back to index order.
3043 3045 return 0
3044 3046
3045 3047 def __lt__(self, other):
3046 3048 return self._cmp(other) < 0
3047 3049
3048 3050 def __gt__(self, other):
3049 3051 return self._cmp(other) > 0
3050 3052
3051 3053 def __eq__(self, other):
3052 3054 return self._cmp(other) == 0
3053 3055
3054 3056 def __le__(self, other):
3055 3057 return self._cmp(other) <= 0
3056 3058
3057 3059 def __ge__(self, other):
3058 3060 return self._cmp(other) >= 0
3059 3061
3060 3062 def __ne__(self, other):
3061 3063 return self._cmp(other) != 0
3062 3064
3063 3065
3064 3066 def sortclonebundleentries(ui, entries):
3065 3067 prefers = ui.configlist(b'ui', b'clonebundleprefers')
3066 3068 if not prefers:
3067 3069 return list(entries)
3068 3070
3069 3071 prefers = [p.split(b'=', 1) for p in prefers]
3070 3072
3071 3073 items = sorted(clonebundleentry(v, prefers) for v in entries)
3072 3074 return [i.value for i in items]
3073 3075
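# A hedged configuration example (values assumed for illustration): with
#
#   [ui]
#   clonebundleprefers = VERSION=v2, COMPRESSION=zstd
#
# entries matching VERSION=v2 sort before the rest, ties among them are broken
# by COMPRESSION=zstd, and entries the preferences cannot distinguish keep
# their original manifest order.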
3074 3076
3075 3077 def trypullbundlefromurl(ui, repo, url):
3076 3078 """Attempt to apply a bundle from a URL."""
3077 3079 with repo.lock(), repo.transaction(b'bundleurl') as tr:
3078 3080 try:
3079 3081 fh = urlmod.open(ui, url)
3080 3082 cg = readbundle(ui, fh, b'stream')
3081 3083
3082 3084 if isinstance(cg, streamclone.streamcloneapplier):
3083 3085 cg.apply(repo)
3084 3086 else:
3085 3087 bundle2.applybundle(repo, cg, tr, b'clonebundles', url)
3086 3088 return True
3087 3089 except urlerr.httperror as e:
3088 3090 ui.warn(
3089 3091 _(b'HTTP error fetching bundle: %s\n')
3090 3092 % stringutil.forcebytestr(e)
3091 3093 )
3092 3094 except urlerr.urlerror as e:
3093 3095 ui.warn(
3094 3096 _(b'error fetching bundle: %s\n')
3095 3097 % stringutil.forcebytestr(e.reason)
3096 3098 )
3097 3099
3098 3100 return False
@@ -1,1460 +1,1459 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 import hashlib
13 12 import os
14 13 import shutil
15 14 import stat
16 15
17 16 from .i18n import _
18 17 from .node import nullid
19 18 from .pycompat import getattr
20 19
21 20 from . import (
22 21 bookmarks,
23 22 bundlerepo,
24 23 cacheutil,
25 24 cmdutil,
26 25 destutil,
27 26 discovery,
28 27 error,
29 28 exchange,
30 29 extensions,
31 30 httppeer,
32 31 localrepo,
33 32 lock,
34 33 logcmdutil,
35 34 logexchange,
36 35 merge as mergemod,
37 36 narrowspec,
38 37 node,
39 38 phases,
40 39 pycompat,
41 40 scmutil,
42 41 sshpeer,
43 42 statichttprepo,
44 43 ui as uimod,
45 44 unionrepo,
46 45 url,
47 46 util,
48 47 verify as verifymod,
49 48 vfs as vfsmod,
50 49 )
51
50 from .utils import hashutil
52 51 from .interfaces import repository as repositorymod
53 52
54 53 release = lock.release
55 54
56 55 # shared features
57 56 sharedbookmarks = b'bookmarks'
58 57
59 58
60 59 def _local(path):
61 60 path = util.expandpath(util.urllocalpath(path))
62 61
63 62 try:
64 63 isfile = os.path.isfile(path)
65 64 # Python 2 raises TypeError, Python 3 ValueError.
66 65 except (TypeError, ValueError) as e:
67 66 raise error.Abort(
68 67 _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
69 68 )
70 69
71 70 return isfile and bundlerepo or localrepo
72 71
73 72
74 73 def addbranchrevs(lrepo, other, branches, revs):
75 74 peer = other.peer() # a courtesy to callers using a localrepo for other
76 75 hashbranch, branches = branches
77 76 if not hashbranch and not branches:
78 77 x = revs or None
79 78 if revs:
80 79 y = revs[0]
81 80 else:
82 81 y = None
83 82 return x, y
84 83 if revs:
85 84 revs = list(revs)
86 85 else:
87 86 revs = []
88 87
89 88 if not peer.capable(b'branchmap'):
90 89 if branches:
91 90 raise error.Abort(_(b"remote branch lookup not supported"))
92 91 revs.append(hashbranch)
93 92 return revs, revs[0]
94 93
95 94 with peer.commandexecutor() as e:
96 95 branchmap = e.callcommand(b'branchmap', {}).result()
97 96
98 97 def primary(branch):
99 98 if branch == b'.':
100 99 if not lrepo:
101 100 raise error.Abort(_(b"dirstate branch not accessible"))
102 101 branch = lrepo.dirstate.branch()
103 102 if branch in branchmap:
104 103 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
105 104 return True
106 105 else:
107 106 return False
108 107
109 108 for branch in branches:
110 109 if not primary(branch):
111 110 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
112 111 if hashbranch:
113 112 if not primary(hashbranch):
114 113 revs.append(hashbranch)
115 114 return revs, revs[0]
116 115
117 116
118 117 def parseurl(path, branches=None):
119 118 '''parse url#branch, returning (url, (branch, branches))'''
120 119
121 120 u = util.url(path)
122 121 branch = None
123 122 if u.fragment:
124 123 branch = u.fragment
125 124 u.fragment = None
126 125 return bytes(u), (branch, branches or [])
127 126
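# A quick hedged illustration (the URL is invented):
#
#   parseurl(b'https://example.com/repo#stable', [b'default'])
#
# returns roughly (b'https://example.com/repo', (b'stable', [b'default'])).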
128 127
129 128 schemes = {
130 129 b'bundle': bundlerepo,
131 130 b'union': unionrepo,
132 131 b'file': _local,
133 132 b'http': httppeer,
134 133 b'https': httppeer,
135 134 b'ssh': sshpeer,
136 135 b'static-http': statichttprepo,
137 136 }
138 137
139 138
140 139 def _peerlookup(path):
141 140 u = util.url(path)
142 141 scheme = u.scheme or b'file'
143 142 thing = schemes.get(scheme) or schemes[b'file']
144 143 try:
145 144 return thing(path)
146 145 except TypeError:
147 146 # we can't test callable(thing) because 'thing' can be an unloaded
148 147 # module that implements __call__
149 148 if not util.safehasattr(thing, b'instance'):
150 149 raise
151 150 return thing
152 151
153 152
154 153 def islocal(repo):
155 154 '''return true if repo (or path pointing to repo) is local'''
156 155 if isinstance(repo, bytes):
157 156 try:
158 157 return _peerlookup(repo).islocal(repo)
159 158 except AttributeError:
160 159 return False
161 160 return repo.local()
162 161
163 162
164 163 def openpath(ui, path, sendaccept=True):
165 164 '''open path with open if local, url.open if remote'''
166 165 pathurl = util.url(path, parsequery=False, parsefragment=False)
167 166 if pathurl.islocal():
168 167 return util.posixfile(pathurl.localpath(), b'rb')
169 168 else:
170 169 return url.open(ui, path, sendaccept=sendaccept)
171 170
172 171
173 172 # a list of (ui, repo) functions called for wire peer initialization
174 173 wirepeersetupfuncs = []
175 174
176 175
177 176 def _peerorrepo(
178 177 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None
179 178 ):
180 179 """return a repository object for the specified path"""
181 180 obj = _peerlookup(path).instance(
182 181 ui, path, create, intents=intents, createopts=createopts
183 182 )
184 183 ui = getattr(obj, "ui", ui)
185 184 for f in presetupfuncs or []:
186 185 f(ui, obj)
187 186 ui.log(b'extension', b'- executing reposetup hooks\n')
188 187 with util.timedcm('all reposetup') as allreposetupstats:
189 188 for name, module in extensions.extensions(ui):
190 189 ui.log(b'extension', b' - running reposetup for %s\n', name)
191 190 hook = getattr(module, 'reposetup', None)
192 191 if hook:
193 192 with util.timedcm('reposetup %r', name) as stats:
194 193 hook(ui, obj)
195 194 ui.log(
196 195 b'extension', b' > reposetup for %s took %s\n', name, stats
197 196 )
198 197 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
199 198 if not obj.local():
200 199 for f in wirepeersetupfuncs:
201 200 f(ui, obj)
202 201 return obj
203 202
204 203
205 204 def repository(
206 205 ui,
207 206 path=b'',
208 207 create=False,
209 208 presetupfuncs=None,
210 209 intents=None,
211 210 createopts=None,
212 211 ):
213 212 """return a repository object for the specified path"""
214 213 peer = _peerorrepo(
215 214 ui,
216 215 path,
217 216 create,
218 217 presetupfuncs=presetupfuncs,
219 218 intents=intents,
220 219 createopts=createopts,
221 220 )
222 221 repo = peer.local()
223 222 if not repo:
224 223 raise error.Abort(
225 224 _(b"repository '%s' is not local") % (path or peer.url())
226 225 )
227 226 return repo.filtered(b'visible')
228 227
229 228
230 229 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
231 230 '''return a repository peer for the specified path'''
232 231 rui = remoteui(uiorrepo, opts)
233 232 return _peerorrepo(
234 233 rui, path, create, intents=intents, createopts=createopts
235 234 ).peer()
236 235
237 236
238 237 def defaultdest(source):
239 238 '''return default destination of clone if none is given
240 239
241 240 >>> defaultdest(b'foo')
242 241 'foo'
243 242 >>> defaultdest(b'/foo/bar')
244 243 'bar'
245 244 >>> defaultdest(b'/')
246 245 ''
247 246 >>> defaultdest(b'')
248 247 ''
249 248 >>> defaultdest(b'http://example.org/')
250 249 ''
251 250 >>> defaultdest(b'http://example.org/foo/')
252 251 'foo'
253 252 '''
254 253 path = util.url(source).path
255 254 if not path:
256 255 return b''
257 256 return os.path.basename(os.path.normpath(path))
258 257
259 258
260 259 def sharedreposource(repo):
261 260 """Returns repository object for source repository of a shared repo.
262 261
263 262 If repo is not a shared repository, returns None.
264 263 """
265 264 if repo.sharedpath == repo.path:
266 265 return None
267 266
268 267 if util.safehasattr(repo, b'srcrepo') and repo.srcrepo:
269 268 return repo.srcrepo
270 269
271 270 # the sharedpath always ends in the .hg directory; we want the path to the repo
272 271 source = repo.vfs.split(repo.sharedpath)[0]
273 272 srcurl, branches = parseurl(source)
274 273 srcrepo = repository(repo.ui, srcurl)
275 274 repo.srcrepo = srcrepo
276 275 return srcrepo
277 276
278 277
279 278 def share(
280 279 ui,
281 280 source,
282 281 dest=None,
283 282 update=True,
284 283 bookmarks=True,
285 284 defaultpath=None,
286 285 relative=False,
287 286 ):
288 287 '''create a shared repository'''
289 288
290 289 if not islocal(source):
291 290 raise error.Abort(_(b'can only share local repositories'))
292 291
293 292 if not dest:
294 293 dest = defaultdest(source)
295 294 else:
296 295 dest = ui.expandpath(dest)
297 296
298 297 if isinstance(source, bytes):
299 298 origsource = ui.expandpath(source)
300 299 source, branches = parseurl(origsource)
301 300 srcrepo = repository(ui, source)
302 301 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
303 302 else:
304 303 srcrepo = source.local()
305 304 checkout = None
306 305
307 306 shareditems = set()
308 307 if bookmarks:
309 308 shareditems.add(sharedbookmarks)
310 309
311 310 r = repository(
312 311 ui,
313 312 dest,
314 313 create=True,
315 314 createopts={
316 315 b'sharedrepo': srcrepo,
317 316 b'sharedrelative': relative,
318 317 b'shareditems': shareditems,
319 318 },
320 319 )
321 320
322 321 postshare(srcrepo, r, defaultpath=defaultpath)
323 322 r = repository(ui, dest)
324 323 _postshareupdate(r, update, checkout=checkout)
325 324 return r
326 325
327 326
328 327 def unshare(ui, repo):
329 328 """convert a shared repository to a normal one
330 329
331 330 Copy the store data to the repo and remove the sharedpath data.
332 331
333 332 Returns a new repository object representing the unshared repository.
334 333
335 334 The passed repository object is not usable after this function is
336 335 called.
337 336 """
338 337
339 338 with repo.lock():
340 339 # we use locks here because if we race with commit, we
341 340 # can end up with extra data in the cloned revlogs that's
342 341 # not pointed to by changesets, thus causing verify to
343 342 # fail
344 343 destlock = copystore(ui, repo, repo.path)
345 344 with destlock or util.nullcontextmanager():
346 345
347 346 sharefile = repo.vfs.join(b'sharedpath')
348 347 util.rename(sharefile, sharefile + b'.old')
349 348
350 349 repo.requirements.discard(b'shared')
351 350 repo.requirements.discard(b'relshared')
352 351 repo._writerequirements()
353 352
354 353 # Removing share changes some fundamental properties of the repo instance.
355 354 # So we instantiate a new repo object and operate on it rather than
356 355 # try to keep the existing repo usable.
357 356 newrepo = repository(repo.baseui, repo.root, create=False)
358 357
359 358 # TODO: figure out how to access subrepos that exist, but were previously
360 359 # removed from .hgsub
361 360 c = newrepo[b'.']
362 361 subs = c.substate
363 362 for s in sorted(subs):
364 363 c.sub(s).unshare()
365 364
366 365 localrepo.poisonrepository(repo)
367 366
368 367 return newrepo
369 368
370 369
371 370 def postshare(sourcerepo, destrepo, defaultpath=None):
372 371 """Called after a new shared repo is created.
373 372
374 373 The new repo only has a requirements file and a pointer to the source.
375 374 This function configures additional shared data.
376 375
377 376 Extensions can wrap this function and write additional entries to
378 377 destrepo/.hg/shared to indicate additional pieces of data to be shared.
379 378 """
380 379 default = defaultpath or sourcerepo.ui.config(b'paths', b'default')
381 380 if default:
382 381 template = b'[paths]\ndefault = %s\n'
383 382 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % default))
384 383 if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
385 384 with destrepo.wlock():
386 385 narrowspec.copytoworkingcopy(destrepo)
387 386
388 387
389 388 def _postshareupdate(repo, update, checkout=None):
390 389 """Maybe perform a working directory update after a shared repo is created.
391 390
392 391 ``update`` can be a boolean or a revision to update to.
393 392 """
394 393 if not update:
395 394 return
396 395
397 396 repo.ui.status(_(b"updating working directory\n"))
398 397 if update is not True:
399 398 checkout = update
400 399 for test in (checkout, b'default', b'tip'):
401 400 if test is None:
402 401 continue
403 402 try:
404 403 uprev = repo.lookup(test)
405 404 break
406 405 except error.RepoLookupError:
407 406 continue
408 407 _update(repo, uprev)
409 408
410 409
411 410 def copystore(ui, srcrepo, destpath):
412 411 '''copy files from the store of srcrepo into destpath
413 412
414 413 returns destlock
415 414 '''
416 415 destlock = None
417 416 try:
418 417 hardlink = None
419 418 topic = _(b'linking') if hardlink else _(b'copying')
420 419 with ui.makeprogress(topic, unit=_(b'files')) as progress:
421 420 num = 0
422 421 srcpublishing = srcrepo.publishing()
423 422 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
424 423 dstvfs = vfsmod.vfs(destpath)
425 424 for f in srcrepo.store.copylist():
426 425 if srcpublishing and f.endswith(b'phaseroots'):
427 426 continue
428 427 dstbase = os.path.dirname(f)
429 428 if dstbase and not dstvfs.exists(dstbase):
430 429 dstvfs.mkdir(dstbase)
431 430 if srcvfs.exists(f):
432 431 if f.endswith(b'data'):
433 432 # 'dstbase' may be empty (e.g. revlog format 0)
434 433 lockfile = os.path.join(dstbase, b"lock")
435 434 # lock to avoid premature writing to the target
436 435 destlock = lock.lock(dstvfs, lockfile)
437 436 hardlink, n = util.copyfiles(
438 437 srcvfs.join(f), dstvfs.join(f), hardlink, progress
439 438 )
440 439 num += n
441 440 if hardlink:
442 441 ui.debug(b"linked %d files\n" % num)
443 442 else:
444 443 ui.debug(b"copied %d files\n" % num)
445 444 return destlock
446 445 except: # re-raises
447 446 release(destlock)
448 447 raise
449 448
450 449
451 450 def clonewithshare(
452 451 ui,
453 452 peeropts,
454 453 sharepath,
455 454 source,
456 455 srcpeer,
457 456 dest,
458 457 pull=False,
459 458 rev=None,
460 459 update=True,
461 460 stream=False,
462 461 ):
463 462 """Perform a clone using a shared repo.
464 463
465 464 The store for the repository will be located at <sharepath>/.hg. The
466 465 specified revisions will be cloned or pulled from "source". A shared repo
467 466 will be created at "dest" and a working copy will be created if "update" is
468 467 True.
469 468 """
470 469 revs = None
471 470 if rev:
472 471 if not srcpeer.capable(b'lookup'):
473 472 raise error.Abort(
474 473 _(
475 474 b"src repository does not support "
476 475 b"revision lookup and so doesn't "
477 476 b"support clone by revision"
478 477 )
479 478 )
480 479
481 480 # TODO this is batchable.
482 481 remoterevs = []
483 482 for r in rev:
484 483 with srcpeer.commandexecutor() as e:
485 484 remoterevs.append(
486 485 e.callcommand(b'lookup', {b'key': r,}).result()
487 486 )
488 487 revs = remoterevs
489 488
490 489 # Obtain a lock before checking for or cloning the pooled repo; otherwise
491 490 # 2 clients may race creating or populating it.
492 491 pooldir = os.path.dirname(sharepath)
493 492 # lock class requires the directory to exist.
494 493 try:
495 494 util.makedir(pooldir, False)
496 495 except OSError as e:
497 496 if e.errno != errno.EEXIST:
498 497 raise
499 498
500 499 poolvfs = vfsmod.vfs(pooldir)
501 500 basename = os.path.basename(sharepath)
502 501
503 502 with lock.lock(poolvfs, b'%s.lock' % basename):
504 503 if os.path.exists(sharepath):
505 504 ui.status(
506 505 _(b'(sharing from existing pooled repository %s)\n') % basename
507 506 )
508 507 else:
509 508 ui.status(
510 509 _(b'(sharing from new pooled repository %s)\n') % basename
511 510 )
512 511 # Always use pull mode because hardlinks in share mode don't work
513 512 # well. Never update because working copies aren't necessary in
514 513 # share mode.
515 514 clone(
516 515 ui,
517 516 peeropts,
518 517 source,
519 518 dest=sharepath,
520 519 pull=True,
521 520 revs=rev,
522 521 update=False,
523 522 stream=stream,
524 523 )
525 524
526 525 # Resolve the value to put in [paths] section for the source.
527 526 if islocal(source):
528 527 defaultpath = os.path.abspath(util.urllocalpath(source))
529 528 else:
530 529 defaultpath = source
531 530
532 531 sharerepo = repository(ui, path=sharepath)
533 532 destrepo = share(
534 533 ui,
535 534 sharerepo,
536 535 dest=dest,
537 536 update=False,
538 537 bookmarks=False,
539 538 defaultpath=defaultpath,
540 539 )
541 540
542 541 # We need to perform a pull against the dest repo to fetch bookmarks
543 542 # and other non-store data that isn't shared by default. In the case of
544 543 # non-existing shared repo, this means we pull from the remote twice. This
545 544 # is a bit weird. But at the time it was implemented, there wasn't an easy
546 545 # way to pull just non-changegroup data.
547 546 exchange.pull(destrepo, srcpeer, heads=revs)
548 547
549 548 _postshareupdate(destrepo, update)
550 549
551 550 return srcpeer, peer(ui, peeropts, dest)
552 551
553 552
554 553 # Recomputing branch cache might be slow on big repos,
555 554 # so just copy it
556 555 def _copycache(srcrepo, dstcachedir, fname):
557 556 """copy a cache from srcrepo to destcachedir (if it exists)"""
558 557 srcbranchcache = srcrepo.vfs.join(b'cache/%s' % fname)
559 558 dstbranchcache = os.path.join(dstcachedir, fname)
560 559 if os.path.exists(srcbranchcache):
561 560 if not os.path.exists(dstcachedir):
562 561 os.mkdir(dstcachedir)
563 562 util.copyfile(srcbranchcache, dstbranchcache)
564 563
565 564
566 565 def clone(
567 566 ui,
568 567 peeropts,
569 568 source,
570 569 dest=None,
571 570 pull=False,
572 571 revs=None,
573 572 update=True,
574 573 stream=False,
575 574 branch=None,
576 575 shareopts=None,
577 576 storeincludepats=None,
578 577 storeexcludepats=None,
579 578 depth=None,
580 579 ):
581 580 """Make a copy of an existing repository.
582 581
583 582 Create a copy of an existing repository in a new directory. The
584 583 source and destination are URLs, as passed to the repository
585 584 function. Returns a pair of repository peers, the source and
586 585 newly created destination.
587 586
588 587 The location of the source is added to the new repository's
589 588 .hg/hgrc file, as the default to be used for future pulls and
590 589 pushes.
591 590
592 591 If an exception is raised, the partly cloned/updated destination
593 592 repository will be deleted.
594 593
595 594 Arguments:
596 595
597 596 source: repository object or URL
598 597
599 598 dest: URL of destination repository to create (defaults to base
600 599 name of source repository)
601 600
602 601 pull: always pull from source repository, even in local case or if the
603 602 server prefers streaming
604 603
605 604 stream: stream raw data uncompressed from repository (fast over
606 605 LAN, slow over WAN)
607 606
608 607 revs: revision to clone up to (implies pull=True)
609 608
610 609 update: update working directory after clone completes, if
611 610 destination is local repository (True means update to default rev,
612 611 anything else is treated as a revision)
613 612
614 613 branch: branches to clone
615 614
616 615 shareopts: dict of options to control auto sharing behavior. The "pool" key
617 616 activates auto sharing mode and defines the directory for stores. The
618 617 "mode" key determines how to construct the directory name of the shared
619 618 repository. "identity" means the name is derived from the node of the first
620 619 changeset in the repository. "remote" means the name is derived from the
621 620 remote's path/URL. Defaults to "identity."
622 621
623 622 storeincludepats and storeexcludepats: sets of file patterns to include and
624 623 exclude in the repository copy, respectively. If not defined, all files
625 624 will be included (a "full" clone). Otherwise a "narrow" clone containing
626 625 only the requested files will be performed. If ``storeincludepats`` is not
627 626 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
628 627 ``path:.``. If both are empty sets, no files will be cloned.
629 628 """
630 629
631 630 if isinstance(source, bytes):
632 631 origsource = ui.expandpath(source)
633 632 source, branches = parseurl(origsource, branch)
634 633 srcpeer = peer(ui, peeropts, source)
635 634 else:
636 635 srcpeer = source.peer() # in case we were called with a localrepo
637 636 branches = (None, branch or [])
638 637 origsource = source = srcpeer.url()
639 638 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
640 639
641 640 if dest is None:
642 641 dest = defaultdest(source)
643 642 if dest:
644 643 ui.status(_(b"destination directory: %s\n") % dest)
645 644 else:
646 645 dest = ui.expandpath(dest)
647 646
648 647 dest = util.urllocalpath(dest)
649 648 source = util.urllocalpath(source)
650 649
651 650 if not dest:
652 651 raise error.Abort(_(b"empty destination path is not valid"))
653 652
654 653 destvfs = vfsmod.vfs(dest, expandpath=True)
655 654 if destvfs.lexists():
656 655 if not destvfs.isdir():
657 656 raise error.Abort(_(b"destination '%s' already exists") % dest)
658 657 elif destvfs.listdir():
659 658 raise error.Abort(_(b"destination '%s' is not empty") % dest)
660 659
661 660 createopts = {}
662 661 narrow = False
663 662
664 663 if storeincludepats is not None:
665 664 narrowspec.validatepatterns(storeincludepats)
666 665 narrow = True
667 666
668 667 if storeexcludepats is not None:
669 668 narrowspec.validatepatterns(storeexcludepats)
670 669 narrow = True
671 670
672 671 if narrow:
673 672 # Include everything by default if only exclusion patterns defined.
674 673 if storeexcludepats and not storeincludepats:
675 674 storeincludepats = {b'path:.'}
676 675
677 676 createopts[b'narrowfiles'] = True
678 677
679 678 if depth:
680 679 createopts[b'shallowfilestore'] = True
681 680
682 681 if srcpeer.capable(b'lfs-serve'):
683 682 # Repository creation honors the config if it disabled the extension, so
684 683 # we can't just announce that lfs will be enabled. This check avoids
685 684 # saying that lfs will be enabled, and then saying it's an unknown
686 685 # feature. The lfs creation option is set in either case so that a
687 686 # requirement is added. If the extension is explicitly disabled but the
688 687 # requirement is set, the clone aborts early, before transferring any
689 688 # data.
690 689 createopts[b'lfs'] = True
691 690
692 691 if extensions.disabledext(b'lfs'):
693 692 ui.status(
694 693 _(
695 694 b'(remote is using large file support (lfs), but it is '
696 695 b'explicitly disabled in the local configuration)\n'
697 696 )
698 697 )
699 698 else:
700 699 ui.status(
701 700 _(
702 701 b'(remote is using large file support (lfs); lfs will '
703 702 b'be enabled for this repository)\n'
704 703 )
705 704 )
706 705
707 706 shareopts = shareopts or {}
708 707 sharepool = shareopts.get(b'pool')
709 708 sharenamemode = shareopts.get(b'mode')
710 709 if sharepool and islocal(dest):
711 710 sharepath = None
712 711 if sharenamemode == b'identity':
713 712 # Resolve the name from the initial changeset in the remote
714 713 # repository. This returns nullid when the remote is empty. It
715 714 # raises RepoLookupError if revision 0 is filtered or otherwise
716 715 # not available. If we fail to resolve, sharing is not enabled.
717 716 try:
718 717 with srcpeer.commandexecutor() as e:
719 718 rootnode = e.callcommand(
720 719 b'lookup', {b'key': b'0',}
721 720 ).result()
722 721
723 722 if rootnode != node.nullid:
724 723 sharepath = os.path.join(sharepool, node.hex(rootnode))
725 724 else:
726 725 ui.status(
727 726 _(
728 727 b'(not using pooled storage: '
729 728 b'remote appears to be empty)\n'
730 729 )
731 730 )
732 731 except error.RepoLookupError:
733 732 ui.status(
734 733 _(
735 734 b'(not using pooled storage: '
736 735 b'unable to resolve identity of remote)\n'
737 736 )
738 737 )
739 738 elif sharenamemode == b'remote':
740 739 sharepath = os.path.join(
741 sharepool, node.hex(hashlib.sha1(source).digest())
740 sharepool, node.hex(hashutil.sha1(source).digest())
742 741 )
743 742 else:
744 743 raise error.Abort(
745 744 _(b'unknown share naming mode: %s') % sharenamemode
746 745 )
747 746
748 747 # TODO this is a somewhat arbitrary restriction.
749 748 if narrow:
750 749 ui.status(_(b'(pooled storage not supported for narrow clones)\n'))
751 750 sharepath = None
752 751
753 752 if sharepath:
754 753 return clonewithshare(
755 754 ui,
756 755 peeropts,
757 756 sharepath,
758 757 source,
759 758 srcpeer,
760 759 dest,
761 760 pull=pull,
762 761 rev=revs,
763 762 update=update,
764 763 stream=stream,
765 764 )
766 765
767 766 srclock = destlock = cleandir = None
768 767 srcrepo = srcpeer.local()
769 768 try:
770 769 abspath = origsource
771 770 if islocal(origsource):
772 771 abspath = os.path.abspath(util.urllocalpath(origsource))
773 772
774 773 if islocal(dest):
775 774 cleandir = dest
776 775
777 776 copy = False
778 777 if (
779 778 srcrepo
780 779 and srcrepo.cancopy()
781 780 and islocal(dest)
782 781 and not phases.hassecret(srcrepo)
783 782 ):
784 783 copy = not pull and not revs
785 784
786 785 # TODO this is a somewhat arbitrary restriction.
787 786 if narrow:
788 787 copy = False
789 788
790 789 if copy:
791 790 try:
792 791 # we use a lock here because if we race with commit, we
793 792 # can end up with extra data in the cloned revlogs that's
794 793 # not pointed to by changesets, thus causing verify to
795 794 # fail
796 795 srclock = srcrepo.lock(wait=False)
797 796 except error.LockError:
798 797 copy = False
799 798
800 799 if copy:
801 800 srcrepo.hook(b'preoutgoing', throw=True, source=b'clone')
802 801 hgdir = os.path.realpath(os.path.join(dest, b".hg"))
803 802 if not os.path.exists(dest):
804 803 util.makedirs(dest)
805 804 else:
806 805 # only clean up directories we create ourselves
807 806 cleandir = hgdir
808 807 try:
809 808 destpath = hgdir
810 809 util.makedir(destpath, notindexed=True)
811 810 except OSError as inst:
812 811 if inst.errno == errno.EEXIST:
813 812 cleandir = None
814 813 raise error.Abort(
815 814 _(b"destination '%s' already exists") % dest
816 815 )
817 816 raise
818 817
819 818 destlock = copystore(ui, srcrepo, destpath)
820 819 # copy bookmarks over
821 820 srcbookmarks = srcrepo.vfs.join(b'bookmarks')
822 821 dstbookmarks = os.path.join(destpath, b'bookmarks')
823 822 if os.path.exists(srcbookmarks):
824 823 util.copyfile(srcbookmarks, dstbookmarks)
825 824
826 825 dstcachedir = os.path.join(destpath, b'cache')
827 826 for cache in cacheutil.cachetocopy(srcrepo):
828 827 _copycache(srcrepo, dstcachedir, cache)
829 828
830 829 # we need to re-init the repo after manually copying the data
831 830 # into it
832 831 destpeer = peer(srcrepo, peeropts, dest)
833 832 srcrepo.hook(
834 833 b'outgoing', source=b'clone', node=node.hex(node.nullid)
835 834 )
836 835 else:
837 836 try:
838 837 # only pass ui when no srcrepo
839 838 destpeer = peer(
840 839 srcrepo or ui,
841 840 peeropts,
842 841 dest,
843 842 create=True,
844 843 createopts=createopts,
845 844 )
846 845 except OSError as inst:
847 846 if inst.errno == errno.EEXIST:
848 847 cleandir = None
849 848 raise error.Abort(
850 849 _(b"destination '%s' already exists") % dest
851 850 )
852 851 raise
853 852
854 853 if revs:
855 854 if not srcpeer.capable(b'lookup'):
856 855 raise error.Abort(
857 856 _(
858 857 b"src repository does not support "
859 858 b"revision lookup and so doesn't "
860 859 b"support clone by revision"
861 860 )
862 861 )
863 862
864 863 # TODO this is batchable.
865 864 remoterevs = []
866 865 for rev in revs:
867 866 with srcpeer.commandexecutor() as e:
868 867 remoterevs.append(
869 868 e.callcommand(b'lookup', {b'key': rev,}).result()
870 869 )
871 870 revs = remoterevs
872 871
873 872 checkout = revs[0]
874 873 else:
875 874 revs = None
876 875 local = destpeer.local()
877 876 if local:
878 877 if narrow:
879 878 with local.wlock(), local.lock():
880 879 local.setnarrowpats(storeincludepats, storeexcludepats)
881 880 narrowspec.copytoworkingcopy(local)
882 881
883 882 u = util.url(abspath)
884 883 defaulturl = bytes(u)
885 884 local.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
886 885 if not stream:
887 886 if pull:
888 887 stream = False
889 888 else:
890 889 stream = None
891 890 # internal config: ui.quietbookmarkmove
892 891 overrides = {(b'ui', b'quietbookmarkmove'): True}
893 892 with local.ui.configoverride(overrides, b'clone'):
894 893 exchange.pull(
895 894 local,
896 895 srcpeer,
897 896 revs,
898 897 streamclonerequested=stream,
899 898 includepats=storeincludepats,
900 899 excludepats=storeexcludepats,
901 900 depth=depth,
902 901 )
903 902 elif srcrepo:
904 903 # TODO lift restriction once exchange.push() accepts narrow
905 904 # push.
906 905 if narrow:
907 906 raise error.Abort(
908 907 _(
909 908 b'narrow clone not available for '
910 909 b'remote destinations'
911 910 )
912 911 )
913 912
914 913 exchange.push(
915 914 srcrepo,
916 915 destpeer,
917 916 revs=revs,
918 917 bookmarks=srcrepo._bookmarks.keys(),
919 918 )
920 919 else:
921 920 raise error.Abort(
922 921 _(b"clone from remote to remote not supported")
923 922 )
924 923
925 924 cleandir = None
926 925
927 926 destrepo = destpeer.local()
928 927 if destrepo:
929 928 template = uimod.samplehgrcs[b'cloned']
930 929 u = util.url(abspath)
931 930 u.passwd = None
932 931 defaulturl = bytes(u)
933 932 destrepo.vfs.write(b'hgrc', util.tonativeeol(template % defaulturl))
934 933 destrepo.ui.setconfig(b'paths', b'default', defaulturl, b'clone')
935 934
936 935 if ui.configbool(b'experimental', b'remotenames'):
937 936 logexchange.pullremotenames(destrepo, srcpeer)
938 937
939 938 if update:
940 939 if update is not True:
941 940 with srcpeer.commandexecutor() as e:
942 941 checkout = e.callcommand(
943 942 b'lookup', {b'key': update,}
944 943 ).result()
945 944
946 945 uprev = None
947 946 status = None
948 947 if checkout is not None:
949 948 # Some extensions (at least hg-git and hg-subversion) have
950 949 # a peer.lookup() implementation that returns a name instead
951 950 # of a nodeid. We work around it here until we've figured
952 951 # out a better solution.
953 952 if len(checkout) == 20 and checkout in destrepo:
954 953 uprev = checkout
955 954 elif scmutil.isrevsymbol(destrepo, checkout):
956 955 uprev = scmutil.revsymbol(destrepo, checkout).node()
957 956 else:
958 957 if update is not True:
959 958 try:
960 959 uprev = destrepo.lookup(update)
961 960 except error.RepoLookupError:
962 961 pass
963 962 if uprev is None:
964 963 try:
965 964 uprev = destrepo._bookmarks[b'@']
966 965 update = b'@'
967 966 bn = destrepo[uprev].branch()
968 967 if bn == b'default':
969 968 status = _(b"updating to bookmark @\n")
970 969 else:
971 970 status = (
972 971 _(b"updating to bookmark @ on branch %s\n") % bn
973 972 )
974 973 except KeyError:
975 974 try:
976 975 uprev = destrepo.branchtip(b'default')
977 976 except error.RepoLookupError:
978 977 uprev = destrepo.lookup(b'tip')
979 978 if not status:
980 979 bn = destrepo[uprev].branch()
981 980 status = _(b"updating to branch %s\n") % bn
982 981 destrepo.ui.status(status)
983 982 _update(destrepo, uprev)
984 983 if update in destrepo._bookmarks:
985 984 bookmarks.activate(destrepo, update)
986 985 finally:
987 986 release(srclock, destlock)
988 987 if cleandir is not None:
989 988 shutil.rmtree(cleandir, True)
990 989 if srcpeer is not None:
991 990 srcpeer.close()
992 991 return srcpeer, destpeer
993 992
994 993
995 994 def _showstats(repo, stats, quietempty=False):
996 995 if quietempty and stats.isempty():
997 996 return
998 997 repo.ui.status(
999 998 _(
1000 999 b"%d files updated, %d files merged, "
1001 1000 b"%d files removed, %d files unresolved\n"
1002 1001 )
1003 1002 % (
1004 1003 stats.updatedcount,
1005 1004 stats.mergedcount,
1006 1005 stats.removedcount,
1007 1006 stats.unresolvedcount,
1008 1007 )
1009 1008 )
1010 1009
1011 1010
1012 1011 def updaterepo(repo, node, overwrite, updatecheck=None):
1013 1012 """Update the working directory to node.
1014 1013
1015 1014     When overwrite is set, changes are clobbered; otherwise they are merged
1016 1015
1017 1016 returns stats (see pydoc mercurial.merge.applyupdates)"""
1018 1017 return mergemod.update(
1019 1018 repo,
1020 1019 node,
1021 1020 branchmerge=False,
1022 1021 force=overwrite,
1023 1022 labels=[b'working copy', b'destination'],
1024 1023 updatecheck=updatecheck,
1025 1024 )
1026 1025
1027 1026
1028 1027 def update(repo, node, quietempty=False, updatecheck=None):
1029 1028 """update the working directory to node"""
1030 1029 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
1031 1030 _showstats(repo, stats, quietempty)
1032 1031 if stats.unresolvedcount:
1033 1032 repo.ui.status(_(b"use 'hg resolve' to retry unresolved file merges\n"))
1034 1033 return stats.unresolvedcount > 0
1035 1034
1036 1035
1037 1036 # naming conflict in clone()
1038 1037 _update = update
1039 1038
1040 1039
1041 1040 def clean(repo, node, show_stats=True, quietempty=False):
1042 1041 """forcibly switch the working directory to node, clobbering changes"""
1043 1042 stats = updaterepo(repo, node, True)
1044 1043 repo.vfs.unlinkpath(b'graftstate', ignoremissing=True)
1045 1044 if show_stats:
1046 1045 _showstats(repo, stats, quietempty)
1047 1046 return stats.unresolvedcount > 0
1048 1047
1049 1048
1050 1049 # naming conflict in updatetotally()
1051 1050 _clean = clean
1052 1051
1053 1052 _VALID_UPDATECHECKS = {
1054 1053 mergemod.UPDATECHECK_ABORT,
1055 1054 mergemod.UPDATECHECK_NONE,
1056 1055 mergemod.UPDATECHECK_LINEAR,
1057 1056 mergemod.UPDATECHECK_NO_CONFLICT,
1058 1057 }
1059 1058
1060 1059
1061 1060 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
1062 1061 """Update the working directory with extra care for non-file components
1063 1062
1064 1063 This takes care of non-file components below:
1065 1064
1066 1065 :bookmark: might be advanced or (in)activated
1067 1066
1068 1067 This takes arguments below:
1069 1068
1070 1069 :checkout: to which revision the working directory is updated
1071 1070 :brev: a name, which might be a bookmark to be activated after updating
1072 1071 :clean: whether changes in the working directory can be discarded
1073 1072 :updatecheck: how to deal with a dirty working directory
1074 1073
1075 1074 Valid values for updatecheck are the UPDATECHECK_* constants
1076 1075 defined in the merge module. Passing `None` will result in using the
1077 1076 configured default.
1078 1077
1079 1078 * ABORT: abort if the working directory is dirty
1080 1079 * NONE: don't check (merge working directory changes into destination)
1081 1080 * LINEAR: check that update is linear before merging working directory
1082 1081 changes into destination
1083 1082 * NO_CONFLICT: check that the update does not result in file merges
1084 1083
1085 1084 This returns whether conflict is detected at updating or not.
1086 1085 """
1087 1086 if updatecheck is None:
1088 1087 updatecheck = ui.config(b'commands', b'update.check')
1089 1088 if updatecheck not in _VALID_UPDATECHECKS:
1090 1089 # If not configured, or invalid value configured
1091 1090 updatecheck = mergemod.UPDATECHECK_LINEAR
1092 1091 if updatecheck not in _VALID_UPDATECHECKS:
1093 1092 raise ValueError(
1094 1093 r'Invalid updatecheck value %r (can accept %r)'
1095 1094 % (updatecheck, _VALID_UPDATECHECKS)
1096 1095 )
1097 1096 with repo.wlock():
1098 1097 movemarkfrom = None
1099 1098 warndest = False
1100 1099 if checkout is None:
1101 1100 updata = destutil.destupdate(repo, clean=clean)
1102 1101 checkout, movemarkfrom, brev = updata
1103 1102 warndest = True
1104 1103
1105 1104 if clean:
1106 1105 ret = _clean(repo, checkout)
1107 1106 else:
1108 1107 if updatecheck == mergemod.UPDATECHECK_ABORT:
1109 1108 cmdutil.bailifchanged(repo, merge=False)
1110 1109 updatecheck = mergemod.UPDATECHECK_NONE
1111 1110 ret = _update(repo, checkout, updatecheck=updatecheck)
1112 1111
1113 1112 if not ret and movemarkfrom:
1114 1113 if movemarkfrom == repo[b'.'].node():
1115 1114 pass # no-op update
1116 1115 elif bookmarks.update(repo, [movemarkfrom], repo[b'.'].node()):
1117 1116 b = ui.label(repo._activebookmark, b'bookmarks.active')
1118 1117 ui.status(_(b"updating bookmark %s\n") % b)
1119 1118 else:
1120 1119 # this can happen with a non-linear update
1121 1120 b = ui.label(repo._activebookmark, b'bookmarks')
1122 1121 ui.status(_(b"(leaving bookmark %s)\n") % b)
1123 1122 bookmarks.deactivate(repo)
1124 1123 elif brev in repo._bookmarks:
1125 1124 if brev != repo._activebookmark:
1126 1125 b = ui.label(brev, b'bookmarks.active')
1127 1126 ui.status(_(b"(activating bookmark %s)\n") % b)
1128 1127 bookmarks.activate(repo, brev)
1129 1128 elif brev:
1130 1129 if repo._activebookmark:
1131 1130 b = ui.label(repo._activebookmark, b'bookmarks')
1132 1131 ui.status(_(b"(leaving bookmark %s)\n") % b)
1133 1132 bookmarks.deactivate(repo)
1134 1133
1135 1134 if warndest:
1136 1135 destutil.statusotherdests(ui, repo)
1137 1136
1138 1137 return ret
1139 1138
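A minimal sketch of driving updatetotally() from caller code, assuming `ui` and `repo` were obtained elsewhere (for example via hg.repository()); the branch name here is purely illustrative:

from mercurial import hg, merge as mergemod, scmutil

# Update to the tip of the 'default' branch, refusing the update if it
# would require merging dirty working-copy files (sketch only).
checkout = scmutil.revsymbol(repo, b'default').node()
hadconflicts = hg.updatetotally(
    ui,
    repo,
    checkout,
    brev=None,
    clean=False,
    updatecheck=mergemod.UPDATECHECK_NO_CONFLICT,
)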
1140 1139
1141 1140 def merge(
1142 1141 repo,
1143 1142 node,
1144 1143 force=None,
1145 1144 remind=True,
1146 1145 mergeforce=False,
1147 1146 labels=None,
1148 1147 abort=False,
1149 1148 ):
1150 1149 """Branch merge with node, resolving changes. Return true if any
1151 1150 unresolved conflicts."""
1152 1151 if abort:
1153 1152 return abortmerge(repo.ui, repo)
1154 1153
1155 1154 stats = mergemod.update(
1156 1155 repo,
1157 1156 node,
1158 1157 branchmerge=True,
1159 1158 force=force,
1160 1159 mergeforce=mergeforce,
1161 1160 labels=labels,
1162 1161 )
1163 1162 _showstats(repo, stats)
1164 1163 if stats.unresolvedcount:
1165 1164 repo.ui.status(
1166 1165 _(
1167 1166 b"use 'hg resolve' to retry unresolved file merges "
1168 1167 b"or 'hg merge --abort' to abandon\n"
1169 1168 )
1170 1169 )
1171 1170 elif remind:
1172 1171 repo.ui.status(_(b"(branch merge, don't forget to commit)\n"))
1173 1172 return stats.unresolvedcount > 0
1174 1173
1175 1174
1176 1175 def abortmerge(ui, repo):
1177 1176 ms = mergemod.mergestate.read(repo)
1178 1177 if ms.active():
1179 1178 # there were conflicts
1180 1179 node = ms.localctx.hex()
1181 1180 else:
1182 1181         # there were no conflicts, mergestate was not stored
1183 1182 node = repo[b'.'].hex()
1184 1183
1185 1184 repo.ui.status(_(b"aborting the merge, updating back to %s\n") % node[:12])
1186 1185 stats = mergemod.update(repo, node, branchmerge=False, force=True)
1187 1186 _showstats(repo, stats)
1188 1187 return stats.unresolvedcount > 0
1189 1188
1190 1189
1191 1190 def _incoming(
1192 1191 displaychlist, subreporecurse, ui, repo, source, opts, buffered=False
1193 1192 ):
1194 1193 """
1195 1194 Helper for incoming / gincoming.
1196 1195 displaychlist gets called with
1197 1196 (remoterepo, incomingchangesetlist, displayer) parameters,
1198 1197 and is supposed to contain only code that can't be unified.
1199 1198 """
1200 1199 source, branches = parseurl(ui.expandpath(source), opts.get(b'branch'))
1201 1200 other = peer(repo, opts, source)
1202 1201 ui.status(_(b'comparing with %s\n') % util.hidepassword(source))
1203 1202 revs, checkout = addbranchrevs(repo, other, branches, opts.get(b'rev'))
1204 1203
1205 1204 if revs:
1206 1205 revs = [other.lookup(rev) for rev in revs]
1207 1206 other, chlist, cleanupfn = bundlerepo.getremotechanges(
1208 1207 ui, repo, other, revs, opts[b"bundle"], opts[b"force"]
1209 1208 )
1210 1209 try:
1211 1210 if not chlist:
1212 1211 ui.status(_(b"no changes found\n"))
1213 1212 return subreporecurse()
1214 1213 ui.pager(b'incoming')
1215 1214 displayer = logcmdutil.changesetdisplayer(
1216 1215 ui, other, opts, buffered=buffered
1217 1216 )
1218 1217 displaychlist(other, chlist, displayer)
1219 1218 displayer.close()
1220 1219 finally:
1221 1220 cleanupfn()
1222 1221 subreporecurse()
1223 1222 return 0 # exit code is zero since we found incoming changes
1224 1223
1225 1224
1226 1225 def incoming(ui, repo, source, opts):
1227 1226 def subreporecurse():
1228 1227 ret = 1
1229 1228 if opts.get(b'subrepos'):
1230 1229 ctx = repo[None]
1231 1230 for subpath in sorted(ctx.substate):
1232 1231 sub = ctx.sub(subpath)
1233 1232 ret = min(ret, sub.incoming(ui, source, opts))
1234 1233 return ret
1235 1234
1236 1235 def display(other, chlist, displayer):
1237 1236 limit = logcmdutil.getlimit(opts)
1238 1237 if opts.get(b'newest_first'):
1239 1238 chlist.reverse()
1240 1239 count = 0
1241 1240 for n in chlist:
1242 1241 if limit is not None and count >= limit:
1243 1242 break
1244 1243 parents = [p for p in other.changelog.parents(n) if p != nullid]
1245 1244 if opts.get(b'no_merges') and len(parents) == 2:
1246 1245 continue
1247 1246 count += 1
1248 1247 displayer.show(other[n])
1249 1248
1250 1249 return _incoming(display, subreporecurse, ui, repo, source, opts)
1251 1250
1252 1251
1253 1252 def _outgoing(ui, repo, dest, opts):
1254 1253 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1255 1254 if not path:
1256 1255 raise error.Abort(
1257 1256 _(b'default repository not configured!'),
1258 1257 hint=_(b"see 'hg help config.paths'"),
1259 1258 )
1260 1259 dest = path.pushloc or path.loc
1261 1260 branches = path.branch, opts.get(b'branch') or []
1262 1261
1263 1262 ui.status(_(b'comparing with %s\n') % util.hidepassword(dest))
1264 1263 revs, checkout = addbranchrevs(repo, repo, branches, opts.get(b'rev'))
1265 1264 if revs:
1266 1265 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1267 1266
1268 1267 other = peer(repo, opts, dest)
1269 1268 outgoing = discovery.findcommonoutgoing(
1270 1269 repo, other, revs, force=opts.get(b'force')
1271 1270 )
1272 1271 o = outgoing.missing
1273 1272 if not o:
1274 1273 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1275 1274 return o, other
1276 1275
1277 1276
1278 1277 def outgoing(ui, repo, dest, opts):
1279 1278 def recurse():
1280 1279 ret = 1
1281 1280 if opts.get(b'subrepos'):
1282 1281 ctx = repo[None]
1283 1282 for subpath in sorted(ctx.substate):
1284 1283 sub = ctx.sub(subpath)
1285 1284 ret = min(ret, sub.outgoing(ui, dest, opts))
1286 1285 return ret
1287 1286
1288 1287 limit = logcmdutil.getlimit(opts)
1289 1288 o, other = _outgoing(ui, repo, dest, opts)
1290 1289 if not o:
1291 1290 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1292 1291 return recurse()
1293 1292
1294 1293 if opts.get(b'newest_first'):
1295 1294 o.reverse()
1296 1295 ui.pager(b'outgoing')
1297 1296 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1298 1297 count = 0
1299 1298 for n in o:
1300 1299 if limit is not None and count >= limit:
1301 1300 break
1302 1301 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1303 1302 if opts.get(b'no_merges') and len(parents) == 2:
1304 1303 continue
1305 1304 count += 1
1306 1305 displayer.show(repo[n])
1307 1306 displayer.close()
1308 1307 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1309 1308 recurse()
1310 1309 return 0 # exit code is zero since we found outgoing changes
1311 1310
1312 1311
1313 1312 def verify(repo, level=None):
1314 1313 """verify the consistency of a repository"""
1315 1314 ret = verifymod.verify(repo, level=level)
1316 1315
1317 1316 # Broken subrepo references in hidden csets don't seem worth worrying about,
1318 1317 # since they can't be pushed/pulled, and --hidden can be used if they are a
1319 1318 # concern.
1320 1319
1321 1320 # pathto() is needed for -R case
1322 1321 revs = repo.revs(
1323 1322 b"filelog(%s)", util.pathto(repo.root, repo.getcwd(), b'.hgsubstate')
1324 1323 )
1325 1324
1326 1325 if revs:
1327 1326 repo.ui.status(_(b'checking subrepo links\n'))
1328 1327 for rev in revs:
1329 1328 ctx = repo[rev]
1330 1329 try:
1331 1330 for subpath in ctx.substate:
1332 1331 try:
1333 1332 ret = (
1334 1333 ctx.sub(subpath, allowcreate=False).verify() or ret
1335 1334 )
1336 1335 except error.RepoError as e:
1337 1336 repo.ui.warn(b'%d: %s\n' % (rev, e))
1338 1337 except Exception:
1339 1338 repo.ui.warn(
1340 1339 _(b'.hgsubstate is corrupt in revision %s\n')
1341 1340 % node.short(ctx.node())
1342 1341 )
1343 1342
1344 1343 return ret
1345 1344
1346 1345
1347 1346 def remoteui(src, opts):
1348 1347 """build a remote ui from ui or repo and opts"""
1349 1348 if util.safehasattr(src, b'baseui'): # looks like a repository
1350 1349 dst = src.baseui.copy() # drop repo-specific config
1351 1350 src = src.ui # copy target options from repo
1352 1351 else: # assume it's a global ui object
1353 1352 dst = src.copy() # keep all global options
1354 1353
1355 1354 # copy ssh-specific options
1356 1355 for o in b'ssh', b'remotecmd':
1357 1356 v = opts.get(o) or src.config(b'ui', o)
1358 1357 if v:
1359 1358 dst.setconfig(b"ui", o, v, b'copied')
1360 1359
1361 1360 # copy bundle-specific options
1362 1361 r = src.config(b'bundle', b'mainreporoot')
1363 1362 if r:
1364 1363 dst.setconfig(b'bundle', b'mainreporoot', r, b'copied')
1365 1364
1366 1365 # copy selected local settings to the remote ui
1367 1366 for sect in (b'auth', b'hostfingerprints', b'hostsecurity', b'http_proxy'):
1368 1367 for key, val in src.configitems(sect):
1369 1368 dst.setconfig(sect, key, val, b'copied')
1370 1369 v = src.config(b'web', b'cacerts')
1371 1370 if v:
1372 1371 dst.setconfig(b'web', b'cacerts', util.expandpath(v), b'copied')
1373 1372
1374 1373 return dst
1375 1374
1376 1375
1377 1376 # Files of interest
1378 1377 # Used to check if the repository has changed, by looking at the mtime and size of
1379 1378 # these files.
1380 1379 foi = [
1381 1380 (b'spath', b'00changelog.i'),
1382 1381 (b'spath', b'phaseroots'), # ! phase can change content at the same size
1383 1382 (b'spath', b'obsstore'),
1384 1383 (b'path', b'bookmarks'), # ! bookmark can change content at the same size
1385 1384 ]
1386 1385
1387 1386
1388 1387 class cachedlocalrepo(object):
1389 1388 """Holds a localrepository that can be cached and reused."""
1390 1389
1391 1390 def __init__(self, repo):
1392 1391 """Create a new cached repo from an existing repo.
1393 1392
1394 1393 We assume the passed in repo was recently created. If the
1395 1394 repo has changed between when it was created and when it was
1396 1395 turned into a cache, it may not refresh properly.
1397 1396 """
1398 1397 assert isinstance(repo, localrepo.localrepository)
1399 1398 self._repo = repo
1400 1399 self._state, self.mtime = self._repostate()
1401 1400 self._filtername = repo.filtername
1402 1401
1403 1402 def fetch(self):
1404 1403 """Refresh (if necessary) and return a repository.
1405 1404
1406 1405 If the cached instance is out of date, it will be recreated
1407 1406 automatically and returned.
1408 1407
1409 1408 Returns a tuple of the repo and a boolean indicating whether a new
1410 1409 repo instance was created.
1411 1410 """
1412 1411 # We compare the mtimes and sizes of some well-known files to
1413 1412 # determine if the repo changed. This is not precise, as mtimes
1414 1413 # are susceptible to clock skew and imprecise filesystems and
1415 1414 # file content can change while maintaining the same size.
1416 1415
1417 1416 state, mtime = self._repostate()
1418 1417 if state == self._state:
1419 1418 return self._repo, False
1420 1419
1421 1420 repo = repository(self._repo.baseui, self._repo.url())
1422 1421 if self._filtername:
1423 1422 self._repo = repo.filtered(self._filtername)
1424 1423 else:
1425 1424 self._repo = repo.unfiltered()
1426 1425 self._state = state
1427 1426 self.mtime = mtime
1428 1427
1429 1428 return self._repo, True
1430 1429
1431 1430 def _repostate(self):
1432 1431 state = []
1433 1432 maxmtime = -1
1434 1433 for attr, fname in foi:
1435 1434 prefix = getattr(self._repo, attr)
1436 1435 p = os.path.join(prefix, fname)
1437 1436 try:
1438 1437 st = os.stat(p)
1439 1438 except OSError:
1440 1439 st = os.stat(prefix)
1441 1440 state.append((st[stat.ST_MTIME], st.st_size))
1442 1441 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1443 1442
1444 1443 return tuple(state), maxmtime
1445 1444
1446 1445 def copy(self):
1447 1446 """Obtain a copy of this class instance.
1448 1447
1449 1448 A new localrepository instance is obtained. The new instance should be
1450 1449 completely independent of the original.
1451 1450 """
1452 1451 repo = repository(self._repo.baseui, self._repo.origroot)
1453 1452 if self._filtername:
1454 1453 repo = repo.filtered(self._filtername)
1455 1454 else:
1456 1455 repo = repo.unfiltered()
1457 1456 c = cachedlocalrepo(repo)
1458 1457 c._state = self._state
1459 1458 c.mtime = self.mtime
1460 1459 return c
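The class above, together with the `foi` list, reduces to a stat()-based fingerprint; a standalone sketch of that idea with illustrative paths, omitting the fallback the real _repostate() performs when a file is missing:

import os
import stat

def fingerprint(paths):
    """Return a tuple of (mtime, size) pairs for the given files."""
    state = []
    for p in paths:
        st = os.stat(p)
        state.append((st[stat.ST_MTIME], st.st_size))
    return tuple(state)

# The repository is treated as unchanged while the fingerprint of files
# such as .hg/store/00changelog.i and .hg/bookmarks stays identical.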
@@ -1,3734 +1,3734 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 import hashlib
12 11 import os
13 12 import random
14 13 import sys
15 14 import time
16 15 import weakref
17 16
18 17 from .i18n import _
19 18 from .node import (
20 19 bin,
21 20 hex,
22 21 nullid,
23 22 nullrev,
24 23 short,
25 24 )
26 25 from .pycompat import (
27 26 delattr,
28 27 getattr,
29 28 )
30 29 from . import (
31 30 bookmarks,
32 31 branchmap,
33 32 bundle2,
34 33 changegroup,
35 34 color,
36 35 context,
37 36 dirstate,
38 37 dirstateguard,
39 38 discovery,
40 39 encoding,
41 40 error,
42 41 exchange,
43 42 extensions,
44 43 filelog,
45 44 hook,
46 45 lock as lockmod,
47 46 match as matchmod,
48 47 merge as mergemod,
49 48 mergeutil,
50 49 namespaces,
51 50 narrowspec,
52 51 obsolete,
53 52 pathutil,
54 53 phases,
55 54 pushkey,
56 55 pycompat,
57 56 repoview,
58 57 revset,
59 58 revsetlang,
60 59 scmutil,
61 60 sparse,
62 61 store as storemod,
63 62 subrepoutil,
64 63 tags as tagsmod,
65 64 transaction,
66 65 txnutil,
67 66 util,
68 67 vfs as vfsmod,
69 68 )
70 69
71 70 from .interfaces import (
72 71 repository,
73 72 util as interfaceutil,
74 73 )
75 74
76 75 from .utils import (
76 hashutil,
77 77 procutil,
78 78 stringutil,
79 79 )
80 80
81 81 from .revlogutils import constants as revlogconst
82 82
83 83 release = lockmod.release
84 84 urlerr = util.urlerr
85 85 urlreq = util.urlreq
86 86
87 87 # set of (path, vfs-location) tuples. vfs-location is:
88 88 # - 'plain' for vfs relative paths
89 89 # - '' for svfs relative paths
90 90 _cachedfiles = set()
91 91
92 92
93 93 class _basefilecache(scmutil.filecache):
94 94     """All filecache usage on repo is done for logic that should be unfiltered
95 95 """
96 96
97 97 def __get__(self, repo, type=None):
98 98 if repo is None:
99 99 return self
100 100 # proxy to unfiltered __dict__ since filtered repo has no entry
101 101 unfi = repo.unfiltered()
102 102 try:
103 103 return unfi.__dict__[self.sname]
104 104 except KeyError:
105 105 pass
106 106 return super(_basefilecache, self).__get__(unfi, type)
107 107
108 108 def set(self, repo, value):
109 109 return super(_basefilecache, self).set(repo.unfiltered(), value)
110 110
111 111
112 112 class repofilecache(_basefilecache):
113 113 """filecache for files in .hg but outside of .hg/store"""
114 114
115 115 def __init__(self, *paths):
116 116 super(repofilecache, self).__init__(*paths)
117 117 for path in paths:
118 118 _cachedfiles.add((path, b'plain'))
119 119
120 120 def join(self, obj, fname):
121 121 return obj.vfs.join(fname)
122 122
123 123
124 124 class storecache(_basefilecache):
125 125 """filecache for files in the store"""
126 126
127 127 def __init__(self, *paths):
128 128 super(storecache, self).__init__(*paths)
129 129 for path in paths:
130 130 _cachedfiles.add((path, b''))
131 131
132 132 def join(self, obj, fname):
133 133 return obj.sjoin(fname)
134 134
135 135
136 136 class mixedrepostorecache(_basefilecache):
137 137     """filecache for a mix of files in .hg/store and outside"""
138 138
139 139 def __init__(self, *pathsandlocations):
140 140 # scmutil.filecache only uses the path for passing back into our
141 141 # join(), so we can safely pass a list of paths and locations
142 142 super(mixedrepostorecache, self).__init__(*pathsandlocations)
143 143 _cachedfiles.update(pathsandlocations)
144 144
145 145 def join(self, obj, fnameandlocation):
146 146 fname, location = fnameandlocation
147 147 if location == b'plain':
148 148 return obj.vfs.join(fname)
149 149 else:
150 150 if location != b'':
151 151 raise error.ProgrammingError(
152 152 b'unexpected location: %s' % location
153 153 )
154 154 return obj.sjoin(fname)
155 155
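These decorators are used further down in this module, and by extensions that attach cached properties to a repository; a sketch of the reposetup-style pattern, where the property and file name are hypothetical:

from mercurial import localrepo

def reposetup(ui, repo):
    class examplerepo(repo.__class__):
        @localrepo.repofilecache(b'myfeaturestate')
        def myfeaturestate(self):
            # recomputed only when .hg/myfeaturestate's stat fingerprint changes
            return self.vfs.tryread(b'myfeaturestate')

    repo.__class__ = examplerepo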
156 156
157 157 def isfilecached(repo, name):
158 158     """check if a repo has already cached the "name" filecache-ed property
159 159
160 160 This returns (cachedobj-or-None, iscached) tuple.
161 161 """
162 162 cacheentry = repo.unfiltered()._filecache.get(name, None)
163 163 if not cacheentry:
164 164 return None, False
165 165 return cacheentry.obj, True
166 166
167 167
168 168 class unfilteredpropertycache(util.propertycache):
169 169     """propertycache that applies to the unfiltered repo only"""
170 170
171 171 def __get__(self, repo, type=None):
172 172 unfi = repo.unfiltered()
173 173 if unfi is repo:
174 174 return super(unfilteredpropertycache, self).__get__(unfi)
175 175 return getattr(unfi, self.name)
176 176
177 177
178 178 class filteredpropertycache(util.propertycache):
179 179     """propertycache that must take filtering into account"""
180 180
181 181 def cachevalue(self, obj, value):
182 182 object.__setattr__(obj, self.name, value)
183 183
184 184
185 185 def hasunfilteredcache(repo, name):
186 186 """check if a repo has an unfilteredpropertycache value for <name>"""
187 187 return name in vars(repo.unfiltered())
188 188
189 189
190 190 def unfilteredmethod(orig):
191 191 """decorate method that always need to be run on unfiltered version"""
192 192
193 193 def wrapper(repo, *args, **kwargs):
194 194 return orig(repo.unfiltered(), *args, **kwargs)
195 195
196 196 return wrapper
197 197
198 198
199 199 moderncaps = {
200 200 b'lookup',
201 201 b'branchmap',
202 202 b'pushkey',
203 203 b'known',
204 204 b'getbundle',
205 205 b'unbundle',
206 206 }
207 207 legacycaps = moderncaps.union({b'changegroupsubset'})
208 208
209 209
210 210 @interfaceutil.implementer(repository.ipeercommandexecutor)
211 211 class localcommandexecutor(object):
212 212 def __init__(self, peer):
213 213 self._peer = peer
214 214 self._sent = False
215 215 self._closed = False
216 216
217 217 def __enter__(self):
218 218 return self
219 219
220 220 def __exit__(self, exctype, excvalue, exctb):
221 221 self.close()
222 222
223 223 def callcommand(self, command, args):
224 224 if self._sent:
225 225 raise error.ProgrammingError(
226 226 b'callcommand() cannot be used after sendcommands()'
227 227 )
228 228
229 229 if self._closed:
230 230 raise error.ProgrammingError(
231 231 b'callcommand() cannot be used after close()'
232 232 )
233 233
234 234 # We don't need to support anything fancy. Just call the named
235 235 # method on the peer and return a resolved future.
236 236 fn = getattr(self._peer, pycompat.sysstr(command))
237 237
238 238 f = pycompat.futures.Future()
239 239
240 240 try:
241 241 result = fn(**pycompat.strkwargs(args))
242 242 except Exception:
243 243 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
244 244 else:
245 245 f.set_result(result)
246 246
247 247 return f
248 248
249 249 def sendcommands(self):
250 250 self._sent = True
251 251
252 252 def close(self):
253 253 self._closed = True
254 254
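This executor is what backs the `with peer.commandexecutor() as e:` pattern used by clone() in hg.py; a sketch of the calling convention, assuming `peer` was obtained elsewhere (for example via repo.peer()):

with peer.commandexecutor() as e:
    fheads = e.callcommand(b'heads', {})
    flookup = e.callcommand(b'lookup', {b'key': b'tip'})
# the local executor resolves futures immediately; wire peers resolve them
# once sendcommands() has been issued (here, when the executor is closed)
heads = fheads.result()
tipnode = flookup.result()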
255 255
256 256 @interfaceutil.implementer(repository.ipeercommands)
257 257 class localpeer(repository.peer):
258 258 '''peer for a local repo; reflects only the most recent API'''
259 259
260 260 def __init__(self, repo, caps=None):
261 261 super(localpeer, self).__init__()
262 262
263 263 if caps is None:
264 264 caps = moderncaps.copy()
265 265 self._repo = repo.filtered(b'served')
266 266 self.ui = repo.ui
267 267 self._caps = repo._restrictcapabilities(caps)
268 268
269 269 # Begin of _basepeer interface.
270 270
271 271 def url(self):
272 272 return self._repo.url()
273 273
274 274 def local(self):
275 275 return self._repo
276 276
277 277 def peer(self):
278 278 return self
279 279
280 280 def canpush(self):
281 281 return True
282 282
283 283 def close(self):
284 284 self._repo.close()
285 285
286 286 # End of _basepeer interface.
287 287
288 288 # Begin of _basewirecommands interface.
289 289
290 290 def branchmap(self):
291 291 return self._repo.branchmap()
292 292
293 293 def capabilities(self):
294 294 return self._caps
295 295
296 296 def clonebundles(self):
297 297 return self._repo.tryread(b'clonebundles.manifest')
298 298
299 299 def debugwireargs(self, one, two, three=None, four=None, five=None):
300 300 """Used to test argument passing over the wire"""
301 301 return b"%s %s %s %s %s" % (
302 302 one,
303 303 two,
304 304 pycompat.bytestr(three),
305 305 pycompat.bytestr(four),
306 306 pycompat.bytestr(five),
307 307 )
308 308
309 309 def getbundle(
310 310 self, source, heads=None, common=None, bundlecaps=None, **kwargs
311 311 ):
312 312 chunks = exchange.getbundlechunks(
313 313 self._repo,
314 314 source,
315 315 heads=heads,
316 316 common=common,
317 317 bundlecaps=bundlecaps,
318 318 **kwargs
319 319 )[1]
320 320 cb = util.chunkbuffer(chunks)
321 321
322 322 if exchange.bundle2requested(bundlecaps):
323 323 # When requesting a bundle2, getbundle returns a stream to make the
324 324 # wire level function happier. We need to build a proper object
325 325 # from it in local peer.
326 326 return bundle2.getunbundler(self.ui, cb)
327 327 else:
328 328 return changegroup.getunbundler(b'01', cb, None)
329 329
330 330 def heads(self):
331 331 return self._repo.heads()
332 332
333 333 def known(self, nodes):
334 334 return self._repo.known(nodes)
335 335
336 336 def listkeys(self, namespace):
337 337 return self._repo.listkeys(namespace)
338 338
339 339 def lookup(self, key):
340 340 return self._repo.lookup(key)
341 341
342 342 def pushkey(self, namespace, key, old, new):
343 343 return self._repo.pushkey(namespace, key, old, new)
344 344
345 345 def stream_out(self):
346 346 raise error.Abort(_(b'cannot perform stream clone against local peer'))
347 347
348 348 def unbundle(self, bundle, heads, url):
349 349 """apply a bundle on a repo
350 350
351 351 This function handles the repo locking itself."""
352 352 try:
353 353 try:
354 354 bundle = exchange.readbundle(self.ui, bundle, None)
355 355 ret = exchange.unbundle(self._repo, bundle, heads, b'push', url)
356 356 if util.safehasattr(ret, b'getchunks'):
357 357 # This is a bundle20 object, turn it into an unbundler.
358 358 # This little dance should be dropped eventually when the
359 359 # API is finally improved.
360 360 stream = util.chunkbuffer(ret.getchunks())
361 361 ret = bundle2.getunbundler(self.ui, stream)
362 362 return ret
363 363 except Exception as exc:
364 364 # If the exception contains output salvaged from a bundle2
365 365 # reply, we need to make sure it is printed before continuing
366 366 # to fail. So we build a bundle2 with such output and consume
367 367 # it directly.
368 368 #
369 369 # This is not very elegant but allows a "simple" solution for
370 370 # issue4594
371 371 output = getattr(exc, '_bundle2salvagedoutput', ())
372 372 if output:
373 373 bundler = bundle2.bundle20(self._repo.ui)
374 374 for out in output:
375 375 bundler.addpart(out)
376 376 stream = util.chunkbuffer(bundler.getchunks())
377 377 b = bundle2.getunbundler(self.ui, stream)
378 378 bundle2.processbundle(self._repo, b)
379 379 raise
380 380 except error.PushRaced as exc:
381 381 raise error.ResponseError(
382 382 _(b'push failed:'), stringutil.forcebytestr(exc)
383 383 )
384 384
385 385 # End of _basewirecommands interface.
386 386
387 387 # Begin of peer interface.
388 388
389 389 def commandexecutor(self):
390 390 return localcommandexecutor(self)
391 391
392 392 # End of peer interface.
393 393
394 394
395 395 @interfaceutil.implementer(repository.ipeerlegacycommands)
396 396 class locallegacypeer(localpeer):
397 397 '''peer extension which implements legacy methods too; used for tests with
398 398 restricted capabilities'''
399 399
400 400 def __init__(self, repo):
401 401 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
402 402
403 403 # Begin of baselegacywirecommands interface.
404 404
405 405 def between(self, pairs):
406 406 return self._repo.between(pairs)
407 407
408 408 def branches(self, nodes):
409 409 return self._repo.branches(nodes)
410 410
411 411 def changegroup(self, nodes, source):
412 412 outgoing = discovery.outgoing(
413 413 self._repo, missingroots=nodes, missingheads=self._repo.heads()
414 414 )
415 415 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
416 416
417 417 def changegroupsubset(self, bases, heads, source):
418 418 outgoing = discovery.outgoing(
419 419 self._repo, missingroots=bases, missingheads=heads
420 420 )
421 421 return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
422 422
423 423 # End of baselegacywirecommands interface.
424 424
425 425
426 426 # Increment the sub-version when the revlog v2 format changes to lock out old
427 427 # clients.
428 428 REVLOGV2_REQUIREMENT = b'exp-revlogv2.1'
429 429
430 430 # A repository with the sparserevlog feature will have delta chains that
431 431 # can spread over a larger span. Sparse reading cuts these large spans into
432 432 # pieces, so that each piece isn't too big.
433 433 # Without the sparserevlog capability, reading from the repository could use
434 434 # huge amounts of memory, because the whole span would be read at once,
435 435 # including all the intermediate revisions that aren't pertinent for the chain.
436 436 # This is why once a repository has enabled sparse-read, it becomes required.
437 437 SPARSEREVLOG_REQUIREMENT = b'sparserevlog'
438 438
439 439 # A repository with the sidedataflag requirement will allow storing extra
440 440 # information for revisions without altering their original hashes.
441 441 SIDEDATA_REQUIREMENT = b'exp-sidedata-flag'
442 442
443 443 # A repository with the copies-sidedata-changeset requirement will store
444 444 # copies related information in changeset's sidedata.
445 445 COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
446 446
447 447 # Functions receiving (ui, features) that extensions can register to impact
448 448 # the ability to load repositories with custom requirements. Only
449 449 # functions defined in loaded extensions are called.
450 450 #
451 451 # The function receives a set of requirement strings that the repository
452 452 # is capable of opening. Functions will typically add elements to the
453 453 # set to reflect that the extension knows how to handle that requirements.
454 454 featuresetupfuncs = set()
455 455
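An extension that introduces its own repository requirement would typically register a hook in this set from its module body; a sketch, with an illustrative requirement name:

# in the extension's top-level module
from mercurial import localrepo

def featuresetup(ui, supported):
    # declare that repositories carrying this requirement can be opened
    supported.add(b'exp-myextension-feature')

localrepo.featuresetupfuncs.add(featuresetup)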
456 456
457 457 def makelocalrepository(baseui, path, intents=None):
458 458 """Create a local repository object.
459 459
460 460 Given arguments needed to construct a local repository, this function
461 461 performs various early repository loading functionality (such as
462 462 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
463 463 the repository can be opened, derives a type suitable for representing
464 464 that repository, and returns an instance of it.
465 465
466 466 The returned object conforms to the ``repository.completelocalrepository``
467 467 interface.
468 468
469 469 The repository type is derived by calling a series of factory functions
470 470 for each aspect/interface of the final repository. These are defined by
471 471 ``REPO_INTERFACES``.
472 472
473 473 Each factory function is called to produce a type implementing a specific
474 474 interface. The cumulative list of returned types will be combined into a
475 475 new type and that type will be instantiated to represent the local
476 476 repository.
477 477
478 478 The factory functions each receive various state that may be consulted
479 479 as part of deriving a type.
480 480
481 481 Extensions should wrap these factory functions to customize repository type
482 482 creation. Note that an extension's wrapped function may be called even if
483 483 that extension is not loaded for the repo being constructed. Extensions
484 484 should check if their ``__name__`` appears in the
485 485 ``extensionmodulenames`` set passed to the factory function and no-op if
486 486 not.
487 487 """
488 488 ui = baseui.copy()
489 489 # Prevent copying repo configuration.
490 490 ui.copy = baseui.copy
491 491
492 492 # Working directory VFS rooted at repository root.
493 493 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
494 494
495 495 # Main VFS for .hg/ directory.
496 496 hgpath = wdirvfs.join(b'.hg')
497 497 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
498 498
499 499 # The .hg/ path should exist and should be a directory. All other
500 500 # cases are errors.
501 501 if not hgvfs.isdir():
502 502 try:
503 503 hgvfs.stat()
504 504 except OSError as e:
505 505 if e.errno != errno.ENOENT:
506 506 raise
507 507
508 508 raise error.RepoError(_(b'repository %s not found') % path)
509 509
510 510 # .hg/requires file contains a newline-delimited list of
511 511 # features/capabilities the opener (us) must have in order to use
512 512 # the repository. This file was introduced in Mercurial 0.9.2,
513 513 # which means very old repositories may not have one. We assume
514 514 # a missing file translates to no requirements.
515 515 try:
516 516 requirements = set(hgvfs.read(b'requires').splitlines())
517 517 except IOError as e:
518 518 if e.errno != errno.ENOENT:
519 519 raise
520 520 requirements = set()
521 521
522 522 # The .hg/hgrc file may load extensions or contain config options
523 523 # that influence repository construction. Attempt to load it and
524 524 # process any new extensions that it may have pulled in.
525 525 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
526 526 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
527 527 extensions.loadall(ui)
528 528 extensions.populateui(ui)
529 529
530 530 # Set of module names of extensions loaded for this repository.
531 531 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
532 532
533 533 supportedrequirements = gathersupportedrequirements(ui)
534 534
535 535 # We first validate the requirements are known.
536 536 ensurerequirementsrecognized(requirements, supportedrequirements)
537 537
538 538 # Then we validate that the known set is reasonable to use together.
539 539 ensurerequirementscompatible(ui, requirements)
540 540
541 541 # TODO there are unhandled edge cases related to opening repositories with
542 542 # shared storage. If storage is shared, we should also test for requirements
543 543 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
544 544 # that repo, as that repo may load extensions needed to open it. This is a
545 545 # bit complicated because we don't want the other hgrc to overwrite settings
546 546 # in this hgrc.
547 547 #
548 548 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
549 549 # file when sharing repos. But if a requirement is added after the share is
550 550 # performed, thereby introducing a new requirement for the opener, we may
551 551 # not see that and could encounter a run-time error interacting with
552 552 # that shared store since it has an unknown-to-us requirement.
553 553
554 554 # At this point, we know we should be capable of opening the repository.
555 555 # Now get on with doing that.
556 556
557 557 features = set()
558 558
559 559 # The "store" part of the repository holds versioned data. How it is
560 560 # accessed is determined by various requirements. The ``shared`` or
561 561 # ``relshared`` requirements indicate the store lives in the path contained
562 562 # in the ``.hg/sharedpath`` file. This is an absolute path for
563 563 # ``shared`` and relative to ``.hg/`` for ``relshared``.
564 564 if b'shared' in requirements or b'relshared' in requirements:
565 565 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
566 566 if b'relshared' in requirements:
567 567 sharedpath = hgvfs.join(sharedpath)
568 568
569 569 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
570 570
571 571 if not sharedvfs.exists():
572 572 raise error.RepoError(
573 573 _(b'.hg/sharedpath points to nonexistent directory %s')
574 574 % sharedvfs.base
575 575 )
576 576
577 577 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
578 578
579 579 storebasepath = sharedvfs.base
580 580 cachepath = sharedvfs.join(b'cache')
581 581 else:
582 582 storebasepath = hgvfs.base
583 583 cachepath = hgvfs.join(b'cache')
584 584 wcachepath = hgvfs.join(b'wcache')
585 585
586 586 # The store has changed over time and the exact layout is dictated by
587 587 # requirements. The store interface abstracts differences across all
588 588 # of them.
589 589 store = makestore(
590 590 requirements,
591 591 storebasepath,
592 592 lambda base: vfsmod.vfs(base, cacheaudited=True),
593 593 )
594 594 hgvfs.createmode = store.createmode
595 595
596 596 storevfs = store.vfs
597 597 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
598 598
599 599 # The cache vfs is used to manage cache files.
600 600 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
601 601 cachevfs.createmode = store.createmode
602 602 # The cache vfs is used to manage cache files related to the working copy
603 603 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
604 604 wcachevfs.createmode = store.createmode
605 605
606 606 # Now resolve the type for the repository object. We do this by repeatedly
607 607 # calling a factory function to produce types for specific aspects of the
608 608 # repo's operation. The aggregate returned types are used as base classes
609 609 # for a dynamically-derived type, which will represent our new repository.
610 610
611 611 bases = []
612 612 extrastate = {}
613 613
614 614 for iface, fn in REPO_INTERFACES:
615 615 # We pass all potentially useful state to give extensions tons of
616 616 # flexibility.
617 617 typ = fn()(
618 618 ui=ui,
619 619 intents=intents,
620 620 requirements=requirements,
621 621 features=features,
622 622 wdirvfs=wdirvfs,
623 623 hgvfs=hgvfs,
624 624 store=store,
625 625 storevfs=storevfs,
626 626 storeoptions=storevfs.options,
627 627 cachevfs=cachevfs,
628 628 wcachevfs=wcachevfs,
629 629 extensionmodulenames=extensionmodulenames,
630 630 extrastate=extrastate,
631 631 baseclasses=bases,
632 632 )
633 633
634 634 if not isinstance(typ, type):
635 635 raise error.ProgrammingError(
636 636 b'unable to construct type for %s' % iface
637 637 )
638 638
639 639 bases.append(typ)
640 640
641 641 # type() allows you to use characters in type names that wouldn't be
642 642 # recognized as Python symbols in source code. We abuse that to add
643 643 # rich information about our constructed repo.
644 644 name = pycompat.sysstr(
645 645 b'derivedrepo:%s<%s>' % (wdirvfs.base, b','.join(sorted(requirements)))
646 646 )
647 647
648 648 cls = type(name, tuple(bases), {})
649 649
650 650 return cls(
651 651 baseui=baseui,
652 652 ui=ui,
653 653 origroot=path,
654 654 wdirvfs=wdirvfs,
655 655 hgvfs=hgvfs,
656 656 requirements=requirements,
657 657 supportedrequirements=supportedrequirements,
658 658 sharedpath=storebasepath,
659 659 store=store,
660 660 cachevfs=cachevfs,
661 661 wcachevfs=wcachevfs,
662 662 features=features,
663 663 intents=intents,
664 664 )
665 665
666 666
667 667 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
668 668 """Load hgrc files/content into a ui instance.
669 669
670 670 This is called during repository opening to load any additional
671 671 config files or settings relevant to the current repository.
672 672
673 673 Returns a bool indicating whether any additional configs were loaded.
674 674
675 675 Extensions should monkeypatch this function to modify how per-repo
676 676 configs are loaded. For example, an extension may wish to pull in
677 677 configs from alternate files or sources.
678 678 """
679 679 try:
680 680 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
681 681 return True
682 682 except IOError:
683 683 return False
684 684
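As the docstring suggests, an extension can wrap this function to pull configuration from additional sources; a sketch using extensions.wrapfunction(), where the extra file name is purely illustrative:

from mercurial import extensions, localrepo

def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
    loaded = orig(ui, wdirvfs, hgvfs, requirements)
    try:
        # also honour a hypothetical per-repo .hg/hgrc-extra file
        ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
        return True
    except IOError:
        return loaded

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)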
685 685
686 686 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
687 687 """Perform additional actions after .hg/hgrc is loaded.
688 688
689 689 This function is called during repository loading immediately after
690 690 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
691 691
692 692 The function can be used to validate configs, automatically add
693 693 options (including extensions) based on requirements, etc.
694 694 """
695 695
696 696 # Map of requirements to list of extensions to load automatically when
697 697 # requirement is present.
698 698 autoextensions = {
699 699 b'largefiles': [b'largefiles'],
700 700 b'lfs': [b'lfs'],
701 701 }
702 702
703 703 for requirement, names in sorted(autoextensions.items()):
704 704 if requirement not in requirements:
705 705 continue
706 706
707 707 for name in names:
708 708 if not ui.hasconfig(b'extensions', name):
709 709 ui.setconfig(b'extensions', name, b'', source=b'autoload')
710 710
711 711
712 712 def gathersupportedrequirements(ui):
713 713 """Determine the complete set of recognized requirements."""
714 714 # Start with all requirements supported by this file.
715 715 supported = set(localrepository._basesupported)
716 716
717 717 # Execute ``featuresetupfuncs`` entries if they belong to an extension
718 718 # relevant to this ui instance.
719 719 modules = {m.__name__ for n, m in extensions.extensions(ui)}
720 720
721 721 for fn in featuresetupfuncs:
722 722 if fn.__module__ in modules:
723 723 fn(ui, supported)
724 724
725 725 # Add derived requirements from registered compression engines.
726 726 for name in util.compengines:
727 727 engine = util.compengines[name]
728 728 if engine.available() and engine.revlogheader():
729 729 supported.add(b'exp-compression-%s' % name)
730 730 if engine.name() == b'zstd':
731 731 supported.add(b'revlog-compression-zstd')
732 732
733 733 return supported
734 734
735 735
736 736 def ensurerequirementsrecognized(requirements, supported):
737 737 """Validate that a set of local requirements is recognized.
738 738
739 739 Receives a set of requirements. Raises an ``error.RepoError`` if there
740 740 exists any requirement in that set that currently loaded code doesn't
741 741 recognize.
742 742
743 743 Returns a set of supported requirements.
744 744 """
745 745 missing = set()
746 746
747 747 for requirement in requirements:
748 748 if requirement in supported:
749 749 continue
750 750
751 751 if not requirement or not requirement[0:1].isalnum():
752 752 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
753 753
754 754 missing.add(requirement)
755 755
756 756 if missing:
757 757 raise error.RequirementError(
758 758 _(b'repository requires features unknown to this Mercurial: %s')
759 759 % b' '.join(sorted(missing)),
760 760 hint=_(
761 761 b'see https://mercurial-scm.org/wiki/MissingRequirement '
762 762 b'for more information'
763 763 ),
764 764 )
765 765
766 766
767 767 def ensurerequirementscompatible(ui, requirements):
768 768 """Validates that a set of recognized requirements is mutually compatible.
769 769
770 770 Some requirements may not be compatible with others or require
771 771 config options that aren't enabled. This function is called during
772 772 repository opening to ensure that the set of requirements needed
773 773 to open a repository is sane and compatible with config options.
774 774
775 775 Extensions can monkeypatch this function to perform additional
776 776 checking.
777 777
778 778 ``error.RepoError`` should be raised on failure.
779 779 """
780 780 if b'exp-sparse' in requirements and not sparse.enabled:
781 781 raise error.RepoError(
782 782 _(
783 783 b'repository is using sparse feature but '
784 784 b'sparse is not enabled; enable the '
785 785 b'"sparse" extensions to access'
786 786 )
787 787 )
788 788
789 789
790 790 def makestore(requirements, path, vfstype):
791 791 """Construct a storage object for a repository."""
792 792 if b'store' in requirements:
793 793 if b'fncache' in requirements:
794 794 return storemod.fncachestore(
795 795 path, vfstype, b'dotencode' in requirements
796 796 )
797 797
798 798 return storemod.encodedstore(path, vfstype)
799 799
800 800 return storemod.basicstore(path, vfstype)
801 801
802 802
803 803 def resolvestorevfsoptions(ui, requirements, features):
804 804 """Resolve the options to pass to the store vfs opener.
805 805
806 806 The returned dict is used to influence behavior of the storage layer.
807 807 """
808 808 options = {}
809 809
810 810 if b'treemanifest' in requirements:
811 811 options[b'treemanifest'] = True
812 812
813 813 # experimental config: format.manifestcachesize
814 814 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
815 815 if manifestcachesize is not None:
816 816 options[b'manifestcachesize'] = manifestcachesize
817 817
818 818 # In the absence of another requirement superseding a revlog-related
819 819 # requirement, we have to assume the repo is using revlog version 0.
820 820 # This revlog format is super old and we don't bother trying to parse
821 821 # opener options for it because those options wouldn't do anything
822 822 # meaningful on such old repos.
823 823 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
824 824 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
825 825 else: # explicitly mark repo as using revlogv0
826 826 options[b'revlogv0'] = True
827 827
828 828 if COPIESSDC_REQUIREMENT in requirements:
829 829 options[b'copies-storage'] = b'changeset-sidedata'
830 830 else:
831 831 writecopiesto = ui.config(b'experimental', b'copies.write-to')
832 832 copiesextramode = (b'changeset-only', b'compatibility')
833 833 if writecopiesto in copiesextramode:
834 834 options[b'copies-storage'] = b'extra'
835 835
836 836 return options
837 837
838 838
839 839 def resolverevlogstorevfsoptions(ui, requirements, features):
840 840 """Resolve opener options specific to revlogs."""
841 841
842 842 options = {}
843 843 options[b'flagprocessors'] = {}
844 844
845 845 if b'revlogv1' in requirements:
846 846 options[b'revlogv1'] = True
847 847 if REVLOGV2_REQUIREMENT in requirements:
848 848 options[b'revlogv2'] = True
849 849
850 850 if b'generaldelta' in requirements:
851 851 options[b'generaldelta'] = True
852 852
853 853 # experimental config: format.chunkcachesize
854 854 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
855 855 if chunkcachesize is not None:
856 856 options[b'chunkcachesize'] = chunkcachesize
857 857
858 858 deltabothparents = ui.configbool(
859 859 b'storage', b'revlog.optimize-delta-parent-choice'
860 860 )
861 861 options[b'deltabothparents'] = deltabothparents
862 862
863 863 lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
864 864 lazydeltabase = False
865 865 if lazydelta:
866 866 lazydeltabase = ui.configbool(
867 867 b'storage', b'revlog.reuse-external-delta-parent'
868 868 )
869 869 if lazydeltabase is None:
870 870 lazydeltabase = not scmutil.gddeltaconfig(ui)
871 871 options[b'lazydelta'] = lazydelta
872 872 options[b'lazydeltabase'] = lazydeltabase
873 873
874 874 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
875 875 if 0 <= chainspan:
876 876 options[b'maxdeltachainspan'] = chainspan
877 877
878 878 mmapindexthreshold = ui.configbytes(b'experimental', b'mmapindexthreshold')
879 879 if mmapindexthreshold is not None:
880 880 options[b'mmapindexthreshold'] = mmapindexthreshold
881 881
882 882 withsparseread = ui.configbool(b'experimental', b'sparse-read')
883 883 srdensitythres = float(
884 884 ui.config(b'experimental', b'sparse-read.density-threshold')
885 885 )
886 886 srmingapsize = ui.configbytes(b'experimental', b'sparse-read.min-gap-size')
887 887 options[b'with-sparse-read'] = withsparseread
888 888 options[b'sparse-read-density-threshold'] = srdensitythres
889 889 options[b'sparse-read-min-gap-size'] = srmingapsize
890 890
891 891 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
892 892 options[b'sparse-revlog'] = sparserevlog
893 893 if sparserevlog:
894 894 options[b'generaldelta'] = True
895 895
896 896 sidedata = SIDEDATA_REQUIREMENT in requirements
897 897 options[b'side-data'] = sidedata
898 898
899 899 maxchainlen = None
900 900 if sparserevlog:
901 901 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
902 902 # experimental config: format.maxchainlen
903 903 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
904 904 if maxchainlen is not None:
905 905 options[b'maxchainlen'] = maxchainlen
906 906
907 907 for r in requirements:
908 908         # we allow multiple compression engine requirements to co-exist because,
909 909         # strictly speaking, revlog seems to support mixed compression styles.
910 910 #
911 911 # The compression used for new entries will be "the last one"
912 912 prefix = r.startswith
913 913 if prefix(b'revlog-compression-') or prefix(b'exp-compression-'):
914 914 options[b'compengine'] = r.split(b'-', 2)[2]
915 915
916 916 options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
917 917 if options[b'zlib.level'] is not None:
918 918 if not (0 <= options[b'zlib.level'] <= 9):
919 919 msg = _(b'invalid value for `storage.revlog.zlib.level` config: %d')
920 920 raise error.Abort(msg % options[b'zlib.level'])
921 921 options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
922 922 if options[b'zstd.level'] is not None:
923 923 if not (0 <= options[b'zstd.level'] <= 22):
924 924 msg = _(b'invalid value for `storage.revlog.zstd.level` config: %d')
925 925 raise error.Abort(msg % options[b'zstd.level'])
926 926
927 927 if repository.NARROW_REQUIREMENT in requirements:
928 928 options[b'enableellipsis'] = True
929 929
930 930 if ui.configbool(b'experimental', b'rust.index'):
931 931 options[b'rust.index'] = True
932 932
933 933 return options
934 934
935 935
936 936 def makemain(**kwargs):
937 937 """Produce a type conforming to ``ilocalrepositorymain``."""
938 938 return localrepository
939 939
940 940
941 941 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
942 942 class revlogfilestorage(object):
943 943 """File storage when using revlogs."""
944 944
945 945 def file(self, path):
946 946 if path[0] == b'/':
947 947 path = path[1:]
948 948
949 949 return filelog.filelog(self.svfs, path)
950 950
951 951
952 952 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
953 953 class revlognarrowfilestorage(object):
954 954 """File storage when using revlogs and narrow files."""
955 955
956 956 def file(self, path):
957 957 if path[0] == b'/':
958 958 path = path[1:]
959 959
960 960 return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
961 961
962 962
963 963 def makefilestorage(requirements, features, **kwargs):
964 964 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
965 965 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
966 966 features.add(repository.REPO_FEATURE_STREAM_CLONE)
967 967
968 968 if repository.NARROW_REQUIREMENT in requirements:
969 969 return revlognarrowfilestorage
970 970 else:
971 971 return revlogfilestorage
972 972
973 973
974 974 # List of repository interfaces and factory functions for them. Each
975 975 # will be called in order during ``makelocalrepository()`` to iteratively
976 976 # derive the final type for a local repository instance. We capture the
977 977 # function as a lambda so we don't hold a reference and the module-level
978 978 # functions can be wrapped.
979 979 REPO_INTERFACES = [
980 980 (repository.ilocalrepositorymain, lambda: makemain),
981 981 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
982 982 ]
983 983
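Because each factory is captured behind a lambda, an extension can wrap the module-level function and have its wrapper take effect when repository types are derived; a sketch wrapping makemain(), where the mixed-in helper is hypothetical:

from mercurial import extensions, localrepo

def _makemain(orig, **kwargs):
    cls = orig(**kwargs)

    class extendedrepo(cls):
        # hypothetical helper available on every derived repository type
        def myextensionenabled(self):
            return self.ui.configbool(b'myextension', b'enabled')

    return extendedrepo

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'makemain', _makemain)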
984 984
985 985 @interfaceutil.implementer(repository.ilocalrepositorymain)
986 986 class localrepository(object):
987 987 """Main class for representing local repositories.
988 988
989 989 All local repositories are instances of this class.
990 990
991 991 Constructed on its own, instances of this class are not usable as
992 992 repository objects. To obtain a usable repository object, call
993 993 ``hg.repository()``, ``localrepo.instance()``, or
994 994 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
995 995 ``instance()`` adds support for creating new repositories.
996 996 ``hg.repository()`` adds more extension integration, including calling
997 997 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
998 998 used.
999 999 """
1000 1000
1001 1001 # obsolete experimental requirements:
1002 1002 # - manifestv2: An experimental new manifest format that allowed
1003 1003 # for stem compression of long paths. Experiment ended up not
1004 1004 # being successful (repository sizes went up due to worse delta
1005 1005 # chains), and the code was deleted in 4.6.
1006 1006 supportedformats = {
1007 1007 b'revlogv1',
1008 1008 b'generaldelta',
1009 1009 b'treemanifest',
1010 1010 COPIESSDC_REQUIREMENT,
1011 1011 REVLOGV2_REQUIREMENT,
1012 1012 SIDEDATA_REQUIREMENT,
1013 1013 SPARSEREVLOG_REQUIREMENT,
1014 1014 bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
1015 1015 }
1016 1016 _basesupported = supportedformats | {
1017 1017 b'store',
1018 1018 b'fncache',
1019 1019 b'shared',
1020 1020 b'relshared',
1021 1021 b'dotencode',
1022 1022 b'exp-sparse',
1023 1023 b'internal-phase',
1024 1024 }
1025 1025
1026 1026     # list of prefixes for files which can be written without 'wlock'
1027 1027 # Extensions should extend this list when needed
1028 1028 _wlockfreeprefix = {
1029 1029         # We might consider requiring 'wlock' for the next
1030 1030         # two, but pretty much all the existing code assumes
1031 1031 # wlock is not needed so we keep them excluded for
1032 1032 # now.
1033 1033 b'hgrc',
1034 1034 b'requires',
1035 1035         # XXX cache is a complicated business; someone
1036 1036 # should investigate this in depth at some point
1037 1037 b'cache/',
1038 1038 # XXX shouldn't be dirstate covered by the wlock?
1039 1039 b'dirstate',
1040 1040 # XXX bisect was still a bit too messy at the time
1041 1041 # this changeset was introduced. Someone should fix
1042 1042         # the remaining bit and drop this line
1043 1043 b'bisect.state',
1044 1044 }
1045 1045
1046 1046 def __init__(
1047 1047 self,
1048 1048 baseui,
1049 1049 ui,
1050 1050 origroot,
1051 1051 wdirvfs,
1052 1052 hgvfs,
1053 1053 requirements,
1054 1054 supportedrequirements,
1055 1055 sharedpath,
1056 1056 store,
1057 1057 cachevfs,
1058 1058 wcachevfs,
1059 1059 features,
1060 1060 intents=None,
1061 1061 ):
1062 1062 """Create a new local repository instance.
1063 1063
1064 1064 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
1065 1065 or ``localrepo.makelocalrepository()`` for obtaining a new repository
1066 1066 object.
1067 1067
1068 1068 Arguments:
1069 1069
1070 1070 baseui
1071 1071 ``ui.ui`` instance that ``ui`` argument was based off of.
1072 1072
1073 1073 ui
1074 1074 ``ui.ui`` instance for use by the repository.
1075 1075
1076 1076 origroot
1077 1077 ``bytes`` path to working directory root of this repository.
1078 1078
1079 1079 wdirvfs
1080 1080 ``vfs.vfs`` rooted at the working directory.
1081 1081
1082 1082 hgvfs
1083 1083 ``vfs.vfs`` rooted at .hg/
1084 1084
1085 1085 requirements
1086 1086 ``set`` of bytestrings representing repository opening requirements.
1087 1087
1088 1088 supportedrequirements
1089 1089 ``set`` of bytestrings representing repository requirements that we
1090 1090         know how to open. May be a superset of ``requirements``.
1091 1091
1092 1092 sharedpath
1093 1093         ``bytes`` defining the path to the storage base directory. Points to a
1094 1094 ``.hg/`` directory somewhere.
1095 1095
1096 1096 store
1097 1097 ``store.basicstore`` (or derived) instance providing access to
1098 1098 versioned storage.
1099 1099
1100 1100 cachevfs
1101 1101 ``vfs.vfs`` used for cache files.
1102 1102
1103 1103 wcachevfs
1104 1104 ``vfs.vfs`` used for cache files related to the working copy.
1105 1105
1106 1106 features
1107 1107 ``set`` of bytestrings defining features/capabilities of this
1108 1108 instance.
1109 1109
1110 1110 intents
1111 1111 ``set`` of system strings indicating what this repo will be used
1112 1112 for.
1113 1113 """
1114 1114 self.baseui = baseui
1115 1115 self.ui = ui
1116 1116 self.origroot = origroot
1117 1117 # vfs rooted at working directory.
1118 1118 self.wvfs = wdirvfs
1119 1119 self.root = wdirvfs.base
1120 1120 # vfs rooted at .hg/. Used to access most non-store paths.
1121 1121 self.vfs = hgvfs
1122 1122 self.path = hgvfs.base
1123 1123 self.requirements = requirements
1124 1124 self.supported = supportedrequirements
1125 1125 self.sharedpath = sharedpath
1126 1126 self.store = store
1127 1127 self.cachevfs = cachevfs
1128 1128 self.wcachevfs = wcachevfs
1129 1129 self.features = features
1130 1130
1131 1131 self.filtername = None
1132 1132
1133 1133 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1134 1134 b'devel', b'check-locks'
1135 1135 ):
1136 1136 self.vfs.audit = self._getvfsward(self.vfs.audit)
1137 1137 # A list of callbacks to shape the phase if no data were found.
1138 1138 # Callbacks are in the form: func(repo, roots) --> processed root.
1139 1139 # This list is to be filled by extensions during repo setup
1140 1140 self._phasedefaults = []
1141 1141
1142 1142 color.setup(self.ui)
1143 1143
1144 1144 self.spath = self.store.path
1145 1145 self.svfs = self.store.vfs
1146 1146 self.sjoin = self.store.join
1147 1147 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1148 1148 b'devel', b'check-locks'
1149 1149 ):
1150 1150 if util.safehasattr(self.svfs, b'vfs'): # this is filtervfs
1151 1151 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
1152 1152 else: # standard vfs
1153 1153 self.svfs.audit = self._getsvfsward(self.svfs.audit)
1154 1154
1155 1155 self._dirstatevalidatewarned = False
1156 1156
1157 1157 self._branchcaches = branchmap.BranchMapCache()
1158 1158 self._revbranchcache = None
1159 1159 self._filterpats = {}
1160 1160 self._datafilters = {}
1161 1161 self._transref = self._lockref = self._wlockref = None
1162 1162
1163 1163 # A cache for various files under .hg/ that tracks file changes,
1164 1164 # (used by the filecache decorator)
1165 1165 #
1166 1166 # Maps a property name to its util.filecacheentry
1167 1167 self._filecache = {}
1168 1168
1169 1169 # hold sets of revision to be filtered
1170 1170 # should be cleared when something might have changed the filter value:
1171 1171 # - new changesets,
1172 1172 # - phase change,
1173 1173 # - new obsolescence marker,
1174 1174 # - working directory parent change,
1175 1175 # - bookmark changes
1176 1176 self.filteredrevcache = {}
1177 1177
1178 1178 # post-dirstate-status hooks
1179 1179 self._postdsstatus = []
1180 1180
1181 1181 # generic mapping between names and nodes
1182 1182 self.names = namespaces.namespaces()
1183 1183
1184 1184 # Key to signature value.
1185 1185 self._sparsesignaturecache = {}
1186 1186 # Signature to cached matcher instance.
1187 1187 self._sparsematchercache = {}
1188 1188
1189 1189 self._extrafilterid = repoview.extrafilter(ui)
1190 1190
1191 1191 self.filecopiesmode = None
1192 1192 if COPIESSDC_REQUIREMENT in self.requirements:
1193 1193 self.filecopiesmode = b'changeset-sidedata'
1194 1194
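Per the docstring of ``__init__`` above, the constructor is rarely invoked directly. A minimal sketch of the documented entry point (the path is illustrative):

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')  # ends up in localrepo.instance()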
1195 1195 def _getvfsward(self, origfunc):
1196 1196 """build a ward for self.vfs"""
1197 1197 rref = weakref.ref(self)
1198 1198
1199 1199 def checkvfs(path, mode=None):
1200 1200 ret = origfunc(path, mode=mode)
1201 1201 repo = rref()
1202 1202 if (
1203 1203 repo is None
1204 1204 or not util.safehasattr(repo, b'_wlockref')
1205 1205 or not util.safehasattr(repo, b'_lockref')
1206 1206 ):
1207 1207 return
1208 1208 if mode in (None, b'r', b'rb'):
1209 1209 return
1210 1210 if path.startswith(repo.path):
1211 1211 # truncate name relative to the repository (.hg)
1212 1212 path = path[len(repo.path) + 1 :]
1213 1213 if path.startswith(b'cache/'):
1214 1214 msg = b'accessing cache with vfs instead of cachevfs: "%s"'
1215 1215 repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
1216 1216 if path.startswith(b'journal.') or path.startswith(b'undo.'):
1217 1217 # journal is covered by 'lock'
1218 1218 if repo._currentlock(repo._lockref) is None:
1219 1219 repo.ui.develwarn(
1220 1220 b'write with no lock: "%s"' % path,
1221 1221 stacklevel=3,
1222 1222 config=b'check-locks',
1223 1223 )
1224 1224 elif repo._currentlock(repo._wlockref) is None:
1225 1225 # rest of vfs files are covered by 'wlock'
1226 1226 #
1227 1227 # exclude special files
1228 1228 for prefix in self._wlockfreeprefix:
1229 1229 if path.startswith(prefix):
1230 1230 return
1231 1231 repo.ui.develwarn(
1232 1232 b'write with no wlock: "%s"' % path,
1233 1233 stacklevel=3,
1234 1234 config=b'check-locks',
1235 1235 )
1236 1236 return ret
1237 1237
1238 1238 return checkvfs
1239 1239
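The ward built above is only installed when the developer warnings are enabled. A sketch of the hgrc fragment that the surrounding configbool checks look for:

    [devel]
    all-warnings = yes
    check-locks = yes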
1240 1240 def _getsvfsward(self, origfunc):
1241 1241 """build a ward for self.svfs"""
1242 1242 rref = weakref.ref(self)
1243 1243
1244 1244 def checksvfs(path, mode=None):
1245 1245 ret = origfunc(path, mode=mode)
1246 1246 repo = rref()
1247 1247 if repo is None or not util.safehasattr(repo, b'_lockref'):
1248 1248 return
1249 1249 if mode in (None, b'r', b'rb'):
1250 1250 return
1251 1251 if path.startswith(repo.sharedpath):
1252 1252 # truncate name relative to the repository (.hg)
1253 1253 path = path[len(repo.sharedpath) + 1 :]
1254 1254 if repo._currentlock(repo._lockref) is None:
1255 1255 repo.ui.develwarn(
1256 1256 b'write with no lock: "%s"' % path, stacklevel=4
1257 1257 )
1258 1258 return ret
1259 1259
1260 1260 return checksvfs
1261 1261
1262 1262 def close(self):
1263 1263 self._writecaches()
1264 1264
1265 1265 def _writecaches(self):
1266 1266 if self._revbranchcache:
1267 1267 self._revbranchcache.write()
1268 1268
1269 1269 def _restrictcapabilities(self, caps):
1270 1270 if self.ui.configbool(b'experimental', b'bundle2-advertise'):
1271 1271 caps = set(caps)
1272 1272 capsblob = bundle2.encodecaps(
1273 1273 bundle2.getrepocaps(self, role=b'client')
1274 1274 )
1275 1275 caps.add(b'bundle2=' + urlreq.quote(capsblob))
1276 1276 return caps
1277 1277
1278 1278 def _writerequirements(self):
1279 1279 scmutil.writerequires(self.vfs, self.requirements)
1280 1280
1281 1281 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1282 1282 # self -> auditor -> self._checknested -> self
1283 1283
1284 1284 @property
1285 1285 def auditor(self):
1286 1286 # This is only used by context.workingctx.match in order to
1287 1287 # detect files in subrepos.
1288 1288 return pathutil.pathauditor(self.root, callback=self._checknested)
1289 1289
1290 1290 @property
1291 1291 def nofsauditor(self):
1292 1292 # This is only used by context.basectx.match in order to detect
1293 1293 # files in subrepos.
1294 1294 return pathutil.pathauditor(
1295 1295 self.root, callback=self._checknested, realfs=False, cached=True
1296 1296 )
1297 1297
1298 1298 def _checknested(self, path):
1299 1299 """Determine if path is a legal nested repository."""
1300 1300 if not path.startswith(self.root):
1301 1301 return False
1302 1302 subpath = path[len(self.root) + 1 :]
1303 1303 normsubpath = util.pconvert(subpath)
1304 1304
1305 1305 # XXX: Checking against the current working copy is wrong in
1306 1306 # the sense that it can reject things like
1307 1307 #
1308 1308 # $ hg cat -r 10 sub/x.txt
1309 1309 #
1310 1310 # if sub/ is no longer a subrepository in the working copy
1311 1311 # parent revision.
1312 1312 #
1313 1313 # However, it can of course also allow things that would have
1314 1314 # been rejected before, such as the above cat command if sub/
1315 1315 # is a subrepository now, but was a normal directory before.
1316 1316 # The old path auditor would have rejected by mistake since it
1317 1317 # panics when it sees sub/.hg/.
1318 1318 #
1319 1319 # All in all, checking against the working copy seems sensible
1320 1320 # since we want to prevent access to nested repositories on
1321 1321 # the filesystem *now*.
1322 1322 ctx = self[None]
1323 1323 parts = util.splitpath(subpath)
1324 1324 while parts:
1325 1325 prefix = b'/'.join(parts)
1326 1326 if prefix in ctx.substate:
1327 1327 if prefix == normsubpath:
1328 1328 return True
1329 1329 else:
1330 1330 sub = ctx.sub(prefix)
1331 1331 return sub.checknested(subpath[len(prefix) + 1 :])
1332 1332 else:
1333 1333 parts.pop()
1334 1334 return False
1335 1335
1336 1336 def peer(self):
1337 1337 return localpeer(self) # not cached to avoid reference cycle
1338 1338
1339 1339 def unfiltered(self):
1340 1340 """Return unfiltered version of the repository
1341 1341
1342 1342 Intended to be overwritten by filtered repo."""
1343 1343 return self
1344 1344
1345 1345 def filtered(self, name, visibilityexceptions=None):
1346 1346 """Return a filtered version of a repository
1347 1347
1348 1348 The `name` parameter is the identifier of the requested view. This
1349 1349 will return a repoview object set "exactly" to the specified view.
1350 1350
1351 1351 This function does not apply recursive filtering to a repository. For
1352 1352 example calling `repo.filtered("served")` will return a repoview using
1353 1353 the "served" view, regardless of the initial view used by `repo`.
1354 1354
1355 1355 In other words, there is always only one level of `repoview` "filtering".
1356 1356 """
1357 1357 if self._extrafilterid is not None and b'%' not in name:
1358 1358 name = name + b'%' + self._extrafilterid
1359 1359
1360 1360 cls = repoview.newtype(self.unfiltered().__class__)
1361 1361 return cls(self, name, visibilityexceptions)
1362 1362
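A short usage sketch of the view API described above; the filter names are the standard ones used elsewhere in this file:

    served = repo.filtered(b'served')    # view without hidden/secret changesets
    raw = repo.unfiltered()              # no filtering at all
    # filtering is not recursive: this is a plain 'visible' view, not
    # 'visible' applied on top of 'served'
    visible = served.filtered(b'visible')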
1363 1363 @mixedrepostorecache(
1364 1364 (b'bookmarks', b'plain'),
1365 1365 (b'bookmarks.current', b'plain'),
1366 1366 (b'bookmarks', b''),
1367 1367 (b'00changelog.i', b''),
1368 1368 )
1369 1369 def _bookmarks(self):
1370 1370 # Since the multiple files involved in the transaction cannot be
1371 1371 # written atomically (with current repository format), there is a race
1372 1372 # condition here.
1373 1373 #
1374 1374 # 1) changelog content A is read
1375 1375 # 2) outside transaction update changelog to content B
1376 1376 # 3) outside transaction update bookmark file referring to content B
1377 1377 # 4) bookmarks file content is read and filtered against changelog-A
1378 1378 #
1379 1379 # When this happens, bookmarks against nodes missing from A are dropped.
1380 1380 #
1381 1381 # Having this happen during read is not great, but it becomes worse
1382 1382 # when it happens during write because the bookmarks to the "unknown"
1383 1383 # nodes will be dropped for good. However, writes happen within locks.
1384 1384 # This locking makes it possible to have a race free consistent read.
1385 1385 # For this purpose, data read from disk before locking is
1386 1386 # "invalidated" right after the locks are taken. This invalidation is
1387 1387 # "light": the `filecache` mechanism keeps the data in memory and will
1388 1388 # reuse it if the underlying files did not change. Not parsing the
1389 1389 # same data multiple times helps performance.
1390 1390 #
1391 1391 # Unfortunately, in the case described above, the files tracked by the
1392 1392 # bookmarks file cache might not have changed, but the in-memory
1393 1393 # content is still "wrong" because we used an older changelog content
1394 1394 # to process the on-disk data. So after locking, the changelog would be
1395 1395 # refreshed but `_bookmarks` would be preserved.
1396 1396 # Adding `00changelog.i` to the list of tracked files is not
1397 1397 # enough, because at the time we build the content for `_bookmarks` in
1398 1398 # (4), the changelog file has already diverged from the content used
1399 1399 # for loading `changelog` in (1)
1400 1400 #
1401 1401 # To prevent the issue, we force the changelog to be explicitly
1402 1402 # reloaded while computing `_bookmarks`. The data race can still happen
1403 1403 # without the lock (with a narrower window), but it would no longer go
1404 1404 # undetected during the lock time refresh.
1405 1405 #
1406 1406 # The new schedule is as follows:
1407 1407 #
1408 1408 # 1) filecache logic detects that `_bookmarks` needs to be computed
1409 1409 # 2) cachestat for `bookmarks` and `changelog` are captured (for book)
1410 1410 # 3) We force `changelog` filecache to be tested
1411 1411 # 4) cachestat for `changelog` are captured (for changelog)
1412 1412 # 5) `_bookmarks` is computed and cached
1413 1413 #
1414 1414 # The step in (3) ensures we have a changelog at least as recent as the
1415 1415 # cache stat computed in (1). As a result, at locking time:
1416 1416 # * if the changelog did not change since (1) -> we can reuse the data
1417 1417 # * otherwise -> the bookmarks get refreshed.
1418 1418 self._refreshchangelog()
1419 1419 return bookmarks.bmstore(self)
1420 1420
1421 1421 def _refreshchangelog(self):
1422 1422 """make sure the in memory changelog match the on-disk one"""
1423 1423 if 'changelog' in vars(self) and self.currenttransaction() is None:
1424 1424 del self.changelog
1425 1425
1426 1426 @property
1427 1427 def _activebookmark(self):
1428 1428 return self._bookmarks.active
1429 1429
1430 1430 # _phasesets depend on changelog. what we need is to call
1431 1431 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1432 1432 # can't be easily expressed in filecache mechanism.
1433 1433 @storecache(b'phaseroots', b'00changelog.i')
1434 1434 def _phasecache(self):
1435 1435 return phases.phasecache(self, self._phasedefaults)
1436 1436
1437 1437 @storecache(b'obsstore')
1438 1438 def obsstore(self):
1439 1439 return obsolete.makestore(self.ui, self)
1440 1440
1441 1441 @storecache(b'00changelog.i')
1442 1442 def changelog(self):
1443 1443 return self.store.changelog(txnutil.mayhavepending(self.root))
1444 1444
1445 1445 @storecache(b'00manifest.i')
1446 1446 def manifestlog(self):
1447 1447 return self.store.manifestlog(self, self._storenarrowmatch)
1448 1448
1449 1449 @repofilecache(b'dirstate')
1450 1450 def dirstate(self):
1451 1451 return self._makedirstate()
1452 1452
1453 1453 def _makedirstate(self):
1454 1454 """Extension point for wrapping the dirstate per-repo."""
1455 1455 sparsematchfn = lambda: sparse.matcher(self)
1456 1456
1457 1457 return dirstate.dirstate(
1458 1458 self.vfs, self.ui, self.root, self._dirstatevalidate, sparsematchfn
1459 1459 )
1460 1460
1461 1461 def _dirstatevalidate(self, node):
1462 1462 try:
1463 1463 self.changelog.rev(node)
1464 1464 return node
1465 1465 except error.LookupError:
1466 1466 if not self._dirstatevalidatewarned:
1467 1467 self._dirstatevalidatewarned = True
1468 1468 self.ui.warn(
1469 1469 _(b"warning: ignoring unknown working parent %s!\n")
1470 1470 % short(node)
1471 1471 )
1472 1472 return nullid
1473 1473
1474 1474 @storecache(narrowspec.FILENAME)
1475 1475 def narrowpats(self):
1476 1476 """matcher patterns for this repository's narrowspec
1477 1477
1478 1478 A tuple of (includes, excludes).
1479 1479 """
1480 1480 return narrowspec.load(self)
1481 1481
1482 1482 @storecache(narrowspec.FILENAME)
1483 1483 def _storenarrowmatch(self):
1484 1484 if repository.NARROW_REQUIREMENT not in self.requirements:
1485 1485 return matchmod.always()
1486 1486 include, exclude = self.narrowpats
1487 1487 return narrowspec.match(self.root, include=include, exclude=exclude)
1488 1488
1489 1489 @storecache(narrowspec.FILENAME)
1490 1490 def _narrowmatch(self):
1491 1491 if repository.NARROW_REQUIREMENT not in self.requirements:
1492 1492 return matchmod.always()
1493 1493 narrowspec.checkworkingcopynarrowspec(self)
1494 1494 include, exclude = self.narrowpats
1495 1495 return narrowspec.match(self.root, include=include, exclude=exclude)
1496 1496
1497 1497 def narrowmatch(self, match=None, includeexact=False):
1498 1498 """matcher corresponding the the repo's narrowspec
1499 1499
1500 1500 If `match` is given, then that will be intersected with the narrow
1501 1501 matcher.
1502 1502
1503 1503 If `includeexact` is True, then any exact matches from `match` will
1504 1504 be included even if they're outside the narrowspec.
1505 1505 """
1506 1506 if match:
1507 1507 if includeexact and not self._narrowmatch.always():
1508 1508 # do not exclude explicitly-specified paths so that they can
1509 1509 # be warned later on
1510 1510 em = matchmod.exact(match.files())
1511 1511 nm = matchmod.unionmatcher([self._narrowmatch, em])
1512 1512 return matchmod.intersectmatchers(match, nm)
1513 1513 return matchmod.intersectmatchers(match, self._narrowmatch)
1514 1514 return self._narrowmatch
1515 1515
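A sketch of intersecting a caller-supplied matcher with the narrowspec as described above; the pattern and path are illustrative, and the matcher is built the same way as in _loadfilter later in this file:

    from mercurial import match as matchmod

    m = matchmod.match(repo.root, b'', [b'glob:src/**.py'])
    nm = repo.narrowmatch(m, includeexact=True)
    nm(b'src/setup.py')  # True only if inside both the pattern and the narrowspec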
1516 1516 def setnarrowpats(self, newincludes, newexcludes):
1517 1517 narrowspec.save(self, newincludes, newexcludes)
1518 1518 self.invalidate(clearfilecache=True)
1519 1519
1520 1520 @util.propertycache
1521 1521 def _quick_access_changeid(self):
1522 1522 """an helper dictionnary for __getitem__ calls
1523 1523
1524 1524 This contains a list of symbol we can recognise right away without
1525 1525 further processing.
1526 1526 """
1527 1527 return {
1528 1528 b'null': (nullrev, nullid),
1529 1529 nullrev: (nullrev, nullid),
1530 1530 nullid: (nullrev, nullid),
1531 1531 }
1532 1532
1533 1533 def __getitem__(self, changeid):
1534 1534 # dealing with special cases
1535 1535 if changeid is None:
1536 1536 return context.workingctx(self)
1537 1537 if isinstance(changeid, context.basectx):
1538 1538 return changeid
1539 1539
1540 1540 # dealing with multiple revisions
1541 1541 if isinstance(changeid, slice):
1542 1542 # wdirrev isn't contiguous so the slice shouldn't include it
1543 1543 return [
1544 1544 self[i]
1545 1545 for i in pycompat.xrange(*changeid.indices(len(self)))
1546 1546 if i not in self.changelog.filteredrevs
1547 1547 ]
1548 1548
1549 1549 # dealing with some special values
1550 1550 quick_access = self._quick_access_changeid.get(changeid)
1551 1551 if quick_access is not None:
1552 1552 rev, node = quick_access
1553 1553 return context.changectx(self, rev, node, maybe_filtered=False)
1554 1554 if changeid == b'tip':
1555 1555 node = self.changelog.tip()
1556 1556 rev = self.changelog.rev(node)
1557 1557 return context.changectx(self, rev, node)
1558 1558
1559 1559 # dealing with arbitrary values
1560 1560 try:
1561 1561 if isinstance(changeid, int):
1562 1562 node = self.changelog.node(changeid)
1563 1563 rev = changeid
1564 1564 elif changeid == b'.':
1565 1565 # this is a hack to delay/avoid loading obsmarkers
1566 1566 # when we know that '.' won't be hidden
1567 1567 node = self.dirstate.p1()
1568 1568 rev = self.unfiltered().changelog.rev(node)
1569 1569 elif len(changeid) == 20:
1570 1570 try:
1571 1571 node = changeid
1572 1572 rev = self.changelog.rev(changeid)
1573 1573 except error.FilteredLookupError:
1574 1574 changeid = hex(changeid) # for the error message
1575 1575 raise
1576 1576 except LookupError:
1577 1577 # check if it might have come from damaged dirstate
1578 1578 #
1579 1579 # XXX we could avoid the unfiltered if we had a recognizable
1580 1580 # exception for filtered changeset access
1581 1581 if (
1582 1582 self.local()
1583 1583 and changeid in self.unfiltered().dirstate.parents()
1584 1584 ):
1585 1585 msg = _(b"working directory has unknown parent '%s'!")
1586 1586 raise error.Abort(msg % short(changeid))
1587 1587 changeid = hex(changeid) # for the error message
1588 1588 raise
1589 1589
1590 1590 elif len(changeid) == 40:
1591 1591 node = bin(changeid)
1592 1592 rev = self.changelog.rev(node)
1593 1593 else:
1594 1594 raise error.ProgrammingError(
1595 1595 b"unsupported changeid '%s' of type %s"
1596 1596 % (changeid, pycompat.bytestr(type(changeid)))
1597 1597 )
1598 1598
1599 1599 return context.changectx(self, rev, node)
1600 1600
1601 1601 except (error.FilteredIndexError, error.FilteredLookupError):
1602 1602 raise error.FilteredRepoLookupError(
1603 1603 _(b"filtered revision '%s'") % pycompat.bytestr(changeid)
1604 1604 )
1605 1605 except (IndexError, LookupError):
1606 1606 raise error.RepoLookupError(
1607 1607 _(b"unknown revision '%s'") % pycompat.bytestr(changeid)
1608 1608 )
1609 1609 except error.WdirUnsupported:
1610 1610 return context.workingctx(self)
1611 1611
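The lookup above accepts several changeid shapes; a quick sketch (the ``node`` variable is assumed to hold a valid 20-byte binary node):

    repo[None]      # workingctx for the working directory
    repo[0]         # changectx by local revision number
    repo[b'.']      # dirstate parent, avoiding obsmarker loading
    repo[b'tip']    # repository tip
    repo[b'null']   # resolved through _quick_access_changeid
    repo[node]      # 20-byte binary node; a 40-char hex string is accepted too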
1612 1612 def __contains__(self, changeid):
1613 1613 """True if the given changeid exists
1614 1614
1615 1615 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1616 1616 is specified.
1617 1617 """
1618 1618 try:
1619 1619 self[changeid]
1620 1620 return True
1621 1621 except error.RepoLookupError:
1622 1622 return False
1623 1623
1624 1624 def __nonzero__(self):
1625 1625 return True
1626 1626
1627 1627 __bool__ = __nonzero__
1628 1628
1629 1629 def __len__(self):
1630 1630 # no need to pay the cost of repoview.changelog
1631 1631 unfi = self.unfiltered()
1632 1632 return len(unfi.changelog)
1633 1633
1634 1634 def __iter__(self):
1635 1635 return iter(self.changelog)
1636 1636
1637 1637 def revs(self, expr, *args):
1638 1638 '''Find revisions matching a revset.
1639 1639
1640 1640 The revset is specified as a string ``expr`` that may contain
1641 1641 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1642 1642
1643 1643 Revset aliases from the configuration are not expanded. To expand
1644 1644 user aliases, consider calling ``scmutil.revrange()`` or
1645 1645 ``repo.anyrevs([expr], user=True)``.
1646 1646
1647 1647 Returns a revset.abstractsmartset, which is a list-like interface
1648 1648 that contains integer revisions.
1649 1649 '''
1650 1650 tree = revsetlang.spectree(expr, *args)
1651 1651 return revset.makematcher(tree)(self)
1652 1652
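A usage sketch of the %-formatting mentioned in the docstring above; the revision number and revset are illustrative:

    revs = repo.revs(b'ancestors(%d) and not public()', 42)
    for ctx in repo.set(b'heads(%ld)', revs):
        repo.ui.write(ctx.hex() + b'\n')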
1653 1653 def set(self, expr, *args):
1654 1654 '''Find revisions matching a revset and emit changectx instances.
1655 1655
1656 1656 This is a convenience wrapper around ``revs()`` that iterates the
1657 1657 result and is a generator of changectx instances.
1658 1658
1659 1659 Revset aliases from the configuration are not expanded. To expand
1660 1660 user aliases, consider calling ``scmutil.revrange()``.
1661 1661 '''
1662 1662 for r in self.revs(expr, *args):
1663 1663 yield self[r]
1664 1664
1665 1665 def anyrevs(self, specs, user=False, localalias=None):
1666 1666 '''Find revisions matching one of the given revsets.
1667 1667
1668 1668 Revset aliases from the configuration are not expanded by default. To
1669 1669 expand user aliases, specify ``user=True``. To provide some local
1670 1670 definitions overriding user aliases, set ``localalias`` to
1671 1671 ``{name: definitionstring}``.
1672 1672 '''
1673 1673 if specs == [b'null']:
1674 1674 return revset.baseset([nullrev])
1675 1675 if user:
1676 1676 m = revset.matchany(
1677 1677 self.ui,
1678 1678 specs,
1679 1679 lookup=revset.lookupfn(self),
1680 1680 localalias=localalias,
1681 1681 )
1682 1682 else:
1683 1683 m = revset.matchany(None, specs, localalias=localalias)
1684 1684 return m(self)
1685 1685
1686 1686 def url(self):
1687 1687 return b'file:' + self.root
1688 1688
1689 1689 def hook(self, name, throw=False, **args):
1690 1690 """Call a hook, passing this repo instance.
1691 1691
1692 1692 This a convenience method to aid invoking hooks. Extensions likely
1693 1693 won't call this unless they have registered a custom hook or are
1694 1694 replacing code that is expected to call a hook.
1695 1695 """
1696 1696 return hook.hook(self.ui, self, name, throw, **args)
1697 1697
1698 1698 @filteredpropertycache
1699 1699 def _tagscache(self):
1700 1700 '''Returns a tagscache object that contains various tags-related
1701 1701 caches.'''
1702 1702
1703 1703 # This simplifies its cache management by having one decorated
1704 1704 # function (this one) and the rest simply fetch things from it.
1705 1705 class tagscache(object):
1706 1706 def __init__(self):
1707 1707 # These two define the set of tags for this repository. tags
1708 1708 # maps tag name to node; tagtypes maps tag name to 'global' or
1709 1709 # 'local'. (Global tags are defined by .hgtags across all
1710 1710 # heads, and local tags are defined in .hg/localtags.)
1711 1711 # They constitute the in-memory cache of tags.
1712 1712 self.tags = self.tagtypes = None
1713 1713
1714 1714 self.nodetagscache = self.tagslist = None
1715 1715
1716 1716 cache = tagscache()
1717 1717 cache.tags, cache.tagtypes = self._findtags()
1718 1718
1719 1719 return cache
1720 1720
1721 1721 def tags(self):
1722 1722 '''return a mapping of tag to node'''
1723 1723 t = {}
1724 1724 if self.changelog.filteredrevs:
1725 1725 tags, tt = self._findtags()
1726 1726 else:
1727 1727 tags = self._tagscache.tags
1728 1728 rev = self.changelog.rev
1729 1729 for k, v in pycompat.iteritems(tags):
1730 1730 try:
1731 1731 # ignore tags to unknown nodes
1732 1732 rev(v)
1733 1733 t[k] = v
1734 1734 except (error.LookupError, ValueError):
1735 1735 pass
1736 1736 return t
1737 1737
1738 1738 def _findtags(self):
1739 1739 '''Do the hard work of finding tags. Return a pair of dicts
1740 1740 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1741 1741 maps tag name to a string like \'global\' or \'local\'.
1742 1742 Subclasses or extensions are free to add their own tags, but
1743 1743 should be aware that the returned dicts will be retained for the
1744 1744 duration of the localrepo object.'''
1745 1745
1746 1746 # XXX what tagtype should subclasses/extensions use? Currently
1747 1747 # mq and bookmarks add tags, but do not set the tagtype at all.
1748 1748 # Should each extension invent its own tag type? Should there
1749 1749 # be one tagtype for all such "virtual" tags? Or is the status
1750 1750 # quo fine?
1751 1751
1752 1752 # map tag name to (node, hist)
1753 1753 alltags = tagsmod.findglobaltags(self.ui, self)
1754 1754 # map tag name to tag type
1755 1755 tagtypes = dict((tag, b'global') for tag in alltags)
1756 1756
1757 1757 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1758 1758
1759 1759 # Build the return dicts. Have to re-encode tag names because
1760 1760 # the tags module always uses UTF-8 (in order not to lose info
1761 1761 # writing to the cache), but the rest of Mercurial wants them in
1762 1762 # local encoding.
1763 1763 tags = {}
1764 1764 for (name, (node, hist)) in pycompat.iteritems(alltags):
1765 1765 if node != nullid:
1766 1766 tags[encoding.tolocal(name)] = node
1767 1767 tags[b'tip'] = self.changelog.tip()
1768 1768 tagtypes = dict(
1769 1769 [
1770 1770 (encoding.tolocal(name), value)
1771 1771 for (name, value) in pycompat.iteritems(tagtypes)
1772 1772 ]
1773 1773 )
1774 1774 return (tags, tagtypes)
1775 1775
1776 1776 def tagtype(self, tagname):
1777 1777 '''
1778 1778 return the type of the given tag. result can be:
1779 1779
1780 1780 'local' : a local tag
1781 1781 'global' : a global tag
1782 1782 None : tag does not exist
1783 1783 '''
1784 1784
1785 1785 return self._tagscache.tagtypes.get(tagname)
1786 1786
1787 1787 def tagslist(self):
1788 1788 '''return a list of tags ordered by revision'''
1789 1789 if not self._tagscache.tagslist:
1790 1790 l = []
1791 1791 for t, n in pycompat.iteritems(self.tags()):
1792 1792 l.append((self.changelog.rev(n), t, n))
1793 1793 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1794 1794
1795 1795 return self._tagscache.tagslist
1796 1796
1797 1797 def nodetags(self, node):
1798 1798 '''return the tags associated with a node'''
1799 1799 if not self._tagscache.nodetagscache:
1800 1800 nodetagscache = {}
1801 1801 for t, n in pycompat.iteritems(self._tagscache.tags):
1802 1802 nodetagscache.setdefault(n, []).append(t)
1803 1803 for tags in pycompat.itervalues(nodetagscache):
1804 1804 tags.sort()
1805 1805 self._tagscache.nodetagscache = nodetagscache
1806 1806 return self._tagscache.nodetagscache.get(node, [])
1807 1807
1808 1808 def nodebookmarks(self, node):
1809 1809 """return the list of bookmarks pointing to the specified node"""
1810 1810 return self._bookmarks.names(node)
1811 1811
1812 1812 def branchmap(self):
1813 1813 '''returns a dictionary {branch: [branchheads]} with branchheads
1814 1814 ordered by increasing revision number'''
1815 1815 return self._branchcaches[self]
1816 1816
1817 1817 @unfilteredmethod
1818 1818 def revbranchcache(self):
1819 1819 if not self._revbranchcache:
1820 1820 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1821 1821 return self._revbranchcache
1822 1822
1823 1823 def branchtip(self, branch, ignoremissing=False):
1824 1824 '''return the tip node for a given branch
1825 1825
1826 1826 If ignoremissing is True, then this method will not raise an error.
1827 1827 This is helpful for callers that only expect None for a missing branch
1828 1828 (e.g. namespace).
1829 1829
1830 1830 '''
1831 1831 try:
1832 1832 return self.branchmap().branchtip(branch)
1833 1833 except KeyError:
1834 1834 if not ignoremissing:
1835 1835 raise error.RepoLookupError(_(b"unknown branch '%s'") % branch)
1836 1836 else:
1837 1837 pass
1838 1838
1839 1839 def lookup(self, key):
1840 1840 node = scmutil.revsymbol(self, key).node()
1841 1841 if node is None:
1842 1842 raise error.RepoLookupError(_(b"unknown revision '%s'") % key)
1843 1843 return node
1844 1844
1845 1845 def lookupbranch(self, key):
1846 1846 if self.branchmap().hasbranch(key):
1847 1847 return key
1848 1848
1849 1849 return scmutil.revsymbol(self, key).branch()
1850 1850
1851 1851 def known(self, nodes):
1852 1852 cl = self.changelog
1853 1853 get_rev = cl.index.get_rev
1854 1854 filtered = cl.filteredrevs
1855 1855 result = []
1856 1856 for n in nodes:
1857 1857 r = get_rev(n)
1858 1858 resp = not (r is None or r in filtered)
1859 1859 result.append(resp)
1860 1860 return result
1861 1861
1862 1862 def local(self):
1863 1863 return self
1864 1864
1865 1865 def publishing(self):
1866 1866 # it's safe (and desirable) to trust the publish flag unconditionally
1867 1867 # so that we don't finalize changes shared between users via ssh or nfs
1868 1868 return self.ui.configbool(b'phases', b'publish', untrusted=True)
1869 1869
1870 1870 def cancopy(self):
1871 1871 # so statichttprepo's override of local() works
1872 1872 if not self.local():
1873 1873 return False
1874 1874 if not self.publishing():
1875 1875 return True
1876 1876 # if publishing we can't copy if there is filtered content
1877 1877 return not self.filtered(b'visible').changelog.filteredrevs
1878 1878
1879 1879 def shared(self):
1880 1880 '''the type of shared repository (None if not shared)'''
1881 1881 if self.sharedpath != self.path:
1882 1882 return b'store'
1883 1883 return None
1884 1884
1885 1885 def wjoin(self, f, *insidef):
1886 1886 return self.vfs.reljoin(self.root, f, *insidef)
1887 1887
1888 1888 def setparents(self, p1, p2=nullid):
1889 1889 self[None].setparents(p1, p2)
1890 1890
1891 1891 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1892 1892 """changeid must be a changeset revision, if specified.
1893 1893 fileid can be a file revision or node."""
1894 1894 return context.filectx(
1895 1895 self, path, changeid, fileid, changectx=changectx
1896 1896 )
1897 1897
1898 1898 def getcwd(self):
1899 1899 return self.dirstate.getcwd()
1900 1900
1901 1901 def pathto(self, f, cwd=None):
1902 1902 return self.dirstate.pathto(f, cwd)
1903 1903
1904 1904 def _loadfilter(self, filter):
1905 1905 if filter not in self._filterpats:
1906 1906 l = []
1907 1907 for pat, cmd in self.ui.configitems(filter):
1908 1908 if cmd == b'!':
1909 1909 continue
1910 1910 mf = matchmod.match(self.root, b'', [pat])
1911 1911 fn = None
1912 1912 params = cmd
1913 1913 for name, filterfn in pycompat.iteritems(self._datafilters):
1914 1914 if cmd.startswith(name):
1915 1915 fn = filterfn
1916 1916 params = cmd[len(name) :].lstrip()
1917 1917 break
1918 1918 if not fn:
1919 1919 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1920 1920 fn.__name__ = 'commandfilter'
1921 1921 # Wrap old filters not supporting keyword arguments
1922 1922 if not pycompat.getargspec(fn)[2]:
1923 1923 oldfn = fn
1924 1924 fn = lambda s, c, oldfn=oldfn, **kwargs: oldfn(s, c)
1925 1925 fn.__name__ = 'compat-' + oldfn.__name__
1926 1926 l.append((mf, fn, params))
1927 1927 self._filterpats[filter] = l
1928 1928 return self._filterpats[filter]
1929 1929
1930 1930 def _filter(self, filterpats, filename, data):
1931 1931 for mf, fn, cmd in filterpats:
1932 1932 if mf(filename):
1933 1933 self.ui.debug(
1934 1934 b"filtering %s through %s\n"
1935 1935 % (filename, cmd or pycompat.sysbytes(fn.__name__))
1936 1936 )
1937 1937 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1938 1938 break
1939 1939
1940 1940 return data
1941 1941
1942 1942 @unfilteredpropertycache
1943 1943 def _encodefilterpats(self):
1944 1944 return self._loadfilter(b'encode')
1945 1945
1946 1946 @unfilteredpropertycache
1947 1947 def _decodefilterpats(self):
1948 1948 return self._loadfilter(b'decode')
1949 1949
1950 1950 def adddatafilter(self, name, filter):
1951 1951 self._datafilters[name] = filter
1952 1952
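The [encode]/[decode] configuration consumed by _loadfilter above can be sketched as follows; the patterns and shell commands are illustrative and, matching no registered data filter prefix, fall through to the procutil.filter() path shown in the code:

    [encode]
    **.txt = sed -e 's/\r$//'

    [decode]
    **.txt = sed -e 's/$/\r/'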
1953 1953 def wread(self, filename):
1954 1954 if self.wvfs.islink(filename):
1955 1955 data = self.wvfs.readlink(filename)
1956 1956 else:
1957 1957 data = self.wvfs.read(filename)
1958 1958 return self._filter(self._encodefilterpats, filename, data)
1959 1959
1960 1960 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1961 1961 """write ``data`` into ``filename`` in the working directory
1962 1962
1963 1963 This returns length of written (maybe decoded) data.
1964 1964 """
1965 1965 data = self._filter(self._decodefilterpats, filename, data)
1966 1966 if b'l' in flags:
1967 1967 self.wvfs.symlink(data, filename)
1968 1968 else:
1969 1969 self.wvfs.write(
1970 1970 filename, data, backgroundclose=backgroundclose, **kwargs
1971 1971 )
1972 1972 if b'x' in flags:
1973 1973 self.wvfs.setflags(filename, False, True)
1974 1974 else:
1975 1975 self.wvfs.setflags(filename, False, False)
1976 1976 return len(data)
1977 1977
1978 1978 def wwritedata(self, filename, data):
1979 1979 return self._filter(self._decodefilterpats, filename, data)
1980 1980
1981 1981 def currenttransaction(self):
1982 1982 """return the current transaction or None if non exists"""
1983 1983 if self._transref:
1984 1984 tr = self._transref()
1985 1985 else:
1986 1986 tr = None
1987 1987
1988 1988 if tr and tr.running():
1989 1989 return tr
1990 1990 return None
1991 1991
1992 1992 def transaction(self, desc, report=None):
1993 1993 if self.ui.configbool(b'devel', b'all-warnings') or self.ui.configbool(
1994 1994 b'devel', b'check-locks'
1995 1995 ):
1996 1996 if self._currentlock(self._lockref) is None:
1997 1997 raise error.ProgrammingError(b'transaction requires locking')
1998 1998 tr = self.currenttransaction()
1999 1999 if tr is not None:
2000 2000 return tr.nest(name=desc)
2001 2001
2002 2002 # abort here if the journal already exists
2003 2003 if self.svfs.exists(b"journal"):
2004 2004 raise error.RepoError(
2005 2005 _(b"abandoned transaction found"),
2006 2006 hint=_(b"run 'hg recover' to clean up transaction"),
2007 2007 )
2008 2008
2009 2009 idbase = b"%.40f#%f" % (random.random(), time.time())
2010 ha = hex(hashlib.sha1(idbase).digest())
2010 ha = hex(hashutil.sha1(idbase).digest())
2011 2011 txnid = b'TXN:' + ha
2012 2012 self.hook(b'pretxnopen', throw=True, txnname=desc, txnid=txnid)
2013 2013
2014 2014 self._writejournal(desc)
2015 2015 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
2016 2016 if report:
2017 2017 rp = report
2018 2018 else:
2019 2019 rp = self.ui.warn
2020 2020 vfsmap = {b'plain': self.vfs, b'store': self.svfs} # root of .hg/
2021 2021 # we must avoid cyclic reference between repo and transaction.
2022 2022 reporef = weakref.ref(self)
2023 2023 # Code to track tag movement
2024 2024 #
2025 2025 # Since tags are all handled as file content, it is actually quite hard
2026 2026 # to track these movements from a code perspective. So we fall back to
2027 2027 # tracking at the repository level. One could envision tracking changes
2028 2028 # to the '.hgtags' file through changegroup apply but that fails to
2029 2029 # cope with cases where a transaction exposes new heads without a changegroup
2030 2030 # being involved (eg: phase movement).
2031 2031 #
2032 2032 # For now, we gate the feature behind a flag since this likely comes
2033 2033 # with performance impacts. The current code runs more often than needed
2034 2034 # and does not use caches as much as it could. The current focus is on
2035 2035 # the behavior of the feature so we disable it by default. The flag
2036 2036 # will be removed when we are happy with the performance impact.
2037 2037 #
2038 2038 # Once this feature is no longer experimental move the following
2039 2039 # documentation to the appropriate help section:
2040 2040 #
2041 2041 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
2042 2042 # tags (new or changed or deleted tags). In addition the details of
2043 2043 # these changes are made available in a file at:
2044 2044 # ``REPOROOT/.hg/changes/tags.changes``.
2045 2045 # Make sure you check for HG_TAG_MOVED before reading that file as it
2046 2046 # might exist from a previous transaction even if no tags were touched
2047 2047 # in this one. Changes are recorded in a line-based format::
2048 2048 #
2049 2049 # <action> <hex-node> <tag-name>\n
2050 2050 #
2051 2051 # Actions are defined as follows:
2052 2052 # "-R": tag is removed,
2053 2053 # "+A": tag is added,
2054 2054 # "-M": tag is moved (old value),
2055 2055 # "+M": tag is moved (new value),
2056 2056 tracktags = lambda x: None
2057 2057 # experimental config: experimental.hook-track-tags
2058 2058 shouldtracktags = self.ui.configbool(
2059 2059 b'experimental', b'hook-track-tags'
2060 2060 )
2061 2061 if desc != b'strip' and shouldtracktags:
2062 2062 oldheads = self.changelog.headrevs()
2063 2063
2064 2064 def tracktags(tr2):
2065 2065 repo = reporef()
2066 2066 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
2067 2067 newheads = repo.changelog.headrevs()
2068 2068 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
2069 2069 # notes: we compare lists here.
2070 2070 # As we do it only once, building a set would not be cheaper
2071 2071 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
2072 2072 if changes:
2073 2073 tr2.hookargs[b'tag_moved'] = b'1'
2074 2074 with repo.vfs(
2075 2075 b'changes/tags.changes', b'w', atomictemp=True
2076 2076 ) as changesfile:
2077 2077 # note: we do not register the file to the transaction
2078 2078 # because we need it to still exist when the transaction
2079 2079 # is closed (for txnclose hooks)
2080 2080 tagsmod.writediff(changesfile, changes)
2081 2081
2082 2082 def validate(tr2):
2083 2083 """will run pre-closing hooks"""
2084 2084 # XXX the transaction API is a bit lacking here so we take a hacky
2085 2085 # path for now
2086 2086 #
2087 2087 # We cannot add this as a "pending" hook since the 'tr.hookargs'
2088 2088 # dict is copied before these run. In addition we need the data
2089 2089 # available to in-memory hooks too.
2090 2090 #
2091 2091 # Moreover, we also need to make sure this runs before txnclose
2092 2092 # hooks and there is no "pending" mechanism that would execute
2093 2093 # logic only if hooks are about to run.
2094 2094 #
2095 2095 # Fixing this limitation of the transaction is also needed to track
2096 2096 # other families of changes (bookmarks, phases, obsolescence).
2097 2097 #
2098 2098 # This will have to be fixed before we remove the experimental
2099 2099 # gating.
2100 2100 tracktags(tr2)
2101 2101 repo = reporef()
2102 2102
2103 2103 singleheadopt = (b'experimental', b'single-head-per-branch')
2104 2104 singlehead = repo.ui.configbool(*singleheadopt)
2105 2105 if singlehead:
2106 2106 singleheadsub = repo.ui.configsuboptions(*singleheadopt)[1]
2107 2107 accountclosed = singleheadsub.get(
2108 2108 b"account-closed-heads", False
2109 2109 )
2110 2110 scmutil.enforcesinglehead(repo, tr2, desc, accountclosed)
2111 2111 if hook.hashook(repo.ui, b'pretxnclose-bookmark'):
2112 2112 for name, (old, new) in sorted(
2113 2113 tr.changes[b'bookmarks'].items()
2114 2114 ):
2115 2115 args = tr.hookargs.copy()
2116 2116 args.update(bookmarks.preparehookargs(name, old, new))
2117 2117 repo.hook(
2118 2118 b'pretxnclose-bookmark',
2119 2119 throw=True,
2120 2120 **pycompat.strkwargs(args)
2121 2121 )
2122 2122 if hook.hashook(repo.ui, b'pretxnclose-phase'):
2123 2123 cl = repo.unfiltered().changelog
2124 2124 for rev, (old, new) in tr.changes[b'phases'].items():
2125 2125 args = tr.hookargs.copy()
2126 2126 node = hex(cl.node(rev))
2127 2127 args.update(phases.preparehookargs(node, old, new))
2128 2128 repo.hook(
2129 2129 b'pretxnclose-phase',
2130 2130 throw=True,
2131 2131 **pycompat.strkwargs(args)
2132 2132 )
2133 2133
2134 2134 repo.hook(
2135 2135 b'pretxnclose', throw=True, **pycompat.strkwargs(tr.hookargs)
2136 2136 )
2137 2137
2138 2138 def releasefn(tr, success):
2139 2139 repo = reporef()
2140 2140 if repo is None:
2141 2141 # If the repo has been GC'd (and this release function is being
2142 2142 # called from transaction.__del__), there's not much we can do,
2143 2143 # so just leave the unfinished transaction there and let the
2144 2144 # user run `hg recover`.
2145 2145 return
2146 2146 if success:
2147 2147 # this should be explicitly invoked here, because
2148 2148 # in-memory changes aren't written out at closing
2149 2149 # transaction, if tr.addfilegenerator (via
2150 2150 # dirstate.write or so) isn't invoked while
2151 2151 # transaction running
2152 2152 repo.dirstate.write(None)
2153 2153 else:
2154 2154 # discard all changes (including ones already written
2155 2155 # out) in this transaction
2156 2156 narrowspec.restorebackup(self, b'journal.narrowspec')
2157 2157 narrowspec.restorewcbackup(self, b'journal.narrowspec.dirstate')
2158 2158 repo.dirstate.restorebackup(None, b'journal.dirstate')
2159 2159
2160 2160 repo.invalidate(clearfilecache=True)
2161 2161
2162 2162 tr = transaction.transaction(
2163 2163 rp,
2164 2164 self.svfs,
2165 2165 vfsmap,
2166 2166 b"journal",
2167 2167 b"undo",
2168 2168 aftertrans(renames),
2169 2169 self.store.createmode,
2170 2170 validator=validate,
2171 2171 releasefn=releasefn,
2172 2172 checkambigfiles=_cachedfiles,
2173 2173 name=desc,
2174 2174 )
2175 2175 tr.changes[b'origrepolen'] = len(self)
2176 2176 tr.changes[b'obsmarkers'] = set()
2177 2177 tr.changes[b'phases'] = {}
2178 2178 tr.changes[b'bookmarks'] = {}
2179 2179
2180 2180 tr.hookargs[b'txnid'] = txnid
2181 2181 tr.hookargs[b'txnname'] = desc
2182 2182 # note: writing the fncache only during finalize means that the file is
2183 2183 # outdated when running hooks. As fncache is used for streaming clone,
2184 2184 # this is not expected to break anything that happens during the hooks.
2185 2185 tr.addfinalize(b'flush-fncache', self.store.write)
2186 2186
2187 2187 def txnclosehook(tr2):
2188 2188 """To be run if transaction is successful, will schedule a hook run
2189 2189 """
2190 2190 # Don't reference tr2 in hook() so we don't hold a reference.
2191 2191 # This reduces memory consumption when there are multiple
2192 2192 # transactions per lock. This can likely go away if issue5045
2193 2193 # fixes the function accumulation.
2194 2194 hookargs = tr2.hookargs
2195 2195
2196 2196 def hookfunc(unused_success):
2197 2197 repo = reporef()
2198 2198 if hook.hashook(repo.ui, b'txnclose-bookmark'):
2199 2199 bmchanges = sorted(tr.changes[b'bookmarks'].items())
2200 2200 for name, (old, new) in bmchanges:
2201 2201 args = tr.hookargs.copy()
2202 2202 args.update(bookmarks.preparehookargs(name, old, new))
2203 2203 repo.hook(
2204 2204 b'txnclose-bookmark',
2205 2205 throw=False,
2206 2206 **pycompat.strkwargs(args)
2207 2207 )
2208 2208
2209 2209 if hook.hashook(repo.ui, b'txnclose-phase'):
2210 2210 cl = repo.unfiltered().changelog
2211 2211 phasemv = sorted(tr.changes[b'phases'].items())
2212 2212 for rev, (old, new) in phasemv:
2213 2213 args = tr.hookargs.copy()
2214 2214 node = hex(cl.node(rev))
2215 2215 args.update(phases.preparehookargs(node, old, new))
2216 2216 repo.hook(
2217 2217 b'txnclose-phase',
2218 2218 throw=False,
2219 2219 **pycompat.strkwargs(args)
2220 2220 )
2221 2221
2222 2222 repo.hook(
2223 2223 b'txnclose', throw=False, **pycompat.strkwargs(hookargs)
2224 2224 )
2225 2225
2226 2226 reporef()._afterlock(hookfunc)
2227 2227
2228 2228 tr.addfinalize(b'txnclose-hook', txnclosehook)
2229 2229 # Include a leading "-" to make it happen before the transaction summary
2230 2230 # reports registered via scmutil.registersummarycallback() whose names
2231 2231 # are 00-txnreport etc. That way, the caches will be warm when the
2232 2232 # callbacks run.
2233 2233 tr.addpostclose(b'-warm-cache', self._buildcacheupdater(tr))
2234 2234
2235 2235 def txnaborthook(tr2):
2236 2236 """To be run if transaction is aborted
2237 2237 """
2238 2238 reporef().hook(
2239 2239 b'txnabort', throw=False, **pycompat.strkwargs(tr2.hookargs)
2240 2240 )
2241 2241
2242 2242 tr.addabort(b'txnabort-hook', txnaborthook)
2243 2243 # avoid eager cache invalidation. in-memory data should be identical
2244 2244 # to stored data if transaction has no error.
2245 2245 tr.addpostclose(b'refresh-filecachestats', self._refreshfilecachestats)
2246 2246 self._transref = weakref.ref(tr)
2247 2247 scmutil.registersummarycallback(self, tr, desc)
2248 2248 return tr
2249 2249
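A minimal usage sketch of the method above, taking the locks it insists on; the transaction description is illustrative:

    with repo.wlock(), repo.lock():
        with repo.transaction(b'my-change') as tr:
            # journaled writes happen here; an exception aborts and
            # rolls back via the journal files written above
            pass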
2250 2250 def _journalfiles(self):
2251 2251 return (
2252 2252 (self.svfs, b'journal'),
2253 2253 (self.svfs, b'journal.narrowspec'),
2254 2254 (self.vfs, b'journal.narrowspec.dirstate'),
2255 2255 (self.vfs, b'journal.dirstate'),
2256 2256 (self.vfs, b'journal.branch'),
2257 2257 (self.vfs, b'journal.desc'),
2258 2258 (bookmarks.bookmarksvfs(self), b'journal.bookmarks'),
2259 2259 (self.svfs, b'journal.phaseroots'),
2260 2260 )
2261 2261
2262 2262 def undofiles(self):
2263 2263 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
2264 2264
2265 2265 @unfilteredmethod
2266 2266 def _writejournal(self, desc):
2267 2267 self.dirstate.savebackup(None, b'journal.dirstate')
2268 2268 narrowspec.savewcbackup(self, b'journal.narrowspec.dirstate')
2269 2269 narrowspec.savebackup(self, b'journal.narrowspec')
2270 2270 self.vfs.write(
2271 2271 b"journal.branch", encoding.fromlocal(self.dirstate.branch())
2272 2272 )
2273 2273 self.vfs.write(b"journal.desc", b"%d\n%s\n" % (len(self), desc))
2274 2274 bookmarksvfs = bookmarks.bookmarksvfs(self)
2275 2275 bookmarksvfs.write(
2276 2276 b"journal.bookmarks", bookmarksvfs.tryread(b"bookmarks")
2277 2277 )
2278 2278 self.svfs.write(b"journal.phaseroots", self.svfs.tryread(b"phaseroots"))
2279 2279
2280 2280 def recover(self):
2281 2281 with self.lock():
2282 2282 if self.svfs.exists(b"journal"):
2283 2283 self.ui.status(_(b"rolling back interrupted transaction\n"))
2284 2284 vfsmap = {
2285 2285 b'': self.svfs,
2286 2286 b'plain': self.vfs,
2287 2287 }
2288 2288 transaction.rollback(
2289 2289 self.svfs,
2290 2290 vfsmap,
2291 2291 b"journal",
2292 2292 self.ui.warn,
2293 2293 checkambigfiles=_cachedfiles,
2294 2294 )
2295 2295 self.invalidate()
2296 2296 return True
2297 2297 else:
2298 2298 self.ui.warn(_(b"no interrupted transaction available\n"))
2299 2299 return False
2300 2300
2301 2301 def rollback(self, dryrun=False, force=False):
2302 2302 wlock = lock = dsguard = None
2303 2303 try:
2304 2304 wlock = self.wlock()
2305 2305 lock = self.lock()
2306 2306 if self.svfs.exists(b"undo"):
2307 2307 dsguard = dirstateguard.dirstateguard(self, b'rollback')
2308 2308
2309 2309 return self._rollback(dryrun, force, dsguard)
2310 2310 else:
2311 2311 self.ui.warn(_(b"no rollback information available\n"))
2312 2312 return 1
2313 2313 finally:
2314 2314 release(dsguard, lock, wlock)
2315 2315
2316 2316 @unfilteredmethod # Until we get smarter cache management
2317 2317 def _rollback(self, dryrun, force, dsguard):
2318 2318 ui = self.ui
2319 2319 try:
2320 2320 args = self.vfs.read(b'undo.desc').splitlines()
2321 2321 (oldlen, desc, detail) = (int(args[0]), args[1], None)
2322 2322 if len(args) >= 3:
2323 2323 detail = args[2]
2324 2324 oldtip = oldlen - 1
2325 2325
2326 2326 if detail and ui.verbose:
2327 2327 msg = _(
2328 2328 b'repository tip rolled back to revision %d'
2329 2329 b' (undo %s: %s)\n'
2330 2330 ) % (oldtip, desc, detail)
2331 2331 else:
2332 2332 msg = _(
2333 2333 b'repository tip rolled back to revision %d (undo %s)\n'
2334 2334 ) % (oldtip, desc)
2335 2335 except IOError:
2336 2336 msg = _(b'rolling back unknown transaction\n')
2337 2337 desc = None
2338 2338
2339 2339 if not force and self[b'.'] != self[b'tip'] and desc == b'commit':
2340 2340 raise error.Abort(
2341 2341 _(
2342 2342 b'rollback of last commit while not checked out '
2343 2343 b'may lose data'
2344 2344 ),
2345 2345 hint=_(b'use -f to force'),
2346 2346 )
2347 2347
2348 2348 ui.status(msg)
2349 2349 if dryrun:
2350 2350 return 0
2351 2351
2352 2352 parents = self.dirstate.parents()
2353 2353 self.destroying()
2354 2354 vfsmap = {b'plain': self.vfs, b'': self.svfs}
2355 2355 transaction.rollback(
2356 2356 self.svfs, vfsmap, b'undo', ui.warn, checkambigfiles=_cachedfiles
2357 2357 )
2358 2358 bookmarksvfs = bookmarks.bookmarksvfs(self)
2359 2359 if bookmarksvfs.exists(b'undo.bookmarks'):
2360 2360 bookmarksvfs.rename(
2361 2361 b'undo.bookmarks', b'bookmarks', checkambig=True
2362 2362 )
2363 2363 if self.svfs.exists(b'undo.phaseroots'):
2364 2364 self.svfs.rename(b'undo.phaseroots', b'phaseroots', checkambig=True)
2365 2365 self.invalidate()
2366 2366
2367 2367 has_node = self.changelog.index.has_node
2368 2368 parentgone = any(not has_node(p) for p in parents)
2369 2369 if parentgone:
2370 2370 # prevent dirstateguard from overwriting already restored one
2371 2371 dsguard.close()
2372 2372
2373 2373 narrowspec.restorebackup(self, b'undo.narrowspec')
2374 2374 narrowspec.restorewcbackup(self, b'undo.narrowspec.dirstate')
2375 2375 self.dirstate.restorebackup(None, b'undo.dirstate')
2376 2376 try:
2377 2377 branch = self.vfs.read(b'undo.branch')
2378 2378 self.dirstate.setbranch(encoding.tolocal(branch))
2379 2379 except IOError:
2380 2380 ui.warn(
2381 2381 _(
2382 2382 b'named branch could not be reset: '
2383 2383 b'current branch is still \'%s\'\n'
2384 2384 )
2385 2385 % self.dirstate.branch()
2386 2386 )
2387 2387
2388 2388 parents = tuple([p.rev() for p in self[None].parents()])
2389 2389 if len(parents) > 1:
2390 2390 ui.status(
2391 2391 _(
2392 2392 b'working directory now based on '
2393 2393 b'revisions %d and %d\n'
2394 2394 )
2395 2395 % parents
2396 2396 )
2397 2397 else:
2398 2398 ui.status(
2399 2399 _(b'working directory now based on revision %d\n') % parents
2400 2400 )
2401 2401 mergemod.mergestate.clean(self, self[b'.'].node())
2402 2402
2403 2403 # TODO: if we know which new heads may result from this rollback, pass
2404 2404 # them to destroy(), which will prevent the branchhead cache from being
2405 2405 # invalidated.
2406 2406 self.destroyed()
2407 2407 return 0
2408 2408
2409 2409 def _buildcacheupdater(self, newtransaction):
2410 2410 """called during transaction to build the callback updating cache
2411 2411
2412 2412 Lives on the repository to help extensions that might want to augment
2413 2413 this logic. For this purpose, the created transaction is passed to the
2414 2414 method.
2415 2415 """
2416 2416 # we must avoid cyclic reference between repo and transaction.
2417 2417 reporef = weakref.ref(self)
2418 2418
2419 2419 def updater(tr):
2420 2420 repo = reporef()
2421 2421 repo.updatecaches(tr)
2422 2422
2423 2423 return updater
2424 2424
2425 2425 @unfilteredmethod
2426 2426 def updatecaches(self, tr=None, full=False):
2427 2427 """warm appropriate caches
2428 2428
2429 2429 If this function is called after a transaction has closed, the transaction
2430 2430 will be available in the 'tr' argument. This can be used to selectively
2431 2431 update caches relevant to the changes in that transaction.
2432 2432
2433 2433 If 'full' is set, make sure all caches the function knows about have
2434 2434 up-to-date data. Even the ones usually loaded more lazily.
2435 2435 """
2436 2436 if tr is not None and tr.hookargs.get(b'source') == b'strip':
2437 2437 # During strip, many caches are invalid but
2438 2438 # later call to `destroyed` will refresh them.
2439 2439 return
2440 2440
2441 2441 if tr is None or tr.changes[b'origrepolen'] < len(self):
2442 2442 # accessing the 'served' branchmap should refresh all the others,
2443 2443 self.ui.debug(b'updating the branch cache\n')
2444 2444 self.filtered(b'served').branchmap()
2445 2445 self.filtered(b'served.hidden').branchmap()
2446 2446
2447 2447 if full:
2448 2448 unfi = self.unfiltered()
2449 2449 rbc = unfi.revbranchcache()
2450 2450 for r in unfi.changelog:
2451 2451 rbc.branchinfo(r)
2452 2452 rbc.write()
2453 2453
2454 2454 # ensure the working copy parents are in the manifestfulltextcache
2455 2455 for ctx in self[b'.'].parents():
2456 2456 ctx.manifest() # accessing the manifest is enough
2457 2457
2458 2458 # accessing fnode cache warms the cache
2459 2459 tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
2460 2460 # accessing tags warm the cache
2461 2461 self.tags()
2462 2462 self.filtered(b'served').tags()
2463 2463
2464 2464 # The `full` arg is documented as updating even the lazily-loaded
2465 2465 # caches immediately, so we're forcing a write to cause these caches
2466 2466 # to be warmed up even if they haven't explicitly been requested
2467 2467 # yet (if they've never been used by hg, they won't ever have been
2468 2468 # written, even if they're a subset of another kind of cache that
2469 2469 # *has* been used).
2470 2470 for filt in repoview.filtertable.keys():
2471 2471 filtered = self.filtered(filt)
2472 2472 filtered.branchmap().write(filtered)
2473 2473
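A sketch of forcing a full cache warm-up as documented above, roughly what `hg debugupdatecaches` is expected to do:

    with repo.wlock(), repo.lock():
        repo.updatecaches(full=True)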
2474 2474 def invalidatecaches(self):
2475 2475
2476 2476 if '_tagscache' in vars(self):
2477 2477 # can't use delattr on proxy
2478 2478 del self.__dict__['_tagscache']
2479 2479
2480 2480 self._branchcaches.clear()
2481 2481 self.invalidatevolatilesets()
2482 2482 self._sparsesignaturecache.clear()
2483 2483
2484 2484 def invalidatevolatilesets(self):
2485 2485 self.filteredrevcache.clear()
2486 2486 obsolete.clearobscaches(self)
2487 2487
2488 2488 def invalidatedirstate(self):
2489 2489 '''Invalidates the dirstate, causing the next call to dirstate
2490 2490 to check if it was modified since the last time it was read,
2491 2491 rereading it if it has.
2492 2492
2493 2493 This is different from dirstate.invalidate() in that it doesn't always
2494 2494 reread the dirstate. Use dirstate.invalidate() if you want to
2495 2495 explicitly read the dirstate again (i.e. restoring it to a previous
2496 2496 known good state).'''
2497 2497 if hasunfilteredcache(self, 'dirstate'):
2498 2498 for k in self.dirstate._filecache:
2499 2499 try:
2500 2500 delattr(self.dirstate, k)
2501 2501 except AttributeError:
2502 2502 pass
2503 2503 delattr(self.unfiltered(), 'dirstate')
2504 2504
2505 2505 def invalidate(self, clearfilecache=False):
2506 2506 '''Invalidates both store and non-store parts other than dirstate
2507 2507
2508 2508 If a transaction is running, invalidation of store is omitted,
2509 2509 because discarding in-memory changes might cause inconsistency
2510 2510 (e.g. incomplete fncache causes unintentional failure, but
2511 2511 redundant one doesn't).
2512 2512 '''
2513 2513 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2514 2514 for k in list(self._filecache.keys()):
2515 2515 # dirstate is invalidated separately in invalidatedirstate()
2516 2516 if k == b'dirstate':
2517 2517 continue
2518 2518 if (
2519 2519 k == b'changelog'
2520 2520 and self.currenttransaction()
2521 2521 and self.changelog._delayed
2522 2522 ):
2523 2523 # The changelog object may store unwritten revisions. We don't
2524 2524 # want to lose them.
2525 2525 # TODO: Solve the problem instead of working around it.
2526 2526 continue
2527 2527
2528 2528 if clearfilecache:
2529 2529 del self._filecache[k]
2530 2530 try:
2531 2531 delattr(unfiltered, k)
2532 2532 except AttributeError:
2533 2533 pass
2534 2534 self.invalidatecaches()
2535 2535 if not self.currenttransaction():
2536 2536 # TODO: Changing contents of store outside transaction
2537 2537 # causes inconsistency. We should make in-memory store
2538 2538 # changes detectable, and abort if changed.
2539 2539 self.store.invalidatecaches()
2540 2540
2541 2541 def invalidateall(self):
2542 2542 '''Fully invalidates both store and non-store parts, causing the
2543 2543 subsequent operation to reread any outside changes.'''
2544 2544 # extension should hook this to invalidate its caches
2545 2545 self.invalidate()
2546 2546 self.invalidatedirstate()
2547 2547
2548 2548 @unfilteredmethod
2549 2549 def _refreshfilecachestats(self, tr):
2550 2550 """Reload stats of cached files so that they are flagged as valid"""
2551 2551 for k, ce in self._filecache.items():
2552 2552 k = pycompat.sysstr(k)
2553 2553 if k == 'dirstate' or k not in self.__dict__:
2554 2554 continue
2555 2555 ce.refresh()
2556 2556
2557 2557 def _lock(
2558 2558 self,
2559 2559 vfs,
2560 2560 lockname,
2561 2561 wait,
2562 2562 releasefn,
2563 2563 acquirefn,
2564 2564 desc,
2565 2565 inheritchecker=None,
2566 2566 parentenvvar=None,
2567 2567 ):
2568 2568 parentlock = None
2569 2569 # the contents of parentenvvar are used by the underlying lock to
2570 2570 # determine whether it can be inherited
2571 2571 if parentenvvar is not None:
2572 2572 parentlock = encoding.environ.get(parentenvvar)
2573 2573
2574 2574 timeout = 0
2575 2575 warntimeout = 0
2576 2576 if wait:
2577 2577 timeout = self.ui.configint(b"ui", b"timeout")
2578 2578 warntimeout = self.ui.configint(b"ui", b"timeout.warn")
2579 2579 # internal config: ui.signal-safe-lock
2580 2580 signalsafe = self.ui.configbool(b'ui', b'signal-safe-lock')
2581 2581
2582 2582 l = lockmod.trylock(
2583 2583 self.ui,
2584 2584 vfs,
2585 2585 lockname,
2586 2586 timeout,
2587 2587 warntimeout,
2588 2588 releasefn=releasefn,
2589 2589 acquirefn=acquirefn,
2590 2590 desc=desc,
2591 2591 inheritchecker=inheritchecker,
2592 2592 parentlock=parentlock,
2593 2593 signalsafe=signalsafe,
2594 2594 )
2595 2595 return l
2596 2596
2597 2597 def _afterlock(self, callback):
2598 2598 """add a callback to be run when the repository is fully unlocked
2599 2599
2600 2600 The callback will be executed when the outermost lock is released
2601 2601 (with wlock being higher level than 'lock')."""
2602 2602 for ref in (self._wlockref, self._lockref):
2603 2603 l = ref and ref()
2604 2604 if l and l.held:
2605 2605 l.postrelease.append(callback)
2606 2606 break
2607 2607 else: # no lock has been found.
2608 2608 callback(True)
2609 2609
2610 2610 def lock(self, wait=True):
2611 2611 '''Lock the repository store (.hg/store) and return a weak reference
2612 2612 to the lock. Use this before modifying the store (e.g. committing or
2613 2613 stripping). If you are opening a transaction, get a lock as well.
2614 2614
2615 2615 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2616 2616 'wlock' first to avoid a dead-lock hazard.'''
2617 2617 l = self._currentlock(self._lockref)
2618 2618 if l is not None:
2619 2619 l.lock()
2620 2620 return l
2621 2621
2622 2622 l = self._lock(
2623 2623 vfs=self.svfs,
2624 2624 lockname=b"lock",
2625 2625 wait=wait,
2626 2626 releasefn=None,
2627 2627 acquirefn=self.invalidate,
2628 2628 desc=_(b'repository %s') % self.origroot,
2629 2629 )
2630 2630 self._lockref = weakref.ref(l)
2631 2631 return l
2632 2632
2633 2633 def _wlockchecktransaction(self):
2634 2634 if self.currenttransaction() is not None:
2635 2635 raise error.LockInheritanceContractViolation(
2636 2636 b'wlock cannot be inherited in the middle of a transaction'
2637 2637 )
2638 2638
2639 2639 def wlock(self, wait=True):
2640 2640 '''Lock the non-store parts of the repository (everything under
2641 2641 .hg except .hg/store) and return a weak reference to the lock.
2642 2642
2643 2643 Use this before modifying files in .hg.
2644 2644
2645 2645 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2646 2646 'wlock' first to avoid a dead-lock hazard.'''
2647 2647 l = self._wlockref and self._wlockref()
2648 2648 if l is not None and l.held:
2649 2649 l.lock()
2650 2650 return l
2651 2651
2652 2652 # We do not need to check for non-waiting lock acquisition. Such
2653 2653 # acquisition would not cause dead-lock as they would just fail.
2654 2654 if wait and (
2655 2655 self.ui.configbool(b'devel', b'all-warnings')
2656 2656 or self.ui.configbool(b'devel', b'check-locks')
2657 2657 ):
2658 2658 if self._currentlock(self._lockref) is not None:
2659 2659 self.ui.develwarn(b'"wlock" acquired after "lock"')
2660 2660
2661 2661 def unlock():
2662 2662 if self.dirstate.pendingparentchange():
2663 2663 self.dirstate.invalidate()
2664 2664 else:
2665 2665 self.dirstate.write(None)
2666 2666
2667 2667 self._filecache[b'dirstate'].refresh()
2668 2668
2669 2669 l = self._lock(
2670 2670 self.vfs,
2671 2671 b"wlock",
2672 2672 wait,
2673 2673 unlock,
2674 2674 self.invalidatedirstate,
2675 2675 _(b'working directory of %s') % self.origroot,
2676 2676 inheritchecker=self._wlockchecktransaction,
2677 2677 parentenvvar=b'HG_WLOCK_LOCKER',
2678 2678 )
2679 2679 self._wlockref = weakref.ref(l)
2680 2680 return l
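# Illustrative note (an addition, not in the original source): callers that
# need both locks are expected to follow the ordering documented above,
# roughly
#
#   with repo.wlock(), repo.lock():
#       ...  # mutate both the working copy and the store
#
# taking them the other way round triggers the devel warning emitted in
# wlock() when the devel lock checks are enabled.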
2681 2681
2682 2682 def _currentlock(self, lockref):
2683 2683 """Returns the lock if it's held, or None if it's not."""
2684 2684 if lockref is None:
2685 2685 return None
2686 2686 l = lockref()
2687 2687 if l is None or not l.held:
2688 2688 return None
2689 2689 return l
2690 2690
2691 2691 def currentwlock(self):
2692 2692 """Returns the wlock if it's held, or None if it's not."""
2693 2693 return self._currentlock(self._wlockref)
2694 2694
2695 2695 def _filecommit(
2696 2696 self,
2697 2697 fctx,
2698 2698 manifest1,
2699 2699 manifest2,
2700 2700 linkrev,
2701 2701 tr,
2702 2702 changelist,
2703 2703 includecopymeta,
2704 2704 ):
2705 2705 """
2706 2706 commit an individual file as part of a larger transaction
2707 2707 """
2708 2708
2709 2709 fname = fctx.path()
2710 2710 fparent1 = manifest1.get(fname, nullid)
2711 2711 fparent2 = manifest2.get(fname, nullid)
2712 2712 if isinstance(fctx, context.filectx):
2713 2713 node = fctx.filenode()
2714 2714 if node in [fparent1, fparent2]:
2715 2715 self.ui.debug(b'reusing %s filelog entry\n' % fname)
2716 2716 if (
2717 2717 fparent1 != nullid
2718 2718 and manifest1.flags(fname) != fctx.flags()
2719 2719 ) or (
2720 2720 fparent2 != nullid
2721 2721 and manifest2.flags(fname) != fctx.flags()
2722 2722 ):
2723 2723 changelist.append(fname)
2724 2724 return node
2725 2725
2726 2726 flog = self.file(fname)
2727 2727 meta = {}
2728 2728 cfname = fctx.copysource()
2729 2729 if cfname and cfname != fname:
2730 2730 # Mark the new revision of this file as a copy of another
2731 2731 # file. This copy data will effectively act as a parent
2732 2732 # of this new revision. If this is a merge, the first
2733 2733 # parent will be the nullid (meaning "look up the copy data")
2734 2734 # and the second one will be the other parent. For example:
2735 2735 #
2736 2736 # 0 --- 1 --- 3 rev1 changes file foo
2737 2737 # \ / rev2 renames foo to bar and changes it
2738 2738 # \- 2 -/ rev3 should have bar with all changes and
2739 2739 # should record that bar descends from
2740 2740 # bar in rev2 and foo in rev1
2741 2741 #
2742 2742 # this allows this merge to succeed:
2743 2743 #
2744 2744 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2745 2745 # \ / merging rev3 and rev4 should use bar@rev2
2746 2746 # \- 2 --- 4 as the merge base
2747 2747 #
2748 2748
2749 2749 cnode = manifest1.get(cfname)
2750 2750 newfparent = fparent2
2751 2751
2752 2752 if manifest2: # branch merge
2753 2753 if fparent2 == nullid or cnode is None: # copied on remote side
2754 2754 if cfname in manifest2:
2755 2755 cnode = manifest2[cfname]
2756 2756 newfparent = fparent1
2757 2757
2758 2758 # Here, we used to search backwards through history to try to find
2759 2759 # where the file copy came from if the source of a copy was not in
2760 2760 # the parent directory. However, this doesn't actually make sense to
2761 2761 # do (what does a copy from something not in your working copy even
2762 2762 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
2763 2763 # the user that copy information was dropped, so if they didn't
2764 2764 # expect this outcome it can be fixed, but this is the correct
2765 2765 # behavior in this circumstance.
2766 2766
2767 2767 if cnode:
2768 2768 self.ui.debug(
2769 2769 b" %s: copy %s:%s\n" % (fname, cfname, hex(cnode))
2770 2770 )
2771 2771 if includecopymeta:
2772 2772 meta[b"copy"] = cfname
2773 2773 meta[b"copyrev"] = hex(cnode)
2774 2774 fparent1, fparent2 = nullid, newfparent
2775 2775 else:
2776 2776 self.ui.warn(
2777 2777 _(
2778 2778 b"warning: can't find ancestor for '%s' "
2779 2779 b"copied from '%s'!\n"
2780 2780 )
2781 2781 % (fname, cfname)
2782 2782 )
2783 2783
2784 2784 elif fparent1 == nullid:
2785 2785 fparent1, fparent2 = fparent2, nullid
2786 2786 elif fparent2 != nullid:
2787 2787 # is one parent an ancestor of the other?
2788 2788 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2789 2789 if fparent1 in fparentancestors:
2790 2790 fparent1, fparent2 = fparent2, nullid
2791 2791 elif fparent2 in fparentancestors:
2792 2792 fparent2 = nullid
2793 2793
2794 2794 # is the file changed?
2795 2795 text = fctx.data()
2796 2796 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2797 2797 changelist.append(fname)
2798 2798 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2799 2799 # are just the flags changed during merge?
2800 2800 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2801 2801 changelist.append(fname)
2802 2802
2803 2803 return fparent1
2804 2804
2805 2805 def checkcommitpatterns(self, wctx, match, status, fail):
2806 2806 """check for commit arguments that aren't committable"""
2807 2807 if match.isexact() or match.prefix():
2808 2808 matched = set(status.modified + status.added + status.removed)
2809 2809
2810 2810 for f in match.files():
2811 2811 f = self.dirstate.normalize(f)
2812 2812 if f == b'.' or f in matched or f in wctx.substate:
2813 2813 continue
2814 2814 if f in status.deleted:
2815 2815 fail(f, _(b'file not found!'))
2816 2816 # Is it a directory that exists or used to exist?
2817 2817 if self.wvfs.isdir(f) or wctx.p1().hasdir(f):
2818 2818 d = f + b'/'
2819 2819 for mf in matched:
2820 2820 if mf.startswith(d):
2821 2821 break
2822 2822 else:
2823 2823 fail(f, _(b"no match under directory!"))
2824 2824 elif f not in self.dirstate:
2825 2825 fail(f, _(b"file not tracked!"))
2826 2826
2827 2827 @unfilteredmethod
2828 2828 def commit(
2829 2829 self,
2830 2830 text=b"",
2831 2831 user=None,
2832 2832 date=None,
2833 2833 match=None,
2834 2834 force=False,
2835 2835 editor=None,
2836 2836 extra=None,
2837 2837 ):
2838 2838 """Add a new revision to current repository.
2839 2839
2840 2840 Revision information is gathered from the working directory,
2841 2841 match can be used to filter the committed files. If editor is
2842 2842 supplied, it is called to get a commit message.
2843 2843 """
2844 2844 if extra is None:
2845 2845 extra = {}
2846 2846
2847 2847 def fail(f, msg):
2848 2848 raise error.Abort(b'%s: %s' % (f, msg))
2849 2849
2850 2850 if not match:
2851 2851 match = matchmod.always()
2852 2852
2853 2853 if not force:
2854 2854 match.bad = fail
2855 2855
2856 2856 # lock() for recent changelog (see issue4368)
2857 2857 with self.wlock(), self.lock():
2858 2858 wctx = self[None]
2859 2859 merge = len(wctx.parents()) > 1
2860 2860
2861 2861 if not force and merge and not match.always():
2862 2862 raise error.Abort(
2863 2863 _(
2864 2864 b'cannot partially commit a merge '
2865 2865 b'(do not specify files or patterns)'
2866 2866 )
2867 2867 )
2868 2868
2869 2869 status = self.status(match=match, clean=force)
2870 2870 if force:
2871 2871 status.modified.extend(
2872 2872 status.clean
2873 2873 ) # mq may commit clean files
2874 2874
2875 2875 # check subrepos
2876 2876 subs, commitsubs, newstate = subrepoutil.precommit(
2877 2877 self.ui, wctx, status, match, force=force
2878 2878 )
2879 2879
2880 2880 # make sure all explicit patterns are matched
2881 2881 if not force:
2882 2882 self.checkcommitpatterns(wctx, match, status, fail)
2883 2883
2884 2884 cctx = context.workingcommitctx(
2885 2885 self, status, text, user, date, extra
2886 2886 )
2887 2887
2888 2888 # internal config: ui.allowemptycommit
2889 2889 allowemptycommit = (
2890 2890 wctx.branch() != wctx.p1().branch()
2891 2891 or extra.get(b'close')
2892 2892 or merge
2893 2893 or cctx.files()
2894 2894 or self.ui.configbool(b'ui', b'allowemptycommit')
2895 2895 )
2896 2896 if not allowemptycommit:
2897 2897 return None
2898 2898
2899 2899 if merge and cctx.deleted():
2900 2900 raise error.Abort(_(b"cannot commit merge with missing files"))
2901 2901
2902 2902 ms = mergemod.mergestate.read(self)
2903 2903 mergeutil.checkunresolved(ms)
2904 2904
2905 2905 if editor:
2906 2906 cctx._text = editor(self, cctx, subs)
2907 2907 edited = text != cctx._text
2908 2908
2909 2909 # Save commit message in case this transaction gets rolled back
2910 2910 # (e.g. by a pretxncommit hook). Leave the content alone on
2911 2911 # the assumption that the user will use the same editor again.
2912 2912 msgfn = self.savecommitmessage(cctx._text)
2913 2913
2914 2914 # commit subs and write new state
2915 2915 if subs:
2916 2916 uipathfn = scmutil.getuipathfn(self)
2917 2917 for s in sorted(commitsubs):
2918 2918 sub = wctx.sub(s)
2919 2919 self.ui.status(
2920 2920 _(b'committing subrepository %s\n')
2921 2921 % uipathfn(subrepoutil.subrelpath(sub))
2922 2922 )
2923 2923 sr = sub.commit(cctx._text, user, date)
2924 2924 newstate[s] = (newstate[s][0], sr)
2925 2925 subrepoutil.writestate(self, newstate)
2926 2926
2927 2927 p1, p2 = self.dirstate.parents()
2928 2928 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
2929 2929 try:
2930 2930 self.hook(
2931 2931 b"precommit", throw=True, parent1=hookp1, parent2=hookp2
2932 2932 )
2933 2933 with self.transaction(b'commit'):
2934 2934 ret = self.commitctx(cctx, True)
2935 2935 # update bookmarks, dirstate and mergestate
2936 2936 bookmarks.update(self, [p1, p2], ret)
2937 2937 cctx.markcommitted(ret)
2938 2938 ms.reset()
2939 2939 except: # re-raises
2940 2940 if edited:
2941 2941 self.ui.write(
2942 2942 _(b'note: commit message saved in %s\n') % msgfn
2943 2943 )
2944 2944 raise
2945 2945
2946 2946 def commithook(unused_success):
2947 2947 # hack for commands that use a temporary commit (e.g. histedit)
2948 2948 # temporary commit got stripped before hook release
2949 2949 if self.changelog.hasnode(ret):
2950 2950 self.hook(
2951 2951 b"commit", node=hex(ret), parent1=hookp1, parent2=hookp2
2952 2952 )
2953 2953
2954 2954 self._afterlock(commithook)
2955 2955 return ret
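# Illustrative usage sketch (an assumption, not part of the original file):
# a caller holds no locks itself -- commit() takes wlock and lock internally
# -- and can restrict the committed files with a matcher, e.g.
#
#   m = matchmod.exact([b'a.txt'])  # hypothetical file name
#   node = repo.commit(text=b'message', user=b'someone', match=m)
#
# commit() returns the new changeset node, or None when there is nothing to
# commit and empty commits are not allowed.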
2956 2956
2957 2957 @unfilteredmethod
2958 2958 def commitctx(self, ctx, error=False, origctx=None):
2959 2959 """Add a new revision to current repository.
2960 2960 Revision information is passed via the context argument.
2961 2961
2962 2962 ctx.files() should list all files involved in this commit, i.e.
2963 2963 modified/added/removed files. On a merge, it may be wider than the
2964 2964 set of files actually committed, since any file nodes derived directly
2965 2965 from p1 or p2 are excluded from the committed ctx.files().
2966 2966
2967 2967 origctx is for convert to work around the problem that bug
2968 2968 fixes to the files list in changesets change hashes. For
2969 2969 convert to be the identity, it can pass an origctx and this
2970 2970 function will use the same files list when it makes sense to
2971 2971 do so.
2972 2972 """
2973 2973
2974 2974 p1, p2 = ctx.p1(), ctx.p2()
2975 2975 user = ctx.user()
2976 2976
2977 2977 if self.filecopiesmode == b'changeset-sidedata':
2978 2978 writechangesetcopy = True
2979 2979 writefilecopymeta = True
2980 2980 writecopiesto = None
2981 2981 else:
2982 2982 writecopiesto = self.ui.config(b'experimental', b'copies.write-to')
2983 2983 writefilecopymeta = writecopiesto != b'changeset-only'
2984 2984 writechangesetcopy = writecopiesto in (
2985 2985 b'changeset-only',
2986 2986 b'compatibility',
2987 2987 )
2988 2988 p1copies, p2copies = None, None
2989 2989 if writechangesetcopy:
2990 2990 p1copies = ctx.p1copies()
2991 2991 p2copies = ctx.p2copies()
2992 2992 filesadded, filesremoved = None, None
2993 2993 with self.lock(), self.transaction(b"commit") as tr:
2994 2994 trp = weakref.proxy(tr)
2995 2995
2996 2996 if ctx.manifestnode():
2997 2997 # reuse an existing manifest revision
2998 2998 self.ui.debug(b'reusing known manifest\n')
2999 2999 mn = ctx.manifestnode()
3000 3000 files = ctx.files()
3001 3001 if writechangesetcopy:
3002 3002 filesadded = ctx.filesadded()
3003 3003 filesremoved = ctx.filesremoved()
3004 3004 elif ctx.files():
3005 3005 m1ctx = p1.manifestctx()
3006 3006 m2ctx = p2.manifestctx()
3007 3007 mctx = m1ctx.copy()
3008 3008
3009 3009 m = mctx.read()
3010 3010 m1 = m1ctx.read()
3011 3011 m2 = m2ctx.read()
3012 3012
3013 3013 # check in files
3014 3014 added = []
3015 3015 changed = []
3016 3016 removed = list(ctx.removed())
3017 3017 linkrev = len(self)
3018 3018 self.ui.note(_(b"committing files:\n"))
3019 3019 uipathfn = scmutil.getuipathfn(self)
3020 3020 for f in sorted(ctx.modified() + ctx.added()):
3021 3021 self.ui.note(uipathfn(f) + b"\n")
3022 3022 try:
3023 3023 fctx = ctx[f]
3024 3024 if fctx is None:
3025 3025 removed.append(f)
3026 3026 else:
3027 3027 added.append(f)
3028 3028 m[f] = self._filecommit(
3029 3029 fctx,
3030 3030 m1,
3031 3031 m2,
3032 3032 linkrev,
3033 3033 trp,
3034 3034 changed,
3035 3035 writefilecopymeta,
3036 3036 )
3037 3037 m.setflag(f, fctx.flags())
3038 3038 except OSError:
3039 3039 self.ui.warn(
3040 3040 _(b"trouble committing %s!\n") % uipathfn(f)
3041 3041 )
3042 3042 raise
3043 3043 except IOError as inst:
3044 3044 errcode = getattr(inst, 'errno', errno.ENOENT)
3045 3045 if error or errcode and errcode != errno.ENOENT:
3046 3046 self.ui.warn(
3047 3047 _(b"trouble committing %s!\n") % uipathfn(f)
3048 3048 )
3049 3049 raise
3050 3050
3051 3051 # update manifest
3052 3052 removed = [f for f in removed if f in m1 or f in m2]
3053 3053 drop = sorted([f for f in removed if f in m])
3054 3054 for f in drop:
3055 3055 del m[f]
3056 3056 if p2.rev() != nullrev:
3057 3057
3058 3058 @util.cachefunc
3059 3059 def mas():
3060 3060 p1n = p1.node()
3061 3061 p2n = p2.node()
3062 3062 cahs = self.changelog.commonancestorsheads(p1n, p2n)
3063 3063 if not cahs:
3064 3064 cahs = [nullrev]
3065 3065 return [self[r].manifest() for r in cahs]
3066 3066
3067 3067 def deletionfromparent(f):
3068 3068 # When a file is removed relative to p1 in a merge, this
3069 3069 # function determines whether the absence is due to a
3070 3070 # deletion from a parent, or whether the merge commit
3071 3071 # itself deletes the file. We decide this by doing a
3072 3072 # simplified three way merge of the manifest entry for
3073 3073 # the file. There are two ways we decide the merge
3074 3074 # itself didn't delete a file:
3075 3075 # - neither parent (nor the merge) contain the file
3076 3076 # - exactly one parent contains the file, and that
3077 3077 # parent has the same filelog entry as the merge
3078 3078 # ancestor (or all of them if there are two). In other
3079 3079 # words, that parent left the file unchanged while the
3080 3080 # other one deleted it.
3081 3081 # One way to think about this is that deleting a file is
3082 3082 # similar to emptying it, so the list of changed files
3083 3083 # should be similar either way. The computation
3084 3084 # described above is not done directly in _filecommit
3085 3085 # when creating the list of changed files, however
3086 3086 # it does something very similar by comparing filelog
3087 3087 # nodes.
3088 3088 if f in m1:
3089 3089 return f not in m2 and all(
3090 3090 f in ma and ma.find(f) == m1.find(f)
3091 3091 for ma in mas()
3092 3092 )
3093 3093 elif f in m2:
3094 3094 return all(
3095 3095 f in ma and ma.find(f) == m2.find(f)
3096 3096 for ma in mas()
3097 3097 )
3098 3098 else:
3099 3099 return True
3100 3100
3101 3101 removed = [f for f in removed if not deletionfromparent(f)]
3102 3102
3103 3103 files = changed + removed
3104 3104 md = None
3105 3105 if not files:
3106 3106 # if no "files" actually changed in terms of the changelog,
3107 3107 # try hard to detect unmodified manifest entry so that the
3108 3108 # exact same commit can be reproduced later on convert.
3109 3109 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
3110 3110 if not files and md:
3111 3111 self.ui.debug(
3112 3112 b'not reusing manifest (no file change in '
3113 3113 b'changelog, but manifest differs)\n'
3114 3114 )
3115 3115 if files or md:
3116 3116 self.ui.note(_(b"committing manifest\n"))
3117 3117 # we're using narrowmatch here since it's already applied at
3118 3118 # other stages (such as dirstate.walk), so we're already
3119 3119 # ignoring things outside of narrowspec in most cases. The
3120 3120 # one case where we might have files outside the narrowspec
3121 3121 # at this point is merges, and we already error out in the
3122 3122 # case where the merge has files outside of the narrowspec,
3123 3123 # so this is safe.
3124 3124 mn = mctx.write(
3125 3125 trp,
3126 3126 linkrev,
3127 3127 p1.manifestnode(),
3128 3128 p2.manifestnode(),
3129 3129 added,
3130 3130 drop,
3131 3131 match=self.narrowmatch(),
3132 3132 )
3133 3133
3134 3134 if writechangesetcopy:
3135 3135 filesadded = [
3136 3136 f for f in changed if not (f in m1 or f in m2)
3137 3137 ]
3138 3138 filesremoved = removed
3139 3139 else:
3140 3140 self.ui.debug(
3141 3141 b'reusing manifest from p1 (listed files '
3142 3142 b'actually unchanged)\n'
3143 3143 )
3144 3144 mn = p1.manifestnode()
3145 3145 else:
3146 3146 self.ui.debug(b'reusing manifest from p1 (no file change)\n')
3147 3147 mn = p1.manifestnode()
3148 3148 files = []
3149 3149
3150 3150 if writecopiesto == b'changeset-only':
3151 3151 # If writing only to changeset extras, use None to indicate that
3152 3152 # no entry should be written. If writing to both, write an empty
3153 3153 # entry to prevent the reader from falling back to reading
3154 3154 # filelogs.
3155 3155 p1copies = p1copies or None
3156 3156 p2copies = p2copies or None
3157 3157 filesadded = filesadded or None
3158 3158 filesremoved = filesremoved or None
3159 3159
3160 3160 if origctx and origctx.manifestnode() == mn:
3161 3161 files = origctx.files()
3162 3162
3163 3163 # update changelog
3164 3164 self.ui.note(_(b"committing changelog\n"))
3165 3165 self.changelog.delayupdate(tr)
3166 3166 n = self.changelog.add(
3167 3167 mn,
3168 3168 files,
3169 3169 ctx.description(),
3170 3170 trp,
3171 3171 p1.node(),
3172 3172 p2.node(),
3173 3173 user,
3174 3174 ctx.date(),
3175 3175 ctx.extra().copy(),
3176 3176 p1copies,
3177 3177 p2copies,
3178 3178 filesadded,
3179 3179 filesremoved,
3180 3180 )
3181 3181 xp1, xp2 = p1.hex(), p2 and p2.hex() or b''
3182 3182 self.hook(
3183 3183 b'pretxncommit',
3184 3184 throw=True,
3185 3185 node=hex(n),
3186 3186 parent1=xp1,
3187 3187 parent2=xp2,
3188 3188 )
3189 3189 # set the new commit to its proper phase
3190 3190 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
3191 3191 if targetphase:
3192 3192 # retracting the boundary does not alter parent changesets.
3193 3193 # if a parent has a higher phase, the resulting phase will
3194 3194 # be compliant anyway
3195 3195 #
3196 3196 # if minimal phase was 0 we don't need to retract anything
3197 3197 phases.registernew(self, tr, targetphase, [n])
3198 3198 return n
3199 3199
3200 3200 @unfilteredmethod
3201 3201 def destroying(self):
3202 3202 '''Inform the repository that nodes are about to be destroyed.
3203 3203 Intended for use by strip and rollback, so there's a common
3204 3204 place for anything that has to be done before destroying history.
3205 3205
3206 3206 This is mostly useful for saving state that is in memory and waiting
3207 3207 to be flushed when the current lock is released. Because a call to
3208 3208 destroyed is imminent, the repo will be invalidated causing those
3209 3209 changes to stay in memory (waiting for the next unlock), or vanish
3210 3210 completely.
3211 3211 '''
3212 3212 # When using the same lock to commit and strip, the phasecache is left
3213 3213 # dirty after committing. Then when we strip, the repo is invalidated,
3214 3214 # causing those changes to disappear.
3215 3215 if '_phasecache' in vars(self):
3216 3216 self._phasecache.write()
3217 3217
3218 3218 @unfilteredmethod
3219 3219 def destroyed(self):
3220 3220 '''Inform the repository that nodes have been destroyed.
3221 3221 Intended for use by strip and rollback, so there's a common
3222 3222 place for anything that has to be done after destroying history.
3223 3223 '''
3224 3224 # When one tries to:
3225 3225 # 1) destroy nodes thus calling this method (e.g. strip)
3226 3226 # 2) use phasecache somewhere (e.g. commit)
3227 3227 #
3228 3228 # then 2) will fail because the phasecache contains nodes that were
3229 3229 # removed. We can either remove phasecache from the filecache,
3230 3230 # causing it to reload next time it is accessed, or simply filter
3231 3231 # the removed nodes now and write the updated cache.
3232 3232 self._phasecache.filterunknown(self)
3233 3233 self._phasecache.write()
3234 3234
3235 3235 # refresh all repository caches
3236 3236 self.updatecaches()
3237 3237
3238 3238 # Ensure the persistent tag cache is updated. Doing it now
3239 3239 # means that the tag cache only has to worry about destroyed
3240 3240 # heads immediately after a strip/rollback. That in turn
3241 3241 # guarantees that "cachetip == currenttip" (comparing both rev
3242 3242 # and node) always means no nodes have been added or destroyed.
3243 3243
3244 3244 # XXX this is suboptimal when qrefresh'ing: we strip the current
3245 3245 # head, refresh the tag cache, then immediately add a new head.
3246 3246 # But I think doing it this way is necessary for the "instant
3247 3247 # tag cache retrieval" case to work.
3248 3248 self.invalidate()
3249 3249
3250 3250 def status(
3251 3251 self,
3252 3252 node1=b'.',
3253 3253 node2=None,
3254 3254 match=None,
3255 3255 ignored=False,
3256 3256 clean=False,
3257 3257 unknown=False,
3258 3258 listsubrepos=False,
3259 3259 ):
3260 3260 '''a convenience method that calls node1.status(node2)'''
3261 3261 return self[node1].status(
3262 3262 node2, match, ignored, clean, unknown, listsubrepos
3263 3263 )
3264 3264
3265 3265 def addpostdsstatus(self, ps):
3266 3266 """Add a callback to run within the wlock, at the point at which status
3267 3267 fixups happen.
3268 3268
3269 3269 On status completion, callback(wctx, status) will be called with the
3270 3270 wlock held, unless the dirstate has changed from underneath or the wlock
3271 3271 couldn't be grabbed.
3272 3272
3273 3273 Callbacks should not capture and use a cached copy of the dirstate --
3274 3274 it might change in the meantime. Instead, they should access the
3275 3275 dirstate via wctx.repo().dirstate.
3276 3276
3277 3277 This list is emptied out after each status run -- extensions should
3278 3278 make sure they add to this list each time dirstate.status is called.
3279 3279 Extensions should also make sure they don't call this for statuses
3280 3280 that don't involve the dirstate.
3281 3281 """
3282 3282
3283 3283 # The list is located here for uniqueness reasons -- it is actually
3284 3284 # managed by the workingctx, but that isn't unique per-repo.
3285 3285 self._postdsstatus.append(ps)
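# Illustrative sketch (an assumption, not in the original source): an
# extension could register a post-dirstate-status callback such as
#
#   def poststatus(wctx, status):
#       # runs with wlock held; read state via wctx.repo().dirstate
#       wctx.repo().ui.debug(b'%d files modified\n' % len(status.modified))
#
#   repo.addpostdsstatus(poststatus)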
3286 3286
3287 3287 def postdsstatus(self):
3288 3288 """Used by workingctx to get the list of post-dirstate-status hooks."""
3289 3289 return self._postdsstatus
3290 3290
3291 3291 def clearpostdsstatus(self):
3292 3292 """Used by workingctx to clear post-dirstate-status hooks."""
3293 3293 del self._postdsstatus[:]
3294 3294
3295 3295 def heads(self, start=None):
3296 3296 if start is None:
3297 3297 cl = self.changelog
3298 3298 headrevs = reversed(cl.headrevs())
3299 3299 return [cl.node(rev) for rev in headrevs]
3300 3300
3301 3301 heads = self.changelog.heads(start)
3302 3302 # sort the output in rev descending order
3303 3303 return sorted(heads, key=self.changelog.rev, reverse=True)
3304 3304
3305 3305 def branchheads(self, branch=None, start=None, closed=False):
3306 3306 '''return a (possibly filtered) list of heads for the given branch
3307 3307
3308 3308 Heads are returned in topological order, from newest to oldest.
3309 3309 If branch is None, use the dirstate branch.
3310 3310 If start is not None, return only heads reachable from start.
3311 3311 If closed is True, return heads that are marked as closed as well.
3312 3312 '''
3313 3313 if branch is None:
3314 3314 branch = self[None].branch()
3315 3315 branches = self.branchmap()
3316 3316 if not branches.hasbranch(branch):
3317 3317 return []
3318 3318 # the cache returns heads ordered lowest to highest
3319 3319 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
3320 3320 if start is not None:
3321 3321 # filter out the heads that cannot be reached from startrev
3322 3322 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
3323 3323 bheads = [h for h in bheads if h in fbheads]
3324 3324 return bheads
3325 3325
3326 3326 def branches(self, nodes):
3327 3327 if not nodes:
3328 3328 nodes = [self.changelog.tip()]
3329 3329 b = []
3330 3330 for n in nodes:
3331 3331 t = n
3332 3332 while True:
3333 3333 p = self.changelog.parents(n)
3334 3334 if p[1] != nullid or p[0] == nullid:
3335 3335 b.append((t, n, p[0], p[1]))
3336 3336 break
3337 3337 n = p[0]
3338 3338 return b
3339 3339
3340 3340 def between(self, pairs):
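# Descriptive note (an addition, not in the original): for each (top, bottom)
# pair this walks first parents from top towards bottom and collects the
# nodes found at exponentially growing distances (1, 2, 4, ...); this is the
# sampling exposed by the legacy 'between' wire-protocol command.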
3341 3341 r = []
3342 3342
3343 3343 for top, bottom in pairs:
3344 3344 n, l, i = top, [], 0
3345 3345 f = 1
3346 3346
3347 3347 while n != bottom and n != nullid:
3348 3348 p = self.changelog.parents(n)[0]
3349 3349 if i == f:
3350 3350 l.append(n)
3351 3351 f = f * 2
3352 3352 n = p
3353 3353 i += 1
3354 3354
3355 3355 r.append(l)
3356 3356
3357 3357 return r
3358 3358
3359 3359 def checkpush(self, pushop):
3360 3360 """Extensions can override this function if additional checks have
3361 3361 to be performed before pushing, or call it if they override the push
3362 3362 command.
3363 3363 """
3364 3364
3365 3365 @unfilteredpropertycache
3366 3366 def prepushoutgoinghooks(self):
3367 3367 """Return a util.hooks instance whose hooks are called with a pushop
3368 3368 (providing repo, remote and outgoing) before pushing changesets.
3369 3369 """
3370 3370 return util.hooks()
3371 3371
3372 3372 def pushkey(self, namespace, key, old, new):
3373 3373 try:
3374 3374 tr = self.currenttransaction()
3375 3375 hookargs = {}
3376 3376 if tr is not None:
3377 3377 hookargs.update(tr.hookargs)
3378 3378 hookargs = pycompat.strkwargs(hookargs)
3379 3379 hookargs['namespace'] = namespace
3380 3380 hookargs['key'] = key
3381 3381 hookargs['old'] = old
3382 3382 hookargs['new'] = new
3383 3383 self.hook(b'prepushkey', throw=True, **hookargs)
3384 3384 except error.HookAbort as exc:
3385 3385 self.ui.write_err(_(b"pushkey-abort: %s\n") % exc)
3386 3386 if exc.hint:
3387 3387 self.ui.write_err(_(b"(%s)\n") % exc.hint)
3388 3388 return False
3389 3389 self.ui.debug(b'pushing key for "%s:%s"\n' % (namespace, key))
3390 3390 ret = pushkey.push(self, namespace, key, old, new)
3391 3391
3392 3392 def runhook(unused_success):
3393 3393 self.hook(
3394 3394 b'pushkey',
3395 3395 namespace=namespace,
3396 3396 key=key,
3397 3397 old=old,
3398 3398 new=new,
3399 3399 ret=ret,
3400 3400 )
3401 3401
3402 3402 self._afterlock(runhook)
3403 3403 return ret
3404 3404
3405 3405 def listkeys(self, namespace):
3406 3406 self.hook(b'prelistkeys', throw=True, namespace=namespace)
3407 3407 self.ui.debug(b'listing keys for "%s"\n' % namespace)
3408 3408 values = pushkey.list(self, namespace)
3409 3409 self.hook(b'listkeys', namespace=namespace, values=values)
3410 3410 return values
3411 3411
3412 3412 def debugwireargs(self, one, two, three=None, four=None, five=None):
3413 3413 '''used to test argument passing over the wire'''
3414 3414 return b"%s %s %s %s %s" % (
3415 3415 one,
3416 3416 two,
3417 3417 pycompat.bytestr(three),
3418 3418 pycompat.bytestr(four),
3419 3419 pycompat.bytestr(five),
3420 3420 )
3421 3421
3422 3422 def savecommitmessage(self, text):
3423 3423 fp = self.vfs(b'last-message.txt', b'wb')
3424 3424 try:
3425 3425 fp.write(text)
3426 3426 finally:
3427 3427 fp.close()
3428 3428 return self.pathto(fp.name[len(self.root) + 1 :])
3429 3429
3430 3430
3431 3431 # used to avoid circular references so destructors work
3432 3432 def aftertrans(files):
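# Descriptive note (an addition, not in the original): ``files`` is an
# iterable of (vfs, src, dest) triples; the returned callback performs the
# renames (e.g. journal -> undo files) once the transaction is finished
# with them.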
3433 3433 renamefiles = [tuple(t) for t in files]
3434 3434
3435 3435 def a():
3436 3436 for vfs, src, dest in renamefiles:
3437 3437 # if src and dest refer to the same file, vfs.rename is a no-op,
3438 3438 # leaving both src and dest on disk. Delete dest to make sure
3439 3439 # the rename cannot be such a no-op.
3440 3440 vfs.tryunlink(dest)
3441 3441 try:
3442 3442 vfs.rename(src, dest)
3443 3443 except OSError: # journal file does not yet exist
3444 3444 pass
3445 3445
3446 3446 return a
3447 3447
3448 3448
3449 3449 def undoname(fn):
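# Descriptive note (an addition, not in the original): maps a journal file
# name to its undo counterpart, e.g. b'store/journal' -> b'store/undo'
# (illustrative path).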
3450 3450 base, name = os.path.split(fn)
3451 3451 assert name.startswith(b'journal')
3452 3452 return os.path.join(base, name.replace(b'journal', b'undo', 1))
3453 3453
3454 3454
3455 3455 def instance(ui, path, create, intents=None, createopts=None):
3456 3456 localpath = util.urllocalpath(path)
3457 3457 if create:
3458 3458 createrepository(ui, localpath, createopts=createopts)
3459 3459
3460 3460 return makelocalrepository(ui, localpath, intents=intents)
3461 3461
3462 3462
3463 3463 def islocal(path):
3464 3464 return True
3465 3465
3466 3466
3467 3467 def defaultcreateopts(ui, createopts=None):
3468 3468 """Populate the default creation options for a repository.
3469 3469
3470 3470 A dictionary of explicitly requested creation options can be passed
3471 3471 in. Missing keys will be populated.
3472 3472 """
3473 3473 createopts = dict(createopts or {})
3474 3474
3475 3475 if b'backend' not in createopts:
3476 3476 # experimental config: storage.new-repo-backend
3477 3477 createopts[b'backend'] = ui.config(b'storage', b'new-repo-backend')
3478 3478
3479 3479 return createopts
3480 3480
3481 3481
3482 3482 def newreporequirements(ui, createopts):
3483 3483 """Determine the set of requirements for a new local repository.
3484 3484
3485 3485 Extensions can wrap this function to specify custom requirements for
3486 3486 new repositories.
3487 3487 """
3488 3488 # If the repo is being created from a shared repository, we copy
3489 3489 # its requirements.
3490 3490 if b'sharedrepo' in createopts:
3491 3491 requirements = set(createopts[b'sharedrepo'].requirements)
3492 3492 if createopts.get(b'sharedrelative'):
3493 3493 requirements.add(b'relshared')
3494 3494 else:
3495 3495 requirements.add(b'shared')
3496 3496
3497 3497 return requirements
3498 3498
3499 3499 if b'backend' not in createopts:
3500 3500 raise error.ProgrammingError(
3501 3501 b'backend key not present in createopts; '
3502 3502 b'was defaultcreateopts() called?'
3503 3503 )
3504 3504
3505 3505 if createopts[b'backend'] != b'revlogv1':
3506 3506 raise error.Abort(
3507 3507 _(
3508 3508 b'unable to determine repository requirements for '
3509 3509 b'storage backend: %s'
3510 3510 )
3511 3511 % createopts[b'backend']
3512 3512 )
3513 3513
3514 3514 requirements = {b'revlogv1'}
3515 3515 if ui.configbool(b'format', b'usestore'):
3516 3516 requirements.add(b'store')
3517 3517 if ui.configbool(b'format', b'usefncache'):
3518 3518 requirements.add(b'fncache')
3519 3519 if ui.configbool(b'format', b'dotencode'):
3520 3520 requirements.add(b'dotencode')
3521 3521
3522 3522 compengine = ui.config(b'format', b'revlog-compression')
3523 3523 if compengine not in util.compengines:
3524 3524 raise error.Abort(
3525 3525 _(
3526 3526 b'compression engine %s defined by '
3527 3527 b'format.revlog-compression not available'
3528 3528 )
3529 3529 % compengine,
3530 3530 hint=_(
3531 3531 b'run "hg debuginstall" to list available '
3532 3532 b'compression engines'
3533 3533 ),
3534 3534 )
3535 3535
3536 3536 # zlib is the historical default and doesn't need an explicit requirement.
3537 3537 elif compengine == b'zstd':
3538 3538 requirements.add(b'revlog-compression-zstd')
3539 3539 elif compengine != b'zlib':
3540 3540 requirements.add(b'exp-compression-%s' % compengine)
3541 3541
3542 3542 if scmutil.gdinitconfig(ui):
3543 3543 requirements.add(b'generaldelta')
3544 3544 if ui.configbool(b'format', b'sparse-revlog'):
3545 3545 requirements.add(SPARSEREVLOG_REQUIREMENT)
3546 3546
3547 3547 # experimental config: format.exp-use-side-data
3548 3548 if ui.configbool(b'format', b'exp-use-side-data'):
3549 3549 requirements.add(SIDEDATA_REQUIREMENT)
3550 3550 # experimental config: format.exp-use-copies-side-data-changeset
3551 3551 if ui.configbool(b'format', b'exp-use-copies-side-data-changeset'):
3552 3552 requirements.add(SIDEDATA_REQUIREMENT)
3553 3553 requirements.add(COPIESSDC_REQUIREMENT)
3554 3554 if ui.configbool(b'experimental', b'treemanifest'):
3555 3555 requirements.add(b'treemanifest')
3556 3556
3557 3557 revlogv2 = ui.config(b'experimental', b'revlogv2')
3558 3558 if revlogv2 == b'enable-unstable-format-and-corrupt-my-data':
3559 3559 requirements.remove(b'revlogv1')
3560 3560 # generaldelta is implied by revlogv2.
3561 3561 requirements.discard(b'generaldelta')
3562 3562 requirements.add(REVLOGV2_REQUIREMENT)
3563 3563 # experimental config: format.internal-phase
3564 3564 if ui.configbool(b'format', b'internal-phase'):
3565 3565 requirements.add(b'internal-phase')
3566 3566
3567 3567 if createopts.get(b'narrowfiles'):
3568 3568 requirements.add(repository.NARROW_REQUIREMENT)
3569 3569
3570 3570 if createopts.get(b'lfs'):
3571 3571 requirements.add(b'lfs')
3572 3572
3573 3573 if ui.configbool(b'format', b'bookmarks-in-store'):
3574 3574 requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
3575 3575
3576 3576 return requirements
3577 3577
3578 3578
3579 3579 def filterknowncreateopts(ui, createopts):
3580 3580 """Filters a dict of repo creation options against options that are known.
3581 3581
3582 3582 Receives a dict of repo creation options and returns a dict of those
3583 3583 options that we don't know how to handle.
3584 3584
3585 3585 This function is called as part of repository creation. If the
3586 3586 returned dict contains any items, repository creation will not
3587 3587 be allowed, as it means there was a request to create a repository
3588 3588 with options not recognized by loaded code.
3589 3589
3590 3590 Extensions can wrap this function to filter out creation options
3591 3591 they know how to handle.
3592 3592 """
3593 3593 known = {
3594 3594 b'backend',
3595 3595 b'lfs',
3596 3596 b'narrowfiles',
3597 3597 b'sharedrepo',
3598 3598 b'sharedrelative',
3599 3599 b'shareditems',
3600 3600 b'shallowfilestore',
3601 3601 }
3602 3602
3603 3603 return {k: v for k, v in createopts.items() if k not in known}
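# Illustrative sketch (an assumption, not part of the original file): an
# extension that supports a hypothetical b'myopt' creation option could
# accept it by wrapping this function, e.g.
#
#   def _filterknown(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop(b'myopt', None)
#       return unknown
#
#   extensions.wrapfunction(localrepo, 'filterknowncreateopts', _filterknown)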
3604 3604
3605 3605
3606 3606 def createrepository(ui, path, createopts=None):
3607 3607 """Create a new repository in a vfs.
3608 3608
3609 3609 ``path`` path to the new repo's working directory.
3610 3610 ``createopts`` options for the new repository.
3611 3611
3612 3612 The following keys for ``createopts`` are recognized:
3613 3613
3614 3614 backend
3615 3615 The storage backend to use.
3616 3616 lfs
3617 3617 Repository will be created with the ``lfs`` requirement. The lfs extension
3618 3618 will automatically be loaded when the repository is accessed.
3619 3619 narrowfiles
3620 3620 Set up repository to support narrow file storage.
3621 3621 sharedrepo
3622 3622 Repository object from which storage should be shared.
3623 3623 sharedrelative
3624 3624 Boolean indicating if the path to the shared repo should be
3625 3625 stored as relative. By default, the pointer to the "parent" repo
3626 3626 is stored as an absolute path.
3627 3627 shareditems
3628 3628 Set of items to share to the new repository (in addition to storage).
3629 3629 shallowfilestore
3630 3630 Indicates that storage for files should be shallow (not all ancestor
3631 3631 revisions are known).
3632 3632 """
3633 3633 createopts = defaultcreateopts(ui, createopts=createopts)
3634 3634
3635 3635 unknownopts = filterknowncreateopts(ui, createopts)
3636 3636
3637 3637 if not isinstance(unknownopts, dict):
3638 3638 raise error.ProgrammingError(
3639 3639 b'filterknowncreateopts() did not return a dict'
3640 3640 )
3641 3641
3642 3642 if unknownopts:
3643 3643 raise error.Abort(
3644 3644 _(
3645 3645 b'unable to create repository because of unknown '
3646 3646 b'creation option: %s'
3647 3647 )
3648 3648 % b', '.join(sorted(unknownopts)),
3649 3649 hint=_(b'is a required extension not loaded?'),
3650 3650 )
3651 3651
3652 3652 requirements = newreporequirements(ui, createopts=createopts)
3653 3653
3654 3654 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3655 3655
3656 3656 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3657 3657 if hgvfs.exists():
3658 3658 raise error.RepoError(_(b'repository %s already exists') % path)
3659 3659
3660 3660 if b'sharedrepo' in createopts:
3661 3661 sharedpath = createopts[b'sharedrepo'].sharedpath
3662 3662
3663 3663 if createopts.get(b'sharedrelative'):
3664 3664 try:
3665 3665 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3666 3666 except (IOError, ValueError) as e:
3667 3667 # ValueError is raised on Windows if the drive letters differ
3668 3668 # on each path.
3669 3669 raise error.Abort(
3670 3670 _(b'cannot calculate relative path'),
3671 3671 hint=stringutil.forcebytestr(e),
3672 3672 )
3673 3673
3674 3674 if not wdirvfs.exists():
3675 3675 wdirvfs.makedirs()
3676 3676
3677 3677 hgvfs.makedir(notindexed=True)
3678 3678 if b'sharedrepo' not in createopts:
3679 3679 hgvfs.mkdir(b'cache')
3680 3680 hgvfs.mkdir(b'wcache')
3681 3681
3682 3682 if b'store' in requirements and b'sharedrepo' not in createopts:
3683 3683 hgvfs.mkdir(b'store')
3684 3684
3685 3685 # We create an invalid changelog outside the store so very old
3686 3686 # Mercurial versions (which didn't know about the requirements
3687 3687 # file) encounter an error on reading the changelog. This
3688 3688 # effectively locks out old clients and prevents them from
3689 3689 # mucking with a repo in an unknown format.
3690 3690 #
3691 3691 # The revlog header has version 2, which won't be recognized by
3692 3692 # such old clients.
3693 3693 hgvfs.append(
3694 3694 b'00changelog.i',
3695 3695 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3696 3696 b'layout',
3697 3697 )
3698 3698
3699 3699 scmutil.writerequires(hgvfs, requirements)
3700 3700
3701 3701 # Write out file telling readers where to find the shared store.
3702 3702 if b'sharedrepo' in createopts:
3703 3703 hgvfs.write(b'sharedpath', sharedpath)
3704 3704
3705 3705 if createopts.get(b'shareditems'):
3706 3706 shared = b'\n'.join(sorted(createopts[b'shareditems'])) + b'\n'
3707 3707 hgvfs.write(b'shared', shared)
3708 3708
3709 3709
3710 3710 def poisonrepository(repo):
3711 3711 """Poison a repository instance so it can no longer be used."""
3712 3712 # Perform any cleanup on the instance.
3713 3713 repo.close()
3714 3714
3715 3715 # Our strategy is to replace the type of the object with one that
3716 3716 # has all attribute lookups result in error.
3717 3717 #
3718 3718 # But we have to allow the close() method because some constructors
3719 3719 # of repos call close() on repo references.
3720 3720 class poisonedrepository(object):
3721 3721 def __getattribute__(self, item):
3722 3722 if item == 'close':
3723 3723 return object.__getattribute__(self, item)
3724 3724
3725 3725 raise error.ProgrammingError(
3726 3726 b'repo instances should not be used after unshare'
3727 3727 )
3728 3728
3729 3729 def close(self):
3730 3730 pass
3731 3731
3732 3732 # We may have a repoview, which intercepts __setattr__. So be sure
3733 3733 # we operate at the lowest level possible.
3734 3734 object.__setattr__(repo, '__class__', poisonedrepository)
@@ -1,2712 +1,2712 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 import hashlib
12 11 import shutil
13 12 import stat
14 13 import struct
15 14
16 15 from .i18n import _
17 16 from .node import (
18 17 addednodeid,
19 18 bin,
20 19 hex,
21 20 modifiednodeid,
22 21 nullhex,
23 22 nullid,
24 23 nullrev,
25 24 )
26 25 from .pycompat import delattr
27 26 from .thirdparty import attr
28 27 from . import (
29 28 copies,
30 29 encoding,
31 30 error,
32 31 filemerge,
33 32 match as matchmod,
34 33 obsutil,
35 34 pathutil,
36 35 pycompat,
37 36 scmutil,
38 37 subrepoutil,
39 38 util,
40 39 worker,
41 40 )
41 from .utils import hashutil
42 42
43 43 _pack = struct.pack
44 44 _unpack = struct.unpack
45 45
46 46
47 47 def _droponode(data):
48 48 # used for compatibility for v1
49 49 bits = data.split(b'\0')
50 50 bits = bits[:-2] + bits[-1:]
51 51 return b'\0'.join(bits)
52 52
53 53
54 54 # Merge state record types. See ``mergestate`` docs for more.
55 55 RECORD_LOCAL = b'L'
56 56 RECORD_OTHER = b'O'
57 57 RECORD_MERGED = b'F'
58 58 RECORD_CHANGEDELETE_CONFLICT = b'C'
59 59 RECORD_MERGE_DRIVER_MERGE = b'D'
60 60 RECORD_PATH_CONFLICT = b'P'
61 61 RECORD_MERGE_DRIVER_STATE = b'm'
62 62 RECORD_FILE_VALUES = b'f'
63 63 RECORD_LABELS = b'l'
64 64 RECORD_OVERRIDE = b't'
65 65 RECORD_UNSUPPORTED_MANDATORY = b'X'
66 66 RECORD_UNSUPPORTED_ADVISORY = b'x'
67 67
68 68 MERGE_DRIVER_STATE_UNMARKED = b'u'
69 69 MERGE_DRIVER_STATE_MARKED = b'm'
70 70 MERGE_DRIVER_STATE_SUCCESS = b's'
71 71
72 72 MERGE_RECORD_UNRESOLVED = b'u'
73 73 MERGE_RECORD_RESOLVED = b'r'
74 74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
77 77
78 78 ACTION_FORGET = b'f'
79 79 ACTION_REMOVE = b'r'
80 80 ACTION_ADD = b'a'
81 81 ACTION_GET = b'g'
82 82 ACTION_PATH_CONFLICT = b'p'
83 83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
84 84 ACTION_ADD_MODIFIED = b'am'
85 85 ACTION_CREATED = b'c'
86 86 ACTION_DELETED_CHANGED = b'dc'
87 87 ACTION_CHANGED_DELETED = b'cd'
88 88 ACTION_MERGE = b'm'
89 89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
90 90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
91 91 ACTION_KEEP = b'k'
92 92 ACTION_EXEC = b'e'
93 93 ACTION_CREATED_MERGE = b'cm'
94 94
95 95
96 96 class mergestate(object):
97 97 '''track 3-way merge state of individual files
98 98
99 99 The merge state is stored on disk when needed. Two files are used: one with
100 100 an old format (version 1), and one with a new format (version 2). Version 2
101 101 stores a superset of the data in version 1, including new kinds of records
102 102 in the future. For more about the new format, see the documentation for
103 103 `_readrecordsv2`.
104 104
105 105 Each record can contain arbitrary content, and has an associated type. This
106 106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
107 107 versions of Mercurial that don't support it should abort. If `type` is
108 108 lowercase, the record can be safely ignored.
109 109
110 110 Currently known records:
111 111
112 112 L: the node of the "local" part of the merge (hexified version)
113 113 O: the node of the "other" part of the merge (hexified version)
114 114 F: a file to be merged entry
115 115 C: a change/delete or delete/change conflict
116 116 D: a file that the external merge driver will merge internally
117 117 (experimental)
118 118 P: a path conflict (file vs directory)
119 119 m: the external merge driver defined for this merge plus its run state
120 120 (experimental)
121 121 f: a (filename, dictionary) tuple of optional values for a given file
122 122 X: unsupported mandatory record type (used in tests)
123 123 x: unsupported advisory record type (used in tests)
124 124 l: the labels for the parts of the merge.
125 125
126 126 Merge driver run states (experimental):
127 127 u: driver-resolved files unmarked -- needs to be run next time we're about
128 128 to resolve or commit
129 129 m: driver-resolved files marked -- only needs to be run before commit
130 130 s: success/skipped -- does not need to be run any more
131 131
132 132 Merge record states (stored in self._state, indexed by filename):
133 133 u: unresolved conflict
134 134 r: resolved conflict
135 135 pu: unresolved path conflict (file conflicts with directory)
136 136 pr: resolved path conflict
137 137 d: driver-resolved conflict
138 138
139 139 The resolve command transitions between 'u' and 'r' for conflicts and
140 140 'pu' and 'pr' for path conflicts.
141 141 '''
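# Illustrative sketch (an addition, not from the original source): a freshly
# started merge with a single unresolved file roughly serializes to records
# such as
#
#   [(RECORD_LOCAL, <hex of local node>),
#    (RECORD_OTHER, <hex of other node>),
#    (RECORD_MERGED, b'\0'.join([filename] + state_fields))]
#
# where state_fields is the per-file value list kept in self._state (see
# _makerecords below).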
142 142
143 143 statepathv1 = b'merge/state'
144 144 statepathv2 = b'merge/state2'
145 145
146 146 @staticmethod
147 147 def clean(repo, node=None, other=None, labels=None):
148 148 """Initialize a brand new merge state, removing any existing state on
149 149 disk."""
150 150 ms = mergestate(repo)
151 151 ms.reset(node, other, labels)
152 152 return ms
153 153
154 154 @staticmethod
155 155 def read(repo):
156 156 """Initialize the merge state, reading it from disk."""
157 157 ms = mergestate(repo)
158 158 ms._read()
159 159 return ms
160 160
161 161 def __init__(self, repo):
162 162 """Initialize the merge state.
163 163
164 164 Do not use this directly! Instead call read() or clean()."""
165 165 self._repo = repo
166 166 self._dirty = False
167 167 self._labels = None
168 168
169 169 def reset(self, node=None, other=None, labels=None):
170 170 self._state = {}
171 171 self._stateextras = {}
172 172 self._local = None
173 173 self._other = None
174 174 self._labels = labels
175 175 for var in ('localctx', 'otherctx'):
176 176 if var in vars(self):
177 177 delattr(self, var)
178 178 if node:
179 179 self._local = node
180 180 self._other = other
181 181 self._readmergedriver = None
182 182 if self.mergedriver:
183 183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
184 184 else:
185 185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
186 186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
187 187 self._results = {}
188 188 self._dirty = False
189 189
190 190 def _read(self):
191 191 """Analyse each record content to restore a serialized state from disk
192 192
193 193 This function processes "record" entries produced by the de-serialization
194 194 of the on-disk file.
195 195 """
196 196 self._state = {}
197 197 self._stateextras = {}
198 198 self._local = None
199 199 self._other = None
200 200 for var in ('localctx', 'otherctx'):
201 201 if var in vars(self):
202 202 delattr(self, var)
203 203 self._readmergedriver = None
204 204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
205 205 unsupported = set()
206 206 records = self._readrecords()
207 207 for rtype, record in records:
208 208 if rtype == RECORD_LOCAL:
209 209 self._local = bin(record)
210 210 elif rtype == RECORD_OTHER:
211 211 self._other = bin(record)
212 212 elif rtype == RECORD_MERGE_DRIVER_STATE:
213 213 bits = record.split(b'\0', 1)
214 214 mdstate = bits[1]
215 215 if len(mdstate) != 1 or mdstate not in (
216 216 MERGE_DRIVER_STATE_UNMARKED,
217 217 MERGE_DRIVER_STATE_MARKED,
218 218 MERGE_DRIVER_STATE_SUCCESS,
219 219 ):
220 220 # the merge driver should be idempotent, so just rerun it
221 221 mdstate = MERGE_DRIVER_STATE_UNMARKED
222 222
223 223 self._readmergedriver = bits[0]
224 224 self._mdstate = mdstate
225 225 elif rtype in (
226 226 RECORD_MERGED,
227 227 RECORD_CHANGEDELETE_CONFLICT,
228 228 RECORD_PATH_CONFLICT,
229 229 RECORD_MERGE_DRIVER_MERGE,
230 230 ):
231 231 bits = record.split(b'\0')
232 232 self._state[bits[0]] = bits[1:]
233 233 elif rtype == RECORD_FILE_VALUES:
234 234 filename, rawextras = record.split(b'\0', 1)
235 235 extraparts = rawextras.split(b'\0')
236 236 extras = {}
237 237 i = 0
238 238 while i < len(extraparts):
239 239 extras[extraparts[i]] = extraparts[i + 1]
240 240 i += 2
241 241
242 242 self._stateextras[filename] = extras
243 243 elif rtype == RECORD_LABELS:
244 244 labels = record.split(b'\0', 2)
245 245 self._labels = [l for l in labels if len(l) > 0]
246 246 elif not rtype.islower():
247 247 unsupported.add(rtype)
248 248 self._results = {}
249 249 self._dirty = False
250 250
251 251 if unsupported:
252 252 raise error.UnsupportedMergeRecords(unsupported)
253 253
254 254 def _readrecords(self):
255 255 """Read merge state from disk and return a list of record (TYPE, data)
256 256
257 257 We read data from both v1 and v2 files and decide which one to use.
258 258
259 259 V1 has been used by versions prior to 2.9.1 and contains less data than
260 260 v2. We read both versions and check whether any data in v2 contradicts
261 261 v1. If there is no contradiction we can safely assume that both v1
262 262 and v2 were written at the same time and use the extra data in v2. If
263 263 there is a contradiction we ignore the v2 content, as we assume an old
264 264 version of Mercurial has overwritten the mergestate file and left an
265 265 old v2 file around.
266 266
267 267 returns a list of records [(TYPE, data), ...]"""
268 268 v1records = self._readrecordsv1()
269 269 v2records = self._readrecordsv2()
270 270 if self._v1v2match(v1records, v2records):
271 271 return v2records
272 272 else:
273 273 # v1 file is newer than v2 file, use it
274 274 # we have to infer the "other" changeset of the merge
275 275 # we cannot do better than that with v1 of the format
276 276 mctx = self._repo[None].parents()[-1]
277 277 v1records.append((RECORD_OTHER, mctx.hex()))
278 278 # add placeholder "other" file node information
279 279 # nobody is using it yet so we do not need to fetch the data
280 280 # if mctx was wrong, `mctx[bits[-2]]` may fail.
281 281 for idx, r in enumerate(v1records):
282 282 if r[0] == RECORD_MERGED:
283 283 bits = r[1].split(b'\0')
284 284 bits.insert(-2, b'')
285 285 v1records[idx] = (r[0], b'\0'.join(bits))
286 286 return v1records
287 287
288 288 def _v1v2match(self, v1records, v2records):
289 289 oldv2 = set() # old format version of v2 record
290 290 for rec in v2records:
291 291 if rec[0] == RECORD_LOCAL:
292 292 oldv2.add(rec)
293 293 elif rec[0] == RECORD_MERGED:
294 294 # drop the onode data (not contained in v1)
295 295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
296 296 for rec in v1records:
297 297 if rec not in oldv2:
298 298 return False
299 299 else:
300 300 return True
301 301
302 302 def _readrecordsv1(self):
303 303 """read on disk merge state for version 1 file
304 304
305 305 returns a list of records [(TYPE, data), ...]
306 306
307 307 Note: the "F" data from this file are one entry short
308 308 (no "other file node" entry)
309 309 """
310 310 records = []
311 311 try:
312 312 f = self._repo.vfs(self.statepathv1)
313 313 for i, l in enumerate(f):
314 314 if i == 0:
315 315 records.append((RECORD_LOCAL, l[:-1]))
316 316 else:
317 317 records.append((RECORD_MERGED, l[:-1]))
318 318 f.close()
319 319 except IOError as err:
320 320 if err.errno != errno.ENOENT:
321 321 raise
322 322 return records
323 323
324 324 def _readrecordsv2(self):
325 325 """read on disk merge state for version 2 file
326 326
327 327 This format is a list of arbitrary records of the form:
328 328
329 329 [type][length][content]
330 330
331 331 `type` is a single character, `length` is a 4 byte integer, and
332 332 `content` is an arbitrary byte sequence of length `length`.
333 333
334 334 Mercurial versions prior to 3.7 have a bug where if there are
335 335 unsupported mandatory merge records, attempting to clear out the merge
336 336 state with hg update --clean or similar aborts. The 't' record type
337 337 works around that by writing out what those versions treat as an
338 338 advisory record but which later versions interpret as special: the first
339 339 character is the 'real' record type and everything onwards is the data.
340 340
341 341 Returns list of records [(TYPE, data), ...]."""
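# Illustrative sketch (an addition, not in the original): a record of an
# unsupported-but-mandatory type b'X' carrying b'data' would be framed on
# disk roughly as
#
#   payload = RECORD_UNSUPPORTED_MANDATORY + b'data'
#   frame = RECORD_OVERRIDE + _pack(b'>I', len(payload)) + payload
#
# pre-3.7 readers see an advisory 't' record and skip it; the loop below
# unwraps it back to type b'X'.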
342 342 records = []
343 343 try:
344 344 f = self._repo.vfs(self.statepathv2)
345 345 data = f.read()
346 346 off = 0
347 347 end = len(data)
348 348 while off < end:
349 349 rtype = data[off : off + 1]
350 350 off += 1
351 351 length = _unpack(b'>I', data[off : (off + 4)])[0]
352 352 off += 4
353 353 record = data[off : (off + length)]
354 354 off += length
355 355 if rtype == RECORD_OVERRIDE:
356 356 rtype, record = record[0:1], record[1:]
357 357 records.append((rtype, record))
358 358 f.close()
359 359 except IOError as err:
360 360 if err.errno != errno.ENOENT:
361 361 raise
362 362 return records
363 363
364 364 @util.propertycache
365 365 def mergedriver(self):
366 366 # protect against the following:
367 367 # - A configures a malicious merge driver in their hgrc, then
368 368 # pauses the merge
369 369 # - A edits their hgrc to remove references to the merge driver
370 370 # - A gives a copy of their entire repo, including .hg, to B
371 371 # - B inspects .hgrc and finds it to be clean
372 372 # - B then continues the merge and the malicious merge driver
373 373 # gets invoked
374 374 configmergedriver = self._repo.ui.config(
375 375 b'experimental', b'mergedriver'
376 376 )
377 377 if (
378 378 self._readmergedriver is not None
379 379 and self._readmergedriver != configmergedriver
380 380 ):
381 381 raise error.ConfigError(
382 382 _(b"merge driver changed since merge started"),
383 383 hint=_(b"revert merge driver change or abort merge"),
384 384 )
385 385
386 386 return configmergedriver
387 387
388 388 @util.propertycache
389 389 def localctx(self):
390 390 if self._local is None:
391 391 msg = b"localctx accessed but self._local isn't set"
392 392 raise error.ProgrammingError(msg)
393 393 return self._repo[self._local]
394 394
395 395 @util.propertycache
396 396 def otherctx(self):
397 397 if self._other is None:
398 398 msg = b"otherctx accessed but self._other isn't set"
399 399 raise error.ProgrammingError(msg)
400 400 return self._repo[self._other]
401 401
402 402 def active(self):
403 403 """Whether mergestate is active.
404 404
405 405 Returns True if there appears to be mergestate. This is a rough proxy
406 406 for "is a merge in progress."
407 407 """
408 408 # Check local variables before looking at filesystem for performance
409 409 # reasons.
410 410 return (
411 411 bool(self._local)
412 412 or bool(self._state)
413 413 or self._repo.vfs.exists(self.statepathv1)
414 414 or self._repo.vfs.exists(self.statepathv2)
415 415 )
416 416
417 417 def commit(self):
418 418 """Write current state on disk (if necessary)"""
419 419 if self._dirty:
420 420 records = self._makerecords()
421 421 self._writerecords(records)
422 422 self._dirty = False
423 423
424 424 def _makerecords(self):
425 425 records = []
426 426 records.append((RECORD_LOCAL, hex(self._local)))
427 427 records.append((RECORD_OTHER, hex(self._other)))
428 428 if self.mergedriver:
429 429 records.append(
430 430 (
431 431 RECORD_MERGE_DRIVER_STATE,
432 432 b'\0'.join([self.mergedriver, self._mdstate]),
433 433 )
434 434 )
435 435 # Write out state items. In all cases, the value of the state map entry
436 436 # is written as the contents of the record. The record type depends on
437 437 # the type of state that is stored, and capital-letter records are used
438 438 # to prevent older versions of Mercurial that do not support the feature
439 439 # from loading them.
440 440 for filename, v in pycompat.iteritems(self._state):
441 441 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
442 442 # Driver-resolved merge. These are stored in 'D' records.
443 443 records.append(
444 444 (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
445 445 )
446 446 elif v[0] in (
447 447 MERGE_RECORD_UNRESOLVED_PATH,
448 448 MERGE_RECORD_RESOLVED_PATH,
449 449 ):
450 450 # Path conflicts. These are stored in 'P' records. The current
451 451 # resolution state ('pu' or 'pr') is stored within the record.
452 452 records.append(
453 453 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
454 454 )
455 455 elif v[1] == nullhex or v[6] == nullhex:
456 456 # Change/Delete or Delete/Change conflicts. These are stored in
457 457 # 'C' records. v[1] is the local file, and is nullhex when the
458 458 # file is deleted locally ('dc'). v[6] is the remote file, and
459 459 # is nullhex when the file is deleted remotely ('cd').
460 460 records.append(
461 461 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
462 462 )
463 463 else:
464 464 # Normal files. These are stored in 'F' records.
465 465 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
466 466 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
467 467 rawextras = b'\0'.join(
468 468 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
469 469 )
470 470 records.append(
471 471 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
472 472 )
473 473 if self._labels is not None:
474 474 labels = b'\0'.join(self._labels)
475 475 records.append((RECORD_LABELS, labels))
476 476 return records
477 477
478 478 def _writerecords(self, records):
479 479 """Write current state on disk (both v1 and v2)"""
480 480 self._writerecordsv1(records)
481 481 self._writerecordsv2(records)
482 482
483 483 def _writerecordsv1(self, records):
484 484 """Write current state on disk in a version 1 file"""
485 485 f = self._repo.vfs(self.statepathv1, b'wb')
486 486 irecords = iter(records)
487 487 lrecords = next(irecords)
488 488 assert lrecords[0] == RECORD_LOCAL
489 489 f.write(hex(self._local) + b'\n')
490 490 for rtype, data in irecords:
491 491 if rtype == RECORD_MERGED:
492 492 f.write(b'%s\n' % _droponode(data))
493 493 f.close()
494 494
495 495 def _writerecordsv2(self, records):
496 496 """Write current state on disk in a version 2 file
497 497
498 498 See the docstring for _readrecordsv2 for why we use 't'."""
499 499 # these are the records that all version 2 clients can read
500 500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
501 501 f = self._repo.vfs(self.statepathv2, b'wb')
502 502 for key, data in records:
503 503 assert len(key) == 1
504 504 if key not in allowlist:
505 505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
506 506 format = b'>sI%is' % len(data)
507 507 f.write(_pack(format, key, len(data), data))
508 508 f.close()
509 509
510 510 @staticmethod
511 511 def getlocalkey(path):
512 512 """hash the path of a local file context for storage in the .hg/merge
513 513 directory."""
514 514
515 return hex(hashlib.sha1(path).digest())
515 return hex(hashutil.sha1(path).digest())
516 516
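# [editor's sketch] getlocalkey() is the one line this changeset touches in
# this hunk: hashlib.sha1 is replaced by hashutil.sha1, which is expected to
# produce the same digest. A standalone approximation of the key it derives
# (using hashlib directly here purely for illustration):
import hashlib
from binascii import hexlify

def localkey(path):
    # the local version of `path` is backed up under .hg/merge/<hex key>
    return hexlify(hashlib.sha1(path).digest())

key = localkey(b'foo/bar.txt')
assert len(key) == 40  # a sha1 hex digest is always 40 characters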
517 517 def add(self, fcl, fco, fca, fd):
518 518 """add a new (potentially?) conflicting file to the merge state
519 519 fcl: file context for local,
520 520 fco: file context for remote,
521 521 fca: file context for ancestors,
522 522 fd: file path of the resulting merge.
523 523
524 524 note: also write the local version to the `.hg/merge` directory.
525 525 """
526 526 if fcl.isabsent():
527 527 localkey = nullhex
528 528 else:
529 529 localkey = mergestate.getlocalkey(fcl.path())
530 530 self._repo.vfs.write(b'merge/' + localkey, fcl.data())
531 531 self._state[fd] = [
532 532 MERGE_RECORD_UNRESOLVED,
533 533 localkey,
534 534 fcl.path(),
535 535 fca.path(),
536 536 hex(fca.filenode()),
537 537 fco.path(),
538 538 hex(fco.filenode()),
539 539 fcl.flags(),
540 540 ]
541 541 self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
542 542 self._dirty = True
543 543
544 544 def addpath(self, path, frename, forigin):
545 545 """add a new conflicting path to the merge state
546 546 path: the path that conflicts
547 547 frename: the filename the conflicting file was renamed to
548 548 forigin: origin of the file ('l' or 'r' for local/remote)
549 549 """
550 550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
551 551 self._dirty = True
552 552
553 553 def __contains__(self, dfile):
554 554 return dfile in self._state
555 555
556 556 def __getitem__(self, dfile):
557 557 return self._state[dfile][0]
558 558
559 559 def __iter__(self):
560 560 return iter(sorted(self._state))
561 561
562 562 def files(self):
563 563 return self._state.keys()
564 564
565 565 def mark(self, dfile, state):
566 566 self._state[dfile][0] = state
567 567 self._dirty = True
568 568
569 569 def mdstate(self):
570 570 return self._mdstate
571 571
572 572 def unresolved(self):
573 573 """Obtain the paths of unresolved files."""
574 574
575 575 for f, entry in pycompat.iteritems(self._state):
576 576 if entry[0] in (
577 577 MERGE_RECORD_UNRESOLVED,
578 578 MERGE_RECORD_UNRESOLVED_PATH,
579 579 ):
580 580 yield f
581 581
582 582 def driverresolved(self):
583 583 """Obtain the paths of driver-resolved files."""
584 584
585 585 for f, entry in self._state.items():
586 586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
587 587 yield f
588 588
589 589 def extras(self, filename):
590 590 return self._stateextras.setdefault(filename, {})
591 591
592 592 def _resolve(self, preresolve, dfile, wctx):
593 593 """rerun merge process for file path `dfile`"""
594 594 if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
595 595 return True, 0
596 596 stateentry = self._state[dfile]
597 597 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
598 598 octx = self._repo[self._other]
599 599 extras = self.extras(dfile)
600 600 anccommitnode = extras.get(b'ancestorlinknode')
601 601 if anccommitnode:
602 602 actx = self._repo[anccommitnode]
603 603 else:
604 604 actx = None
605 605 fcd = self._filectxorabsent(localkey, wctx, dfile)
606 606 fco = self._filectxorabsent(onode, octx, ofile)
607 607 # TODO: move this to filectxorabsent
608 608 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
609 609 # "premerge" x flags
610 610 flo = fco.flags()
611 611 fla = fca.flags()
612 612 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
613 613 if fca.node() == nullid and flags != flo:
614 614 if preresolve:
615 615 self._repo.ui.warn(
616 616 _(
617 617 b'warning: cannot merge flags for %s '
618 618 b'without common ancestor - keeping local flags\n'
619 619 )
620 620 % afile
621 621 )
622 622 elif flags == fla:
623 623 flags = flo
624 624 if preresolve:
625 625 # restore local
626 626 if localkey != nullhex:
627 627 f = self._repo.vfs(b'merge/' + localkey)
628 628 wctx[dfile].write(f.read(), flags)
629 629 f.close()
630 630 else:
631 631 wctx[dfile].remove(ignoremissing=True)
632 632 complete, r, deleted = filemerge.premerge(
633 633 self._repo,
634 634 wctx,
635 635 self._local,
636 636 lfile,
637 637 fcd,
638 638 fco,
639 639 fca,
640 640 labels=self._labels,
641 641 )
642 642 else:
643 643 complete, r, deleted = filemerge.filemerge(
644 644 self._repo,
645 645 wctx,
646 646 self._local,
647 647 lfile,
648 648 fcd,
649 649 fco,
650 650 fca,
651 651 labels=self._labels,
652 652 )
653 653 if r is None:
654 654 # no real conflict
655 655 del self._state[dfile]
656 656 self._stateextras.pop(dfile, None)
657 657 self._dirty = True
658 658 elif not r:
659 659 self.mark(dfile, MERGE_RECORD_RESOLVED)
660 660
661 661 if complete:
662 662 action = None
663 663 if deleted:
664 664 if fcd.isabsent():
665 665 # dc: local picked. Need to drop if present, which may
666 666 # happen on re-resolves.
667 667 action = ACTION_FORGET
668 668 else:
669 669 # cd: remote picked (or otherwise deleted)
670 670 action = ACTION_REMOVE
671 671 else:
672 672 if fcd.isabsent(): # dc: remote picked
673 673 action = ACTION_GET
674 674 elif fco.isabsent(): # cd: local picked
675 675 if dfile in self.localctx:
676 676 action = ACTION_ADD_MODIFIED
677 677 else:
678 678 action = ACTION_ADD
679 679 # else: regular merges (no action necessary)
680 680 self._results[dfile] = r, action
681 681
682 682 return complete, r
683 683
684 684 def _filectxorabsent(self, hexnode, ctx, f):
685 685 if hexnode == nullhex:
686 686 return filemerge.absentfilectx(ctx, f)
687 687 else:
688 688 return ctx[f]
689 689
690 690 def preresolve(self, dfile, wctx):
691 691 """run premerge process for dfile
692 692
693 693 Returns whether the merge is complete, and the exit code."""
694 694 return self._resolve(True, dfile, wctx)
695 695
696 696 def resolve(self, dfile, wctx):
697 697 """run merge process (assuming premerge was run) for dfile
698 698
699 699 Returns the exit code of the merge."""
700 700 return self._resolve(False, dfile, wctx)[1]
701 701
702 702 def counts(self):
703 703 """return counts for updated, merged and removed files in this
704 704 session"""
705 705 updated, merged, removed = 0, 0, 0
706 706 for r, action in pycompat.itervalues(self._results):
707 707 if r is None:
708 708 updated += 1
709 709 elif r == 0:
710 710 if action == ACTION_REMOVE:
711 711 removed += 1
712 712 else:
713 713 merged += 1
714 714 return updated, merged, removed
715 715
716 716 def unresolvedcount(self):
717 717 """get unresolved count for this merge (persistent)"""
718 718 return len(list(self.unresolved()))
719 719
720 720 def actions(self):
721 721 """return lists of actions to perform on the dirstate"""
722 722 actions = {
723 723 ACTION_REMOVE: [],
724 724 ACTION_FORGET: [],
725 725 ACTION_ADD: [],
726 726 ACTION_ADD_MODIFIED: [],
727 727 ACTION_GET: [],
728 728 }
729 729 for f, (r, action) in pycompat.iteritems(self._results):
730 730 if action is not None:
731 731 actions[action].append((f, None, b"merge result"))
732 732 return actions
733 733
734 734 def recordactions(self):
735 735 """record remove/add/get actions in the dirstate"""
736 736 branchmerge = self._repo.dirstate.p2() != nullid
737 737 recordupdates(self._repo, self.actions(), branchmerge, None)
738 738
739 739 def queueremove(self, f):
740 740 """queues a file to be removed from the dirstate
741 741
742 742 Meant for use by custom merge drivers."""
743 743 self._results[f] = 0, ACTION_REMOVE
744 744
745 745 def queueadd(self, f):
746 746 """queues a file to be added to the dirstate
747 747
748 748 Meant for use by custom merge drivers."""
749 749 self._results[f] = 0, ACTION_ADD
750 750
751 751 def queueget(self, f):
752 752 """queues a file to be marked modified in the dirstate
753 753
754 754 Meant for use by custom merge drivers."""
755 755 self._results[f] = 0, ACTION_GET
756 756
757 757
758 758 def _getcheckunknownconfig(repo, section, name):
759 759 config = repo.ui.config(section, name)
760 760 valid = [b'abort', b'ignore', b'warn']
761 761 if config not in valid:
762 762 validstr = b', '.join([b"'" + v + b"'" for v in valid])
763 763 raise error.ConfigError(
764 764 _(b"%s.%s not valid ('%s' is none of %s)")
765 765 % (section, name, config, validstr)
766 766 )
767 767 return config
768 768
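# [editor's note] The two settings validated here are merge.checkunknown and
# merge.checkignored (see _checkunknownfiles below); each accepts 'abort',
# 'ignore' or 'warn', and any other value raises the ConfigError above.
# A sample hgrc exercising them:
#
#   [merge]
#   checkunknown = warn
#   checkignored = abort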
769 769
770 770 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
771 771 if wctx.isinmemory():
772 772 # Nothing to do in IMM because nothing in the "working copy" can be an
773 773 # unknown file.
774 774 #
775 775 # Note that we should bail out here, not in ``_checkunknownfiles()``,
776 776 # because that function does other useful work.
777 777 return False
778 778
779 779 if f2 is None:
780 780 f2 = f
781 781 return (
782 782 repo.wvfs.audit.check(f)
783 783 and repo.wvfs.isfileorlink(f)
784 784 and repo.dirstate.normalize(f) not in repo.dirstate
785 785 and mctx[f2].cmp(wctx[f])
786 786 )
787 787
788 788
789 789 class _unknowndirschecker(object):
790 790 """
791 791 Look for any unknown files or directories that may have a path conflict
792 792 with a file. If any path prefix of the file exists as a file or link,
793 793 then it conflicts. If the file itself is a directory that contains any
794 794 file that is not tracked, then it conflicts.
795 795
796 796 Returns the shortest path at which a conflict occurs, or None if there is
797 797 no conflict.
798 798 """
799 799
800 800 def __init__(self):
801 801 # A set of paths known to be good. This prevents repeated checking of
802 802 # dirs. It will be updated with any new dirs that are checked and found
803 803 # to be safe.
804 804 self._unknowndircache = set()
805 805
806 806 # A set of paths that are known to be absent. This prevents repeated
807 807 # checking of subdirectories that are known not to exist. It will be
808 808 # updated with any new dirs that are checked and found to be absent.
809 809 self._missingdircache = set()
810 810
811 811 def __call__(self, repo, wctx, f):
812 812 if wctx.isinmemory():
813 813 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
814 814 return False
815 815
816 816 # Check for path prefixes that exist as unknown files.
817 817 for p in reversed(list(pathutil.finddirs(f))):
818 818 if p in self._missingdircache:
819 819 return
820 820 if p in self._unknowndircache:
821 821 continue
822 822 if repo.wvfs.audit.check(p):
823 823 if (
824 824 repo.wvfs.isfileorlink(p)
825 825 and repo.dirstate.normalize(p) not in repo.dirstate
826 826 ):
827 827 return p
828 828 if not repo.wvfs.lexists(p):
829 829 self._missingdircache.add(p)
830 830 return
831 831 self._unknowndircache.add(p)
832 832
833 833 # Check if the file conflicts with a directory containing unknown files.
834 834 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
835 835 # Does the directory contain any files that are not in the dirstate?
836 836 for p, dirs, files in repo.wvfs.walk(f):
837 837 for fn in files:
838 838 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
839 839 relf = repo.dirstate.normalize(relf, isknown=True)
840 840 if relf not in repo.dirstate:
841 841 return f
842 842 return None
843 843
844 844
845 845 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
846 846 """
847 847 Considers any actions that care about the presence of conflicting unknown
848 848 files. For some actions, the result is to abort; for others, it is to
849 849 choose a different action.
850 850 """
851 851 fileconflicts = set()
852 852 pathconflicts = set()
853 853 warnconflicts = set()
854 854 abortconflicts = set()
855 855 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
856 856 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
857 857 pathconfig = repo.ui.configbool(
858 858 b'experimental', b'merge.checkpathconflicts'
859 859 )
860 860 if not force:
861 861
862 862 def collectconflicts(conflicts, config):
863 863 if config == b'abort':
864 864 abortconflicts.update(conflicts)
865 865 elif config == b'warn':
866 866 warnconflicts.update(conflicts)
867 867
868 868 checkunknowndirs = _unknowndirschecker()
869 869 for f, (m, args, msg) in pycompat.iteritems(actions):
870 870 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
871 871 if _checkunknownfile(repo, wctx, mctx, f):
872 872 fileconflicts.add(f)
873 873 elif pathconfig and f not in wctx:
874 874 path = checkunknowndirs(repo, wctx, f)
875 875 if path is not None:
876 876 pathconflicts.add(path)
877 877 elif m == ACTION_LOCAL_DIR_RENAME_GET:
878 878 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
879 879 fileconflicts.add(f)
880 880
881 881 allconflicts = fileconflicts | pathconflicts
882 882 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
883 883 unknownconflicts = allconflicts - ignoredconflicts
884 884 collectconflicts(ignoredconflicts, ignoredconfig)
885 885 collectconflicts(unknownconflicts, unknownconfig)
886 886 else:
887 887 for f, (m, args, msg) in pycompat.iteritems(actions):
888 888 if m == ACTION_CREATED_MERGE:
889 889 fl2, anc = args
890 890 different = _checkunknownfile(repo, wctx, mctx, f)
891 891 if repo.dirstate._ignore(f):
892 892 config = ignoredconfig
893 893 else:
894 894 config = unknownconfig
895 895
896 896 # The behavior when force is True is described by this table:
897 897 # config different mergeforce | action backup
898 898 # * n * | get n
899 899 # * y y | merge -
900 900 # abort y n | merge - (1)
901 901 # warn y n | warn + get y
902 902 # ignore y n | get y
903 903 #
904 904 # (1) this is probably the wrong behavior here -- we should
905 905 # probably abort, but some actions like rebases currently
906 906 # don't like an abort happening in the middle of
907 907 # merge.update.
908 908 if not different:
909 909 actions[f] = (ACTION_GET, (fl2, False), b'remote created')
910 910 elif mergeforce or config == b'abort':
911 911 actions[f] = (
912 912 ACTION_MERGE,
913 913 (f, f, None, False, anc),
914 914 b'remote differs from untracked local',
915 915 )
916 916 elif config == b'abort':
917 917 abortconflicts.add(f)
918 918 else:
919 919 if config == b'warn':
920 920 warnconflicts.add(f)
921 921 actions[f] = (ACTION_GET, (fl2, True), b'remote created')
922 922
923 923 for f in sorted(abortconflicts):
924 924 warn = repo.ui.warn
925 925 if f in pathconflicts:
926 926 if repo.wvfs.isfileorlink(f):
927 927 warn(_(b"%s: untracked file conflicts with directory\n") % f)
928 928 else:
929 929 warn(_(b"%s: untracked directory conflicts with file\n") % f)
930 930 else:
931 931 warn(_(b"%s: untracked file differs\n") % f)
932 932 if abortconflicts:
933 933 raise error.Abort(
934 934 _(
935 935 b"untracked files in working directory "
936 936 b"differ from files in requested revision"
937 937 )
938 938 )
939 939
940 940 for f in sorted(warnconflicts):
941 941 if repo.wvfs.isfileorlink(f):
942 942 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
943 943 else:
944 944 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
945 945
946 946 for f, (m, args, msg) in pycompat.iteritems(actions):
947 947 if m == ACTION_CREATED:
948 948 backup = (
949 949 f in fileconflicts
950 950 or f in pathconflicts
951 951 or any(p in pathconflicts for p in pathutil.finddirs(f))
952 952 )
953 953 (flags,) = args
954 954 actions[f] = (ACTION_GET, (flags, backup), msg)
955 955
956 956
957 957 def _forgetremoved(wctx, mctx, branchmerge):
958 958 """
959 959 Forget removed files
960 960
961 961 If we're jumping between revisions (as opposed to merging), and if
962 962 neither the working directory nor the target rev has the file,
963 963 then we need to remove it from the dirstate, to prevent the
964 964 dirstate from listing the file when it is no longer in the
965 965 manifest.
966 966
967 967 If we're merging, and the other revision has removed a file
968 968 that is not present in the working directory, we need to mark it
969 969 as removed.
970 970 """
971 971
972 972 actions = {}
973 973 m = ACTION_FORGET
974 974 if branchmerge:
975 975 m = ACTION_REMOVE
976 976 for f in wctx.deleted():
977 977 if f not in mctx:
978 978 actions[f] = m, None, b"forget deleted"
979 979
980 980 if not branchmerge:
981 981 for f in wctx.removed():
982 982 if f not in mctx:
983 983 actions[f] = ACTION_FORGET, None, b"forget removed"
984 984
985 985 return actions
986 986
987 987
988 988 def _checkcollision(repo, wmf, actions):
989 989 """
990 990 Check for case-folding collisions.
991 991 """
992 992
993 993 # If the repo is narrowed, filter out files outside the narrowspec.
994 994 narrowmatch = repo.narrowmatch()
995 995 if not narrowmatch.always():
996 996 wmf = wmf.matches(narrowmatch)
997 997 if actions:
998 998 narrowactions = {}
999 999 for m, actionsfortype in pycompat.iteritems(actions):
1000 1000 narrowactions[m] = []
1001 1001 for (f, args, msg) in actionsfortype:
1002 1002 if narrowmatch(f):
1003 1003 narrowactions[m].append((f, args, msg))
1004 1004 actions = narrowactions
1005 1005
1006 1006 # build provisional merged manifest up
1007 1007 pmmf = set(wmf)
1008 1008
1009 1009 if actions:
1010 1010 # KEEP and EXEC are no-op
1011 1011 for m in (
1012 1012 ACTION_ADD,
1013 1013 ACTION_ADD_MODIFIED,
1014 1014 ACTION_FORGET,
1015 1015 ACTION_GET,
1016 1016 ACTION_CHANGED_DELETED,
1017 1017 ACTION_DELETED_CHANGED,
1018 1018 ):
1019 1019 for f, args, msg in actions[m]:
1020 1020 pmmf.add(f)
1021 1021 for f, args, msg in actions[ACTION_REMOVE]:
1022 1022 pmmf.discard(f)
1023 1023 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1024 1024 f2, flags = args
1025 1025 pmmf.discard(f2)
1026 1026 pmmf.add(f)
1027 1027 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1028 1028 pmmf.add(f)
1029 1029 for f, args, msg in actions[ACTION_MERGE]:
1030 1030 f1, f2, fa, move, anc = args
1031 1031 if move:
1032 1032 pmmf.discard(f1)
1033 1033 pmmf.add(f)
1034 1034
1035 1035 # check case-folding collision in provisional merged manifest
1036 1036 foldmap = {}
1037 1037 for f in pmmf:
1038 1038 fold = util.normcase(f)
1039 1039 if fold in foldmap:
1040 1040 raise error.Abort(
1041 1041 _(b"case-folding collision between %s and %s")
1042 1042 % (f, foldmap[fold])
1043 1043 )
1044 1044 foldmap[fold] = f
1045 1045
1046 1046 # check case-folding of directories
1047 1047 foldprefix = unfoldprefix = lastfull = b''
1048 1048 for fold, f in sorted(foldmap.items()):
1049 1049 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
1050 1050 # the folded prefix matches but actual casing is different
1051 1051 raise error.Abort(
1052 1052 _(b"case-folding collision between %s and directory of %s")
1053 1053 % (lastfull, f)
1054 1054 )
1055 1055 foldprefix = fold + b'/'
1056 1056 unfoldprefix = f + b'/'
1057 1057 lastfull = f
1058 1058
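# [editor's sketch] A minimal illustration of the collision check above,
# using str.lower() as a stand-in for util.normcase (the real fold is
# platform dependent); two paths folding to the same key abort the merge.
def find_casefold_collision(paths):
    foldmap = {}
    for f in paths:
        fold = f.lower()
        if fold in foldmap:
            return foldmap[fold], f  # the colliding pair
        foldmap[fold] = f
    return None

assert find_casefold_collision([b'README', b'docs/a', b'readme']) == (
    b'README',
    b'readme',
)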
1059 1059
1060 1060 def driverpreprocess(repo, ms, wctx, labels=None):
1061 1061 """run the preprocess step of the merge driver, if any
1062 1062
1063 1063 This is currently not implemented -- it's an extension point."""
1064 1064 return True
1065 1065
1066 1066
1067 1067 def driverconclude(repo, ms, wctx, labels=None):
1068 1068 """run the conclude step of the merge driver, if any
1069 1069
1070 1070 This is currently not implemented -- it's an extension point."""
1071 1071 return True
1072 1072
1073 1073
1074 1074 def _filesindirs(repo, manifest, dirs):
1075 1075 """
1076 1076 Generator that yields pairs of all the files in the manifest that are found
1077 1077 inside the directories listed in dirs, and which directory they are found
1078 1078 in.
1079 1079 """
1080 1080 for f in manifest:
1081 1081 for p in pathutil.finddirs(f):
1082 1082 if p in dirs:
1083 1083 yield f, p
1084 1084 break
1085 1085
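# [editor's note] A tiny usage sketch with hypothetical values: given a
# manifest containing b'a/b/c' and b'x/y' and dirs == {b'a/b'}, the generator
# yields (b'a/b/c', b'a/b'), pairing the file with the deepest conflicting
# parent directory returned by pathutil.finddirs().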
1086 1086
1087 1087 def checkpathconflicts(repo, wctx, mctx, actions):
1088 1088 """
1089 1089 Check if any actions introduce path conflicts in the repository, updating
1090 1090 actions to record or handle the path conflict accordingly.
1091 1091 """
1092 1092 mf = wctx.manifest()
1093 1093
1094 1094 # The set of local files that conflict with a remote directory.
1095 1095 localconflicts = set()
1096 1096
1097 1097 # The set of directories that conflict with a remote file, and so may cause
1098 1098 # conflicts if they still contain any files after the merge.
1099 1099 remoteconflicts = set()
1100 1100
1101 1101 # The set of directories that appear as both a file and a directory in the
1102 1102 # remote manifest. These indicate an invalid remote manifest, which
1103 1103 # can't be cleanly updated to.
1104 1104 invalidconflicts = set()
1105 1105
1106 1106 # The set of directories that contain files that are being created.
1107 1107 createdfiledirs = set()
1108 1108
1109 1109 # The set of files deleted by all the actions.
1110 1110 deletedfiles = set()
1111 1111
1112 1112 for f, (m, args, msg) in actions.items():
1113 1113 if m in (
1114 1114 ACTION_CREATED,
1115 1115 ACTION_DELETED_CHANGED,
1116 1116 ACTION_MERGE,
1117 1117 ACTION_CREATED_MERGE,
1118 1118 ):
1119 1119 # This action may create a new local file.
1120 1120 createdfiledirs.update(pathutil.finddirs(f))
1121 1121 if mf.hasdir(f):
1122 1122 # The file aliases a local directory. This might be ok if all
1123 1123 # the files in the local directory are being deleted. This
1124 1124 # will be checked once we know what all the deleted files are.
1125 1125 remoteconflicts.add(f)
1126 1126 # Track the names of all deleted files.
1127 1127 if m == ACTION_REMOVE:
1128 1128 deletedfiles.add(f)
1129 1129 if m == ACTION_MERGE:
1130 1130 f1, f2, fa, move, anc = args
1131 1131 if move:
1132 1132 deletedfiles.add(f1)
1133 1133 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1134 1134 f2, flags = args
1135 1135 deletedfiles.add(f2)
1136 1136
1137 1137 # Check all directories that contain created files for path conflicts.
1138 1138 for p in createdfiledirs:
1139 1139 if p in mf:
1140 1140 if p in mctx:
1141 1141 # A file is in a directory which aliases both a local
1142 1142 # and a remote file. This is an internal inconsistency
1143 1143 # within the remote manifest.
1144 1144 invalidconflicts.add(p)
1145 1145 else:
1146 1146 # A file is in a directory which aliases a local file.
1147 1147 # We will need to rename the local file.
1148 1148 localconflicts.add(p)
1149 1149 if p in actions and actions[p][0] in (
1150 1150 ACTION_CREATED,
1151 1151 ACTION_DELETED_CHANGED,
1152 1152 ACTION_MERGE,
1153 1153 ACTION_CREATED_MERGE,
1154 1154 ):
1155 1155 # The file is in a directory which aliases a remote file.
1156 1156 # This is an internal inconsistency within the remote
1157 1157 # manifest.
1158 1158 invalidconflicts.add(p)
1159 1159
1160 1160 # Rename all local conflicting files that have not been deleted.
1161 1161 for p in localconflicts:
1162 1162 if p not in deletedfiles:
1163 1163 ctxname = bytes(wctx).rstrip(b'+')
1164 1164 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1165 1165 actions[pnew] = (
1166 1166 ACTION_PATH_CONFLICT_RESOLVE,
1167 1167 (p,),
1168 1168 b'local path conflict',
1169 1169 )
1170 1170 actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
1171 1171
1172 1172 if remoteconflicts:
1173 1173 # Check if all files in the conflicting directories have been removed.
1174 1174 ctxname = bytes(mctx).rstrip(b'+')
1175 1175 for f, p in _filesindirs(repo, mf, remoteconflicts):
1176 1176 if f not in deletedfiles:
1177 1177 m, args, msg = actions[p]
1178 1178 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1179 1179 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1180 1180 # Action was merge, just update target.
1181 1181 actions[pnew] = (m, args, msg)
1182 1182 else:
1183 1183 # Action was create, change to renamed get action.
1184 1184 fl = args[0]
1185 1185 actions[pnew] = (
1186 1186 ACTION_LOCAL_DIR_RENAME_GET,
1187 1187 (p, fl),
1188 1188 b'remote path conflict',
1189 1189 )
1190 1190 actions[p] = (
1191 1191 ACTION_PATH_CONFLICT,
1192 1192 (pnew, ACTION_REMOVE),
1193 1193 b'path conflict',
1194 1194 )
1195 1195 remoteconflicts.remove(p)
1196 1196 break
1197 1197
1198 1198 if invalidconflicts:
1199 1199 for p in invalidconflicts:
1200 1200 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
1201 1201 raise error.Abort(_(b"destination manifest contains path conflicts"))
1202 1202
1203 1203
1204 1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1205 1205 """
1206 1206 Filters out actions that can be ignored because the repo is narrowed.
1207 1207
1208 1208 Raise an exception if the merge cannot be completed because the repo is
1209 1209 narrowed.
1210 1210 """
1211 1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1212 1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1213 1213 # We mutate the items in the dict during iteration, so iterate
1214 1214 # over a copy.
1215 1215 for f, action in list(actions.items()):
1216 1216 if narrowmatch(f):
1217 1217 pass
1218 1218 elif not branchmerge:
1219 1219 del actions[f] # just updating, ignore changes outside clone
1220 1220 elif action[0] in nooptypes:
1221 1221 del actions[f] # merge does not affect file
1222 1222 elif action[0] in nonconflicttypes:
1223 1223 raise error.Abort(
1224 1224 _(
1225 1225 b'merge affects file \'%s\' outside narrow, '
1226 1226 b'which is not yet supported'
1227 1227 )
1228 1228 % f,
1229 1229 hint=_(b'merging in the other direction may work'),
1230 1230 )
1231 1231 else:
1232 1232 raise error.Abort(
1233 1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1234 1234 )
1235 1235
1236 1236
1237 1237 def manifestmerge(
1238 1238 repo,
1239 1239 wctx,
1240 1240 p2,
1241 1241 pa,
1242 1242 branchmerge,
1243 1243 force,
1244 1244 matcher,
1245 1245 acceptremote,
1246 1246 followcopies,
1247 1247 forcefulldiff=False,
1248 1248 ):
1249 1249 """
1250 1250 Merge wctx and p2 with ancestor pa and generate merge action list
1251 1251
1252 1252 branchmerge and force are as passed in to update
1253 1253 matcher = matcher to filter file lists
1254 1254 acceptremote = accept the incoming changes without prompting
1255 1255 """
1256 1256 if matcher is not None and matcher.always():
1257 1257 matcher = None
1258 1258
1259 1259 copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}
1260 1260
1261 1261 # manifests fetched in order are going to be faster, so prime the caches
1262 1262 [
1263 1263 x.manifest()
1264 1264 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
1265 1265 ]
1266 1266
1267 1267 if followcopies:
1268 1268 ret = copies.mergecopies(repo, wctx, p2, pa)
1269 1269 copy, movewithdir, diverge, renamedelete, dirmove = ret
1270 1270
1271 1271 boolbm = pycompat.bytestr(bool(branchmerge))
1272 1272 boolf = pycompat.bytestr(bool(force))
1273 1273 boolm = pycompat.bytestr(bool(matcher))
1274 1274 repo.ui.note(_(b"resolving manifests\n"))
1275 1275 repo.ui.debug(
1276 1276 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
1277 1277 )
1278 1278 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
1279 1279
1280 1280 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
1281 1281 copied = set(copy.values())
1282 1282 copied.update(movewithdir.values())
1283 1283
1284 1284 if b'.hgsubstate' in m1 and wctx.rev() is None:
1285 1285 # Check whether sub state is modified, and overwrite the manifest
1286 1286 # to flag the change. If wctx is a committed revision, we shouldn't
1287 1287 # care for the dirty state of the working directory.
1288 1288 if any(wctx.sub(s).dirty() for s in wctx.substate):
1289 1289 m1[b'.hgsubstate'] = modifiednodeid
1290 1290
1291 1291 # Don't use m2-vs-ma optimization if:
1292 1292 # - ma is the same as m1 or m2, which we're just going to diff again later
1293 1293 # - The caller specifically asks for a full diff, which is useful during bid
1294 1294 # merge.
1295 1295 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
1296 1296 # Identify which files are relevant to the merge, so we can limit the
1297 1297 # total m1-vs-m2 diff to just those files. This has significant
1298 1298 # performance benefits in large repositories.
1299 1299 relevantfiles = set(ma.diff(m2).keys())
1300 1300
1301 1301 # For copied and moved files, we need to add the source file too.
1302 1302 for copykey, copyvalue in pycompat.iteritems(copy):
1303 1303 if copyvalue in relevantfiles:
1304 1304 relevantfiles.add(copykey)
1305 1305 for movedirkey in movewithdir:
1306 1306 relevantfiles.add(movedirkey)
1307 1307 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
1308 1308 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
1309 1309
1310 1310 diff = m1.diff(m2, match=matcher)
1311 1311
1312 1312 actions = {}
1313 1313 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
1314 1314 if n1 and n2: # file exists on both local and remote side
1315 1315 if f not in ma:
1316 1316 fa = copy.get(f, None)
1317 1317 if fa is not None:
1318 1318 actions[f] = (
1319 1319 ACTION_MERGE,
1320 1320 (f, f, fa, False, pa.node()),
1321 1321 b'both renamed from %s' % fa,
1322 1322 )
1323 1323 else:
1324 1324 actions[f] = (
1325 1325 ACTION_MERGE,
1326 1326 (f, f, None, False, pa.node()),
1327 1327 b'both created',
1328 1328 )
1329 1329 else:
1330 1330 a = ma[f]
1331 1331 fla = ma.flags(f)
1332 1332 nol = b'l' not in fl1 + fl2 + fla
1333 1333 if n2 == a and fl2 == fla:
1334 1334 actions[f] = (ACTION_KEEP, (), b'remote unchanged')
1335 1335 elif n1 == a and fl1 == fla: # local unchanged - use remote
1336 1336 if n1 == n2: # optimization: keep local content
1337 1337 actions[f] = (
1338 1338 ACTION_EXEC,
1339 1339 (fl2,),
1340 1340 b'update permissions',
1341 1341 )
1342 1342 else:
1343 1343 actions[f] = (
1344 1344 ACTION_GET,
1345 1345 (fl2, False),
1346 1346 b'remote is newer',
1347 1347 )
1348 1348 elif nol and n2 == a: # remote only changed 'x'
1349 1349 actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
1350 1350 elif nol and n1 == a: # local only changed 'x'
1351 1351 actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
1352 1352 else: # both changed something
1353 1353 actions[f] = (
1354 1354 ACTION_MERGE,
1355 1355 (f, f, f, False, pa.node()),
1356 1356 b'versions differ',
1357 1357 )
1358 1358 elif n1: # file exists only on local side
1359 1359 if f in copied:
1360 1360 pass # we'll deal with it on m2 side
1361 1361 elif f in movewithdir: # directory rename, move local
1362 1362 f2 = movewithdir[f]
1363 1363 if f2 in m2:
1364 1364 actions[f2] = (
1365 1365 ACTION_MERGE,
1366 1366 (f, f2, None, True, pa.node()),
1367 1367 b'remote directory rename, both created',
1368 1368 )
1369 1369 else:
1370 1370 actions[f2] = (
1371 1371 ACTION_DIR_RENAME_MOVE_LOCAL,
1372 1372 (f, fl1),
1373 1373 b'remote directory rename - move from %s' % f,
1374 1374 )
1375 1375 elif f in copy:
1376 1376 f2 = copy[f]
1377 1377 actions[f] = (
1378 1378 ACTION_MERGE,
1379 1379 (f, f2, f2, False, pa.node()),
1380 1380 b'local copied/moved from %s' % f2,
1381 1381 )
1382 1382 elif f in ma: # clean, a different, no remote
1383 1383 if n1 != ma[f]:
1384 1384 if acceptremote:
1385 1385 actions[f] = (ACTION_REMOVE, None, b'remote delete')
1386 1386 else:
1387 1387 actions[f] = (
1388 1388 ACTION_CHANGED_DELETED,
1389 1389 (f, None, f, False, pa.node()),
1390 1390 b'prompt changed/deleted',
1391 1391 )
1392 1392 elif n1 == addednodeid:
1393 1393 # This extra 'a' is added by working copy manifest to mark
1394 1394 # the file as locally added. We should forget it instead of
1395 1395 # deleting it.
1396 1396 actions[f] = (ACTION_FORGET, None, b'remote deleted')
1397 1397 else:
1398 1398 actions[f] = (ACTION_REMOVE, None, b'other deleted')
1399 1399 elif n2: # file exists only on remote side
1400 1400 if f in copied:
1401 1401 pass # we'll deal with it on m1 side
1402 1402 elif f in movewithdir:
1403 1403 f2 = movewithdir[f]
1404 1404 if f2 in m1:
1405 1405 actions[f2] = (
1406 1406 ACTION_MERGE,
1407 1407 (f2, f, None, False, pa.node()),
1408 1408 b'local directory rename, both created',
1409 1409 )
1410 1410 else:
1411 1411 actions[f2] = (
1412 1412 ACTION_LOCAL_DIR_RENAME_GET,
1413 1413 (f, fl2),
1414 1414 b'local directory rename - get from %s' % f,
1415 1415 )
1416 1416 elif f in copy:
1417 1417 f2 = copy[f]
1418 1418 if f2 in m2:
1419 1419 actions[f] = (
1420 1420 ACTION_MERGE,
1421 1421 (f2, f, f2, False, pa.node()),
1422 1422 b'remote copied from %s' % f2,
1423 1423 )
1424 1424 else:
1425 1425 actions[f] = (
1426 1426 ACTION_MERGE,
1427 1427 (f2, f, f2, True, pa.node()),
1428 1428 b'remote moved from %s' % f2,
1429 1429 )
1430 1430 elif f not in ma:
1431 1431 # local unknown, remote created: the logic is described by the
1432 1432 # following table:
1433 1433 #
1434 1434 # force branchmerge different | action
1435 1435 # n * * | create
1436 1436 # y n * | create
1437 1437 # y y n | create
1438 1438 # y y y | merge
1439 1439 #
1440 1440 # Checking whether the files are different is expensive, so we
1441 1441 # don't do that when we can avoid it.
1442 1442 if not force:
1443 1443 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1444 1444 elif not branchmerge:
1445 1445 actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
1446 1446 else:
1447 1447 actions[f] = (
1448 1448 ACTION_CREATED_MERGE,
1449 1449 (fl2, pa.node()),
1450 1450 b'remote created, get or merge',
1451 1451 )
1452 1452 elif n2 != ma[f]:
1453 1453 df = None
1454 1454 for d in dirmove:
1455 1455 if f.startswith(d):
1456 1456 # new file added in a directory that was moved
1457 1457 df = dirmove[d] + f[len(d) :]
1458 1458 break
1459 1459 if df is not None and df in m1:
1460 1460 actions[df] = (
1461 1461 ACTION_MERGE,
1462 1462 (df, f, f, False, pa.node()),
1463 1463 b'local directory rename - respect move '
1464 1464 b'from %s' % f,
1465 1465 )
1466 1466 elif acceptremote:
1467 1467 actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
1468 1468 else:
1469 1469 actions[f] = (
1470 1470 ACTION_DELETED_CHANGED,
1471 1471 (None, f, f, False, pa.node()),
1472 1472 b'prompt deleted/changed',
1473 1473 )
1474 1474
1475 1475 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1476 1476 # If we are merging, look for path conflicts.
1477 1477 checkpathconflicts(repo, wctx, p2, actions)
1478 1478
1479 1479 narrowmatch = repo.narrowmatch()
1480 1480 if not narrowmatch.always():
1481 1481 # Updates "actions" in place
1482 1482 _filternarrowactions(narrowmatch, branchmerge, actions)
1483 1483
1484 1484 return actions, diverge, renamedelete
1485 1485
1486 1486
1487 1487 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
1488 1488 """Resolves false conflicts where the nodeid changed but the content
1489 1489 remained the same."""
1490 1490 # We force a copy of actions.items() because we're going to mutate
1491 1491 # actions as we resolve trivial conflicts.
1492 1492 for f, (m, args, msg) in list(actions.items()):
1493 1493 if (
1494 1494 m == ACTION_CHANGED_DELETED
1495 1495 and f in ancestor
1496 1496 and not wctx[f].cmp(ancestor[f])
1497 1497 ):
1498 1498 # local did change but ended up with same content
1499 1499 actions[f] = ACTION_REMOVE, None, b'prompt same'
1500 1500 elif (
1501 1501 m == ACTION_DELETED_CHANGED
1502 1502 and f in ancestor
1503 1503 and not mctx[f].cmp(ancestor[f])
1504 1504 ):
1505 1505 # remote did change but ended up with same content
1506 1506 del actions[f] # don't get = keep local deleted
1507 1507
1508 1508
1509 1509 def calculateupdates(
1510 1510 repo,
1511 1511 wctx,
1512 1512 mctx,
1513 1513 ancestors,
1514 1514 branchmerge,
1515 1515 force,
1516 1516 acceptremote,
1517 1517 followcopies,
1518 1518 matcher=None,
1519 1519 mergeforce=False,
1520 1520 ):
1521 1521 """Calculate the actions needed to merge mctx into wctx using ancestors"""
1522 1522 # Avoid cycle.
1523 1523 from . import sparse
1524 1524
1525 1525 if len(ancestors) == 1: # default
1526 1526 actions, diverge, renamedelete = manifestmerge(
1527 1527 repo,
1528 1528 wctx,
1529 1529 mctx,
1530 1530 ancestors[0],
1531 1531 branchmerge,
1532 1532 force,
1533 1533 matcher,
1534 1534 acceptremote,
1535 1535 followcopies,
1536 1536 )
1537 1537 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1538 1538
1539 1539 else: # only when merge.preferancestor=* - the default
1540 1540 repo.ui.note(
1541 1541 _(b"note: merging %s and %s using bids from ancestors %s\n")
1542 1542 % (
1543 1543 wctx,
1544 1544 mctx,
1545 1545 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1546 1546 )
1547 1547 )
1548 1548
1549 1549 # Call for bids
1550 1550 fbids = (
1551 1551 {}
1552 1552 ) # mapping filename to bids (action method to list of actions)
1553 1553 diverge, renamedelete = None, None
1554 1554 for ancestor in ancestors:
1555 1555 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1556 1556 actions, diverge1, renamedelete1 = manifestmerge(
1557 1557 repo,
1558 1558 wctx,
1559 1559 mctx,
1560 1560 ancestor,
1561 1561 branchmerge,
1562 1562 force,
1563 1563 matcher,
1564 1564 acceptremote,
1565 1565 followcopies,
1566 1566 forcefulldiff=True,
1567 1567 )
1568 1568 _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)
1569 1569
1570 1570 # Track the shortest set of warnings on the theory that bid
1571 1571 # merge will correctly incorporate more information
1572 1572 if diverge is None or len(diverge1) < len(diverge):
1573 1573 diverge = diverge1
1574 1574 if renamedelete is None or len(renamedelete) < len(renamedelete1):
1575 1575 renamedelete = renamedelete1
1576 1576
1577 1577 for f, a in sorted(pycompat.iteritems(actions)):
1578 1578 m, args, msg = a
1579 1579 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1580 1580 if f in fbids:
1581 1581 d = fbids[f]
1582 1582 if m in d:
1583 1583 d[m].append(a)
1584 1584 else:
1585 1585 d[m] = [a]
1586 1586 else:
1587 1587 fbids[f] = {m: [a]}
1588 1588
1589 1589 # Pick the best bid for each file
1590 1590 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1591 1591 actions = {}
1592 1592 for f, bids in sorted(fbids.items()):
1593 1593 # bids is a mapping from action method to list of actions
1594 1594 # Consensus?
1595 1595 if len(bids) == 1: # all bids are the same kind of method
1596 1596 m, l = list(bids.items())[0]
1597 1597 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1598 1598 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1599 1599 actions[f] = l[0]
1600 1600 continue
1601 1601 # If keep is an option, just do it.
1602 1602 if ACTION_KEEP in bids:
1603 1603 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1604 1604 actions[f] = bids[ACTION_KEEP][0]
1605 1605 continue
1606 1606 # If there are gets and they all agree [how could they not?], do it.
1607 1607 if ACTION_GET in bids:
1608 1608 ga0 = bids[ACTION_GET][0]
1609 1609 if all(a == ga0 for a in bids[ACTION_GET][1:]):
1610 1610 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1611 1611 actions[f] = ga0
1612 1612 continue
1613 1613 # TODO: Consider other simple actions such as mode changes
1614 1614 # Handle inefficient democrazy.
1615 1615 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1616 1616 for m, l in sorted(bids.items()):
1617 1617 for _f, args, msg in l:
1618 1618 repo.ui.note(b' %s -> %s\n' % (msg, m))
1619 1619 # Pick random action. TODO: Instead, prompt user when resolving
1620 1620 m, l = list(bids.items())[0]
1621 1621 repo.ui.warn(
1622 1622 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1623 1623 )
1624 1624 actions[f] = l[0]
1625 1625 continue
1626 1626 repo.ui.note(_(b'end of auction\n\n'))
1627 1627
1628 1628 if wctx.rev() is None:
1629 1629 fractions = _forgetremoved(wctx, mctx, branchmerge)
1630 1630 actions.update(fractions)
1631 1631
1632 1632 prunedactions = sparse.filterupdatesactions(
1633 1633 repo, wctx, mctx, branchmerge, actions
1634 1634 )
1635 1635 _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)
1636 1636
1637 1637 return prunedactions, diverge, renamedelete
1638 1638
1639 1639
1640 1640 def _getcwd():
1641 1641 try:
1642 1642 return encoding.getcwd()
1643 1643 except OSError as err:
1644 1644 if err.errno == errno.ENOENT:
1645 1645 return None
1646 1646 raise
1647 1647
1648 1648
1649 1649 def batchremove(repo, wctx, actions):
1650 1650 """apply removes to the working directory
1651 1651
1652 1652 yields tuples for progress updates
1653 1653 """
1654 1654 verbose = repo.ui.verbose
1655 1655 cwd = _getcwd()
1656 1656 i = 0
1657 1657 for f, args, msg in actions:
1658 1658 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1659 1659 if verbose:
1660 1660 repo.ui.note(_(b"removing %s\n") % f)
1661 1661 wctx[f].audit()
1662 1662 try:
1663 1663 wctx[f].remove(ignoremissing=True)
1664 1664 except OSError as inst:
1665 1665 repo.ui.warn(
1666 1666 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1667 1667 )
1668 1668 if i == 100:
1669 1669 yield i, f
1670 1670 i = 0
1671 1671 i += 1
1672 1672 if i > 0:
1673 1673 yield i, f
1674 1674
1675 1675 if cwd and not _getcwd():
1676 1676 # cwd was removed in the course of removing files; print a helpful
1677 1677 # warning.
1678 1678 repo.ui.warn(
1679 1679 _(
1680 1680 b"current directory was removed\n"
1681 1681 b"(consider changing to repo root: %s)\n"
1682 1682 )
1683 1683 % repo.root
1684 1684 )
1685 1685
1686 1686
1687 1687 def batchget(repo, mctx, wctx, wantfiledata, actions):
1688 1688 """apply gets to the working directory
1689 1689
1690 1690 mctx is the context to get from
1691 1691
1692 1692 Yields arbitrarily many (False, tuple) for progress updates, followed by
1693 1693 exactly one (True, filedata). When wantfiledata is false, filedata is an
1694 1694 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1695 1695 mtime) of the file f written for each action.
1696 1696 """
1697 1697 filedata = {}
1698 1698 verbose = repo.ui.verbose
1699 1699 fctx = mctx.filectx
1700 1700 ui = repo.ui
1701 1701 i = 0
1702 1702 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1703 1703 for f, (flags, backup), msg in actions:
1704 1704 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1705 1705 if verbose:
1706 1706 repo.ui.note(_(b"getting %s\n") % f)
1707 1707
1708 1708 if backup:
1709 1709 # If a file or directory exists with the same name, back that
1710 1710 # up. Otherwise, look to see if there is a file that conflicts
1711 1711 # with a directory this file is in, and if so, back that up.
1712 1712 conflicting = f
1713 1713 if not repo.wvfs.lexists(f):
1714 1714 for p in pathutil.finddirs(f):
1715 1715 if repo.wvfs.isfileorlink(p):
1716 1716 conflicting = p
1717 1717 break
1718 1718 if repo.wvfs.lexists(conflicting):
1719 1719 orig = scmutil.backuppath(ui, repo, conflicting)
1720 1720 util.rename(repo.wjoin(conflicting), orig)
1721 1721 wfctx = wctx[f]
1722 1722 wfctx.clearunknown()
1723 1723 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1724 1724 size = wfctx.write(
1725 1725 fctx(f).data(),
1726 1726 flags,
1727 1727 backgroundclose=True,
1728 1728 atomictemp=atomictemp,
1729 1729 )
1730 1730 if wantfiledata:
1731 1731 s = wfctx.lstat()
1732 1732 mode = s.st_mode
1733 1733 mtime = s[stat.ST_MTIME]
1734 1734 filedata[f] = (mode, size, mtime) # for dirstate.normal
1735 1735 if i == 100:
1736 1736 yield False, (i, f)
1737 1737 i = 0
1738 1738 i += 1
1739 1739 if i > 0:
1740 1740 yield False, (i, f)
1741 1741 yield True, filedata
1742 1742
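# [editor's sketch] A self-contained demo of the yield protocol documented
# above -- progress tuples first, then exactly one (True, filedata) result;
# applyupdates() below consumes the real generator the same way via
# worker.worker(..., hasretval=True).
def _demo_batchget():
    yield False, (100, b'a.txt')  # progress: 100 files written, last was a.txt
    yield False, (3, b'z.txt')
    yield True, {b'z.txt': (0o644, 12, 0)}  # final: filedata for dirstate

filedata = {}
for final, res in _demo_batchget():
    if final:
        filedata = res
assert b'z.txt' in filedata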
1743 1743
1744 1744 def _prefetchfiles(repo, ctx, actions):
1745 1745 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1746 1746 of merge actions. ``ctx`` is the context being merged in."""
1747 1747
1748 1748 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1749 1749 # don't touch the context to be merged in. 'cd' is skipped, because
1750 1750 # changed/deleted never resolves to something from the remote side.
1751 1751 oplist = [
1752 1752 actions[a]
1753 1753 for a in (
1754 1754 ACTION_GET,
1755 1755 ACTION_DELETED_CHANGED,
1756 1756 ACTION_LOCAL_DIR_RENAME_GET,
1757 1757 ACTION_MERGE,
1758 1758 )
1759 1759 ]
1760 1760 prefetch = scmutil.prefetchfiles
1761 1761 matchfiles = scmutil.matchfiles
1762 1762 prefetch(
1763 1763 repo,
1764 1764 [ctx.rev()],
1765 1765 matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
1766 1766 )
1767 1767
1768 1768
1769 1769 @attr.s(frozen=True)
1770 1770 class updateresult(object):
1771 1771 updatedcount = attr.ib()
1772 1772 mergedcount = attr.ib()
1773 1773 removedcount = attr.ib()
1774 1774 unresolvedcount = attr.ib()
1775 1775
1776 1776 def isempty(self):
1777 1777 return not (
1778 1778 self.updatedcount
1779 1779 or self.mergedcount
1780 1780 or self.removedcount
1781 1781 or self.unresolvedcount
1782 1782 )
1783 1783
1784 1784
1785 1785 def emptyactions():
1786 1786 """create an actions dict, to be populated and passed to applyupdates()"""
1787 1787 return dict(
1788 1788 (m, [])
1789 1789 for m in (
1790 1790 ACTION_ADD,
1791 1791 ACTION_ADD_MODIFIED,
1792 1792 ACTION_FORGET,
1793 1793 ACTION_GET,
1794 1794 ACTION_CHANGED_DELETED,
1795 1795 ACTION_DELETED_CHANGED,
1796 1796 ACTION_REMOVE,
1797 1797 ACTION_DIR_RENAME_MOVE_LOCAL,
1798 1798 ACTION_LOCAL_DIR_RENAME_GET,
1799 1799 ACTION_MERGE,
1800 1800 ACTION_EXEC,
1801 1801 ACTION_KEEP,
1802 1802 ACTION_PATH_CONFLICT,
1803 1803 ACTION_PATH_CONFLICT_RESOLVE,
1804 1804 )
1805 1805 )
1806 1806
1807 1807
1808 1808 def applyupdates(
1809 1809 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1810 1810 ):
1811 1811 """apply the merge action list to the working directory
1812 1812
1813 1813 wctx is the working copy context
1814 1814 mctx is the context to be merged into the working copy
1815 1815
1816 1816 Return a tuple of (counts, filedata), where counts is a tuple
1817 1817 (updated, merged, removed, unresolved) that describes how many
1818 1818 files were affected by the update, and filedata is as described in
1819 1819 batchget.
1820 1820 """
1821 1821
1822 1822 _prefetchfiles(repo, mctx, actions)
1823 1823
1824 1824 updated, merged, removed = 0, 0, 0
1825 1825 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
1826 1826 moves = []
1827 1827 for m, l in actions.items():
1828 1828 l.sort()
1829 1829
1830 1830 # 'cd' and 'dc' actions are treated like other merge conflicts
1831 1831 mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
1832 1832 mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
1833 1833 mergeactions.extend(actions[ACTION_MERGE])
1834 1834 for f, args, msg in mergeactions:
1835 1835 f1, f2, fa, move, anc = args
1836 1836 if f == b'.hgsubstate': # merged internally
1837 1837 continue
1838 1838 if f1 is None:
1839 1839 fcl = filemerge.absentfilectx(wctx, fa)
1840 1840 else:
1841 1841 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1842 1842 fcl = wctx[f1]
1843 1843 if f2 is None:
1844 1844 fco = filemerge.absentfilectx(mctx, fa)
1845 1845 else:
1846 1846 fco = mctx[f2]
1847 1847 actx = repo[anc]
1848 1848 if fa in actx:
1849 1849 fca = actx[fa]
1850 1850 else:
1851 1851 # TODO: move to absentfilectx
1852 1852 fca = repo.filectx(f1, fileid=nullrev)
1853 1853 ms.add(fcl, fco, fca, f)
1854 1854 if f1 != f and move:
1855 1855 moves.append(f1)
1856 1856
1857 1857 # remove renamed files after safely stored
1858 1858 for f in moves:
1859 1859 if wctx[f].lexists():
1860 1860 repo.ui.debug(b"removing %s\n" % f)
1861 1861 wctx[f].audit()
1862 1862 wctx[f].remove()
1863 1863
1864 1864 numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
1865 1865 progress = repo.ui.makeprogress(
1866 1866 _(b'updating'), unit=_(b'files'), total=numupdates
1867 1867 )
1868 1868
1869 1869 if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
1870 1870 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1871 1871
1872 1872 # record path conflicts
1873 1873 for f, args, msg in actions[ACTION_PATH_CONFLICT]:
1874 1874 f1, fo = args
1875 1875 s = repo.ui.status
1876 1876 s(
1877 1877 _(
1878 1878 b"%s: path conflict - a file or link has the same name as a "
1879 1879 b"directory\n"
1880 1880 )
1881 1881 % f
1882 1882 )
1883 1883 if fo == b'l':
1884 1884 s(_(b"the local file has been renamed to %s\n") % f1)
1885 1885 else:
1886 1886 s(_(b"the remote file has been renamed to %s\n") % f1)
1887 1887 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1888 1888 ms.addpath(f, f1, fo)
1889 1889 progress.increment(item=f)
1890 1890
1891 1891 # When merging in-memory, we can't support worker processes, so set the
1892 1892 # per-item cost at 0 in that case.
1893 1893 cost = 0 if wctx.isinmemory() else 0.001
1894 1894
1895 1895 # remove in parallel (must come before resolving path conflicts and getting)
1896 1896 prog = worker.worker(
1897 1897 repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
1898 1898 )
1899 1899 for i, item in prog:
1900 1900 progress.increment(step=i, item=item)
1901 1901 removed = len(actions[ACTION_REMOVE])
1902 1902
1903 1903 # resolve path conflicts (must come before getting)
1904 1904 for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
1905 1905 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1906 1906 (f0,) = args
1907 1907 if wctx[f0].lexists():
1908 1908 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1909 1909 wctx[f].audit()
1910 1910 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1911 1911 wctx[f0].remove()
1912 1912 progress.increment(item=f)
1913 1913
1914 1914 # get in parallel.
1915 1915 threadsafe = repo.ui.configbool(
1916 1916 b'experimental', b'worker.wdir-get-thread-safe'
1917 1917 )
1918 1918 prog = worker.worker(
1919 1919 repo.ui,
1920 1920 cost,
1921 1921 batchget,
1922 1922 (repo, mctx, wctx, wantfiledata),
1923 1923 actions[ACTION_GET],
1924 1924 threadsafe=threadsafe,
1925 1925 hasretval=True,
1926 1926 )
1927 1927 getfiledata = {}
1928 1928 for final, res in prog:
1929 1929 if final:
1930 1930 getfiledata = res
1931 1931 else:
1932 1932 i, item = res
1933 1933 progress.increment(step=i, item=item)
1934 1934 updated = len(actions[ACTION_GET])
1935 1935
1936 1936 if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
1937 1937 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1938 1938
1939 1939 # forget (manifest only, just log it) (must come first)
1940 1940 for f, args, msg in actions[ACTION_FORGET]:
1941 1941 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1942 1942 progress.increment(item=f)
1943 1943
1944 1944 # re-add (manifest only, just log it)
1945 1945 for f, args, msg in actions[ACTION_ADD]:
1946 1946 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1947 1947 progress.increment(item=f)
1948 1948
1949 1949 # re-add/mark as modified (manifest only, just log it)
1950 1950 for f, args, msg in actions[ACTION_ADD_MODIFIED]:
1951 1951 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1952 1952 progress.increment(item=f)
1953 1953
1954 1954 # keep (noop, just log it)
1955 1955 for f, args, msg in actions[ACTION_KEEP]:
1956 1956 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1957 1957 # no progress
1958 1958
1959 1959 # directory rename, move local
1960 1960 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1961 1961 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1962 1962 progress.increment(item=f)
1963 1963 f0, flags = args
1964 1964 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1965 1965 wctx[f].audit()
1966 1966 wctx[f].write(wctx.filectx(f0).data(), flags)
1967 1967 wctx[f0].remove()
1968 1968 updated += 1
1969 1969
1970 1970 # local directory rename, get
1971 1971 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1972 1972 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1973 1973 progress.increment(item=f)
1974 1974 f0, flags = args
1975 1975 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1976 1976 wctx[f].write(mctx.filectx(f0).data(), flags)
1977 1977 updated += 1
1978 1978
1979 1979 # exec
1980 1980 for f, args, msg in actions[ACTION_EXEC]:
1981 1981 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1982 1982 progress.increment(item=f)
1983 1983 (flags,) = args
1984 1984 wctx[f].audit()
1985 1985 wctx[f].setflags(b'l' in flags, b'x' in flags)
1986 1986 updated += 1
1987 1987
1988 1988 # the ordering is important here -- ms.mergedriver will raise if the merge
1989 1989 # driver has changed, and we want to be able to bypass it when overwrite is
1990 1990 # True
1991 1991 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1992 1992
1993 1993 if usemergedriver:
1994 1994 if wctx.isinmemory():
1995 1995 raise error.InMemoryMergeConflictsError(
1996 1996 b"in-memory merge does not support mergedriver"
1997 1997 )
1998 1998 ms.commit()
1999 1999 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
2000 2000 # the driver might leave some files unresolved
2001 2001 unresolvedf = set(ms.unresolved())
2002 2002 if not proceed:
2003 2003 # XXX setting unresolved to at least 1 is a hack to make sure we
2004 2004 # error out
2005 2005 return updateresult(
2006 2006 updated, merged, removed, max(len(unresolvedf), 1)
2007 2007 )
2008 2008 newactions = []
2009 2009 for f, args, msg in mergeactions:
2010 2010 if f in unresolvedf:
2011 2011 newactions.append((f, args, msg))
2012 2012 mergeactions = newactions
2013 2013
2014 2014 try:
2015 2015 # premerge
2016 2016 tocomplete = []
2017 2017 for f, args, msg in mergeactions:
2018 2018 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
2019 2019 progress.increment(item=f)
2020 2020 if f == b'.hgsubstate': # subrepo states need updating
2021 2021 subrepoutil.submerge(
2022 2022 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
2023 2023 )
2024 2024 continue
2025 2025 wctx[f].audit()
2026 2026 complete, r = ms.preresolve(f, wctx)
2027 2027 if not complete:
2028 2028 numupdates += 1
2029 2029 tocomplete.append((f, args, msg))
2030 2030
2031 2031 # merge
2032 2032 for f, args, msg in tocomplete:
2033 2033 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
2034 2034 progress.increment(item=f, total=numupdates)
2035 2035 ms.resolve(f, wctx)
2036 2036
2037 2037 finally:
2038 2038 ms.commit()
2039 2039
2040 2040 unresolved = ms.unresolvedcount()
2041 2041
2042 2042 if (
2043 2043 usemergedriver
2044 2044 and not unresolved
2045 2045 and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
2046 2046 ):
2047 2047 if not driverconclude(repo, ms, wctx, labels=labels):
2048 2048 # XXX setting unresolved to at least 1 is a hack to make sure we
2049 2049 # error out
2050 2050 unresolved = max(unresolved, 1)
2051 2051
2052 2052 ms.commit()
2053 2053
2054 2054 msupdated, msmerged, msremoved = ms.counts()
2055 2055 updated += msupdated
2056 2056 merged += msmerged
2057 2057 removed += msremoved
2058 2058
2059 2059 extraactions = ms.actions()
2060 2060 if extraactions:
2061 2061 mfiles = set(a[0] for a in actions[ACTION_MERGE])
2062 2062 for k, acts in pycompat.iteritems(extraactions):
2063 2063 actions[k].extend(acts)
2064 2064 if k == ACTION_GET and wantfiledata:
2065 2065 # no filedata until mergestate is updated to provide it
2066 2066 for a in acts:
2067 2067 getfiledata[a[0]] = None
2068 2068 # Remove these files from actions[ACTION_MERGE] as well. This is
2069 2069 # important because in recordupdates, files in actions[ACTION_MERGE]
2070 2070 # are processed after files in other actions, and the merge driver
2071 2071 # might add files to those actions via extraactions above. This can
2072 2072 # lead to a file being recorded twice, with poor results. This is
2073 2073 # especially problematic for actions[ACTION_REMOVE] (currently only
2074 2074 # possible with the merge driver in the initial merge process;
2075 2075 # interrupted merges don't go through this flow).
2076 2076 #
2077 2077 # The real fix here is to have indexes by both file and action so
2078 2078 # that when the action for a file is changed it is automatically
2079 2079 # reflected in the other action lists. But that involves a more
2080 2080 # complex data structure, so this will do for now.
2081 2081 #
2082 2082 # We don't need to do the same operation for 'dc' and 'cd' because
2083 2083 # those lists aren't consulted again.
2084 2084 mfiles.difference_update(a[0] for a in acts)
2085 2085
2086 2086 actions[ACTION_MERGE] = [
2087 2087 a for a in actions[ACTION_MERGE] if a[0] in mfiles
2088 2088 ]
2089 2089
2090 2090 progress.complete()
2091 2091 assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
2092 2092 return updateresult(updated, merged, removed, unresolved), getfiledata
2093 2093
2094 2094
2095 2095 def recordupdates(repo, actions, branchmerge, getfiledata):
2096 2096 """record merge actions to the dirstate"""
2097 2097 # remove (must come first)
2098 2098 for f, args, msg in actions.get(ACTION_REMOVE, []):
2099 2099 if branchmerge:
2100 2100 repo.dirstate.remove(f)
2101 2101 else:
2102 2102 repo.dirstate.drop(f)
2103 2103
2104 2104 # forget (must come first)
2105 2105 for f, args, msg in actions.get(ACTION_FORGET, []):
2106 2106 repo.dirstate.drop(f)
2107 2107
2108 2108 # resolve path conflicts
2109 2109 for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
2110 2110 (f0,) = args
2111 2111 origf0 = repo.dirstate.copied(f0) or f0
2112 2112 repo.dirstate.add(f)
2113 2113 repo.dirstate.copy(origf0, f)
2114 2114 if f0 == origf0:
2115 2115 repo.dirstate.remove(f0)
2116 2116 else:
2117 2117 repo.dirstate.drop(f0)
2118 2118
2119 2119 # re-add
2120 2120 for f, args, msg in actions.get(ACTION_ADD, []):
2121 2121 repo.dirstate.add(f)
2122 2122
2123 2123 # re-add/mark as modified
2124 2124 for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
2125 2125 if branchmerge:
2126 2126 repo.dirstate.normallookup(f)
2127 2127 else:
2128 2128 repo.dirstate.add(f)
2129 2129
2130 2130 # exec change
2131 2131 for f, args, msg in actions.get(ACTION_EXEC, []):
2132 2132 repo.dirstate.normallookup(f)
2133 2133
2134 2134 # keep
2135 2135 for f, args, msg in actions.get(ACTION_KEEP, []):
2136 2136 pass
2137 2137
2138 2138 # get
2139 2139 for f, args, msg in actions.get(ACTION_GET, []):
2140 2140 if branchmerge:
2141 2141 repo.dirstate.otherparent(f)
2142 2142 else:
2143 2143 parentfiledata = getfiledata[f] if getfiledata else None
2144 2144 repo.dirstate.normal(f, parentfiledata=parentfiledata)
2145 2145
2146 2146 # merge
2147 2147 for f, args, msg in actions.get(ACTION_MERGE, []):
2148 2148 f1, f2, fa, move, anc = args
2149 2149 if branchmerge:
2150 2150 # We've done a branch merge, mark this file as merged
2151 2151 # so that we properly record the merger later
2152 2152 repo.dirstate.merge(f)
2153 2153 if f1 != f2: # copy/rename
2154 2154 if move:
2155 2155 repo.dirstate.remove(f1)
2156 2156 if f1 != f:
2157 2157 repo.dirstate.copy(f1, f)
2158 2158 else:
2159 2159 repo.dirstate.copy(f2, f)
2160 2160 else:
2161 2161 # We've update-merged a locally modified file, so
2162 2162 # we set the dirstate to emulate a normal checkout
2163 2163 # of that file some time in the past. Thus our
2164 2164 # merge will appear as a normal local file
2165 2165 # modification.
2166 2166 if f2 == f: # file not locally copied/moved
2167 2167 repo.dirstate.normallookup(f)
2168 2168 if move:
2169 2169 repo.dirstate.drop(f1)
2170 2170
2171 2171 # directory rename, move local
2172 2172 for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
2173 2173 f0, flag = args
2174 2174 if branchmerge:
2175 2175 repo.dirstate.add(f)
2176 2176 repo.dirstate.remove(f0)
2177 2177 repo.dirstate.copy(f0, f)
2178 2178 else:
2179 2179 repo.dirstate.normal(f)
2180 2180 repo.dirstate.drop(f0)
2181 2181
2182 2182 # directory rename, get
2183 2183 for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
2184 2184 f0, flag = args
2185 2185 if branchmerge:
2186 2186 repo.dirstate.add(f)
2187 2187 repo.dirstate.copy(f0, f)
2188 2188 else:
2189 2189 repo.dirstate.normal(f)
2190 2190
2191 2191
2192 2192 UPDATECHECK_ABORT = b'abort' # handled at higher layers
2193 2193 UPDATECHECK_NONE = b'none'
2194 2194 UPDATECHECK_LINEAR = b'linear'
2195 2195 UPDATECHECK_NO_CONFLICT = b'noconflict'
2196 2196
2197 2197
2198 2198 def update(
2199 2199 repo,
2200 2200 node,
2201 2201 branchmerge,
2202 2202 force,
2203 2203 ancestor=None,
2204 2204 mergeancestor=False,
2205 2205 labels=None,
2206 2206 matcher=None,
2207 2207 mergeforce=False,
2208 2208 updatecheck=None,
2209 2209 wc=None,
2210 2210 ):
2211 2211 """
2212 2212 Perform a merge between the working directory and the given node
2213 2213
2214 2214 node = the node to update to
2215 2215 branchmerge = whether to merge between branches
2216 2216 force = whether to force branch merging or file overwriting
2217 2217 matcher = a matcher to filter file lists (dirstate not updated)
2218 2218 mergeancestor = whether it is merging with an ancestor. If true,
2219 2219 we should accept the incoming changes for any prompts that occur.
2220 2220 If false, merging with an ancestor (fast-forward) is only allowed
2221 2221 between different named branches. This flag is used by the rebase extension
2222 2222 as a temporary fix and should be avoided in general.
2223 2223 labels = labels to use for base, local and other
2224 2224 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2225 2225 this is True, then 'force' should be True as well.
2226 2226
2227 2227 The table below shows all the behaviors of the update command given the
2228 2228 -c/--check and -C/--clean or no options, whether the working directory is
2229 2229 dirty, whether a revision is specified, and the relationship of the parent
2230 2230 rev to the target rev (linear or not). Match from top first. The -n
2231 2231 option doesn't exist on the command line, but represents the
2232 2232 experimental.updatecheck=noconflict option.
2233 2233
2234 2234 This logic is tested by test-update-branches.t.
2235 2235
2236 2236 -c -C -n -m dirty rev linear | result
2237 2237 y y * * * * * | (1)
2238 2238 y * y * * * * | (1)
2239 2239 y * * y * * * | (1)
2240 2240 * y y * * * * | (1)
2241 2241 * y * y * * * | (1)
2242 2242 * * y y * * * | (1)
2243 2243 * * * * * n n | x
2244 2244 * * * * n * * | ok
2245 2245 n n n n y * y | merge
2246 2246 n n n n y y n | (2)
2247 2247 n n n y y * * | merge
2248 2248 n n y n y * * | merge if no conflict
2249 2249 n y n n y * * | discard
2250 2250 y n n n y * * | (3)
2251 2251
2252 2252 x = can't happen
2253 2253 * = don't-care
2254 2254 1 = incompatible options (checked in commands.py)
2255 2255 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2256 2256 3 = abort: uncommitted changes (checked in commands.py)
2257 2257
2258 2258 The merge is performed inside ``wc``, a workingctx-like object. It defaults
2259 2259 to repo[None] if None is passed.
2260 2260
2261 2261 Return the same tuple as applyupdates().
2262 2262 """
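# Editor's note (not part of the original source): reading one row of the
# table above, "n n n n y * y | merge" means that with none of -c/-C/-n/-m,
# a dirty working directory and a linear move to the target revision, the
# update proceeds and merges the local changes into the destination.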
2263 2263 # Avoid cycle.
2264 2264 from . import sparse
2265 2265
2266 2266 # This function used to find the default destination if node was None, but
2267 2267 # that's now in destutil.py.
2268 2268 assert node is not None
2269 2269 if not branchmerge and not force:
2270 2270 # TODO: remove the default once all callers that pass branchmerge=False
2271 2271 # and force=False pass a value for updatecheck. We may want to allow
2272 2272 # updatecheck='abort' to better support some of these callers.
2273 2273 if updatecheck is None:
2274 2274 updatecheck = UPDATECHECK_LINEAR
2275 2275 if updatecheck not in (
2276 2276 UPDATECHECK_NONE,
2277 2277 UPDATECHECK_LINEAR,
2278 2278 UPDATECHECK_NO_CONFLICT,
2279 2279 ):
2280 2280 raise ValueError(
2281 2281 r'Invalid updatecheck %r (can accept %r)'
2282 2282 % (
2283 2283 updatecheck,
2284 2284 (
2285 2285 UPDATECHECK_NONE,
2286 2286 UPDATECHECK_LINEAR,
2287 2287 UPDATECHECK_NO_CONFLICT,
2288 2288 ),
2289 2289 )
2290 2290 )
2291 2291 # If we're doing a partial update, we need to skip updating
2292 2292 # the dirstate, so make a note of any partial-ness to the
2293 2293 # update here.
2294 2294 if matcher is None or matcher.always():
2295 2295 partial = False
2296 2296 else:
2297 2297 partial = True
2298 2298 with repo.wlock():
2299 2299 if wc is None:
2300 2300 wc = repo[None]
2301 2301 pl = wc.parents()
2302 2302 p1 = pl[0]
2303 2303 p2 = repo[node]
2304 2304 if ancestor is not None:
2305 2305 pas = [repo[ancestor]]
2306 2306 else:
2307 2307 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2308 2308 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2309 2309 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2310 2310 else:
2311 2311 pas = [p1.ancestor(p2, warn=branchmerge)]
2312 2312
2313 2313 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2314 2314
2315 2315 overwrite = force and not branchmerge
2316 2316 ### check phase
2317 2317 if not overwrite:
2318 2318 if len(pl) > 1:
2319 2319 raise error.Abort(_(b"outstanding uncommitted merge"))
2320 2320 ms = mergestate.read(repo)
2321 2321 if list(ms.unresolved()):
2322 2322 raise error.Abort(
2323 2323 _(b"outstanding merge conflicts"),
2324 2324 hint=_(b"use 'hg resolve' to resolve"),
2325 2325 )
2326 2326 if branchmerge:
2327 2327 if pas == [p2]:
2328 2328 raise error.Abort(
2329 2329 _(
2330 2330 b"merging with a working directory ancestor"
2331 2331 b" has no effect"
2332 2332 )
2333 2333 )
2334 2334 elif pas == [p1]:
2335 2335 if not mergeancestor and wc.branch() == p2.branch():
2336 2336 raise error.Abort(
2337 2337 _(b"nothing to merge"),
2338 2338 hint=_(b"use 'hg update' or check 'hg heads'"),
2339 2339 )
2340 2340 if not force and (wc.files() or wc.deleted()):
2341 2341 raise error.Abort(
2342 2342 _(b"uncommitted changes"),
2343 2343 hint=_(b"use 'hg status' to list changes"),
2344 2344 )
2345 2345 if not wc.isinmemory():
2346 2346 for s in sorted(wc.substate):
2347 2347 wc.sub(s).bailifchanged()
2348 2348
2349 2349 elif not overwrite:
2350 2350 if p1 == p2: # no-op update
2351 2351 # call the hooks and exit early
2352 2352 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2353 2353 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2354 2354 return updateresult(0, 0, 0, 0)
2355 2355
2356 2356 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2357 2357 [p1],
2358 2358 [p2],
2359 2359 ): # nonlinear
2360 2360 dirty = wc.dirty(missing=True)
2361 2361 if dirty:
2362 2362 # The branching is a bit convoluted to ensure we make the minimal
2363 2363 # number of calls to obsutil.foreground.
2364 2364 foreground = obsutil.foreground(repo, [p1.node()])
2365 2365 # note: the <node> variable contains a random identifier
2366 2366 if repo[node].node() in foreground:
2367 2367 pass # allow updating to successors
2368 2368 else:
2369 2369 msg = _(b"uncommitted changes")
2370 2370 hint = _(b"commit or update --clean to discard changes")
2371 2371 raise error.UpdateAbort(msg, hint=hint)
2372 2372 else:
2373 2373 # Allow jumping branches if clean and specific rev given
2374 2374 pass
2375 2375
2376 2376 if overwrite:
2377 2377 pas = [wc]
2378 2378 elif not branchmerge:
2379 2379 pas = [p1]
2380 2380
2381 2381 # deprecated config: merge.followcopies
2382 2382 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2383 2383 if overwrite:
2384 2384 followcopies = False
2385 2385 elif not pas[0]:
2386 2386 followcopies = False
2387 2387 if not branchmerge and not wc.dirty(missing=True):
2388 2388 followcopies = False
2389 2389
2390 2390 ### calculate phase
2391 2391 actionbyfile, diverge, renamedelete = calculateupdates(
2392 2392 repo,
2393 2393 wc,
2394 2394 p2,
2395 2395 pas,
2396 2396 branchmerge,
2397 2397 force,
2398 2398 mergeancestor,
2399 2399 followcopies,
2400 2400 matcher=matcher,
2401 2401 mergeforce=mergeforce,
2402 2402 )
2403 2403
2404 2404 if updatecheck == UPDATECHECK_NO_CONFLICT:
2405 2405 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2406 2406 if m not in (
2407 2407 ACTION_GET,
2408 2408 ACTION_KEEP,
2409 2409 ACTION_EXEC,
2410 2410 ACTION_REMOVE,
2411 2411 ACTION_PATH_CONFLICT_RESOLVE,
2412 2412 ):
2413 2413 msg = _(b"conflicting changes")
2414 2414 hint = _(b"commit or update --clean to discard changes")
2415 2415 raise error.Abort(msg, hint=hint)
2416 2416
2417 2417 # Prompt and create actions. Most of this is in the resolve phase
2418 2418 # already, but we can't handle .hgsubstate in filemerge or
2419 2419 # subrepoutil.submerge yet so we have to keep prompting for it.
2420 2420 if b'.hgsubstate' in actionbyfile:
2421 2421 f = b'.hgsubstate'
2422 2422 m, args, msg = actionbyfile[f]
2423 2423 prompts = filemerge.partextras(labels)
2424 2424 prompts[b'f'] = f
2425 2425 if m == ACTION_CHANGED_DELETED:
2426 2426 if repo.ui.promptchoice(
2427 2427 _(
2428 2428 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2429 2429 b"use (c)hanged version or (d)elete?"
2430 2430 b"$$ &Changed $$ &Delete"
2431 2431 )
2432 2432 % prompts,
2433 2433 0,
2434 2434 ):
2435 2435 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2436 2436 elif f in p1:
2437 2437 actionbyfile[f] = (
2438 2438 ACTION_ADD_MODIFIED,
2439 2439 None,
2440 2440 b'prompt keep',
2441 2441 )
2442 2442 else:
2443 2443 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2444 2444 elif m == ACTION_DELETED_CHANGED:
2445 2445 f1, f2, fa, move, anc = args
2446 2446 flags = p2[f2].flags()
2447 2447 if (
2448 2448 repo.ui.promptchoice(
2449 2449 _(
2450 2450 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2451 2451 b"use (c)hanged version or leave (d)eleted?"
2452 2452 b"$$ &Changed $$ &Deleted"
2453 2453 )
2454 2454 % prompts,
2455 2455 0,
2456 2456 )
2457 2457 == 0
2458 2458 ):
2459 2459 actionbyfile[f] = (
2460 2460 ACTION_GET,
2461 2461 (flags, False),
2462 2462 b'prompt recreating',
2463 2463 )
2464 2464 else:
2465 2465 del actionbyfile[f]
2466 2466
2467 2467 # Convert to dictionary-of-lists format
2468 2468 actions = emptyactions()
2469 2469 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2470 2470 if m not in actions:
2471 2471 actions[m] = []
2472 2472 actions[m].append((f, args, msg))
2473 2473
2474 2474 if not util.fscasesensitive(repo.path):
2475 2475 # check collision between files only in p2 for clean update
2476 2476 if not branchmerge and (
2477 2477 force or not wc.dirty(missing=True, branch=False)
2478 2478 ):
2479 2479 _checkcollision(repo, p2.manifest(), None)
2480 2480 else:
2481 2481 _checkcollision(repo, wc.manifest(), actions)
2482 2482
2483 2483 # divergent renames
2484 2484 for f, fl in sorted(pycompat.iteritems(diverge)):
2485 2485 repo.ui.warn(
2486 2486 _(
2487 2487 b"note: possible conflict - %s was renamed "
2488 2488 b"multiple times to:\n"
2489 2489 )
2490 2490 % f
2491 2491 )
2492 2492 for nf in sorted(fl):
2493 2493 repo.ui.warn(b" %s\n" % nf)
2494 2494
2495 2495 # rename and delete
2496 2496 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2497 2497 repo.ui.warn(
2498 2498 _(
2499 2499 b"note: possible conflict - %s was deleted "
2500 2500 b"and renamed to:\n"
2501 2501 )
2502 2502 % f
2503 2503 )
2504 2504 for nf in sorted(fl):
2505 2505 repo.ui.warn(b" %s\n" % nf)
2506 2506
2507 2507 ### apply phase
2508 2508 if not branchmerge: # just jump to the new rev
2509 2509 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2510 2510 if not partial and not wc.isinmemory():
2511 2511 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2512 2512 # note that we're in the middle of an update
2513 2513 repo.vfs.write(b'updatestate', p2.hex())
2514 2514
2515 2515 # Advertise fsmonitor when its presence could be useful.
2516 2516 #
2517 2517 # We only advertise when performing an update from an empty working
2518 2518 # directory. This typically only occurs during initial clone.
2519 2519 #
2520 2520 # We give users a mechanism to disable the warning in case it is
2521 2521 # annoying.
2522 2522 #
2523 2523 # We only allow on Linux and MacOS because that's where fsmonitor is
2524 2524 # considered stable.
2525 2525 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2526 2526 fsmonitorthreshold = repo.ui.configint(
2527 2527 b'fsmonitor', b'warn_update_file_count'
2528 2528 )
2529 2529 try:
2530 2530 # avoid cycle: extensions -> cmdutil -> merge
2531 2531 from . import extensions
2532 2532
2533 2533 extensions.find(b'fsmonitor')
2534 2534 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2535 2535 # We intentionally don't look at whether fsmonitor has disabled
2536 2536 # itself because a) fsmonitor may have already printed a warning
2537 2537 # b) we only care about the config state here.
2538 2538 except KeyError:
2539 2539 fsmonitorenabled = False
2540 2540
2541 2541 if (
2542 2542 fsmonitorwarning
2543 2543 and not fsmonitorenabled
2544 2544 and p1.node() == nullid
2545 2545 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2546 2546 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2547 2547 ):
2548 2548 repo.ui.warn(
2549 2549 _(
2550 2550 b'(warning: large working directory being used without '
2551 2551 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2552 2552 b'see "hg help -e fsmonitor")\n'
2553 2553 )
2554 2554 )
2555 2555
2556 2556 updatedirstate = not partial and not wc.isinmemory()
2557 2557 wantfiledata = updatedirstate and not branchmerge
2558 2558 stats, getfiledata = applyupdates(
2559 2559 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2560 2560 )
2561 2561
2562 2562 if updatedirstate:
2563 2563 with repo.dirstate.parentchange():
2564 2564 repo.setparents(fp1, fp2)
2565 2565 recordupdates(repo, actions, branchmerge, getfiledata)
2566 2566 # update completed, clear state
2567 2567 util.unlink(repo.vfs.join(b'updatestate'))
2568 2568
2569 2569 if not branchmerge:
2570 2570 repo.dirstate.setbranch(p2.branch())
2571 2571
2572 2572 # If we're updating to a location, clean up any stale temporary includes
2573 2573 # (ex: this happens during hg rebase --abort).
2574 2574 if not branchmerge:
2575 2575 sparse.prunetemporaryincludes(repo)
2576 2576
2577 2577 if not partial:
2578 2578 repo.hook(
2579 2579 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2580 2580 )
2581 2581 return stats
2582 2582
2583 2583
2584 2584 def graft(
2585 2585 repo, ctx, base, labels=None, keepparent=False, keepconflictparent=False
2586 2586 ):
2587 2587 """Do a graft-like merge.
2588 2588
2589 2589 This is a merge where the merge ancestor is chosen such that one
2590 2590 or more changesets are grafted onto the current changeset. In
2591 2591 addition to the merge, this fixes up the dirstate to include only
2592 2592 a single parent (if keepparent is False) and tries to duplicate any
2593 2593 renames/copies appropriately.
2594 2594
2595 2595 ctx - changeset to rebase
2596 2596 base - merge base, usually ctx.p1()
2597 2597 labels - merge labels eg ['local', 'graft']
2598 2598 keepparent - keep second parent if any
2599 2599 keepconflictparent - if unresolved, keep parent used for the merge
2600 2600
2601 2601 """
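# Editor's sketch (not part of the original source): a typical caller grafts
# a single changeset onto the working directory parent with something like
#   graft(repo, ctx, ctx.p1(), labels=[b'local', b'graft'])
# which matches the parameter description in the docstring above.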
2602 2602 # If we're grafting a descendant onto an ancestor, be sure to pass
2603 2603 # mergeancestor=True to update. This does two things: 1) allows the merge if
2604 2604 # the destination is the same as the parent of the ctx (so we can use graft
2605 2605 # to copy commits), and 2) informs update that the incoming changes are
2606 2606 # newer than the destination so it doesn't prompt about "remote changed foo
2607 2607 # which local deleted".
2608 2608 pctx = repo[b'.']
2609 2609 mergeancestor = repo.changelog.isancestor(pctx.node(), ctx.node())
2610 2610
2611 2611 stats = update(
2612 2612 repo,
2613 2613 ctx.node(),
2614 2614 True,
2615 2615 True,
2616 2616 base.node(),
2617 2617 mergeancestor=mergeancestor,
2618 2618 labels=labels,
2619 2619 )
2620 2620
2621 2621 if keepconflictparent and stats.unresolvedcount:
2622 2622 pother = ctx.node()
2623 2623 else:
2624 2624 pother = nullid
2625 2625 parents = ctx.parents()
2626 2626 if keepparent and len(parents) == 2 and base in parents:
2627 2627 parents.remove(base)
2628 2628 pother = parents[0].node()
2629 2629 # Never set both parents equal to each other
2630 2630 if pother == pctx.node():
2631 2631 pother = nullid
2632 2632
2633 2633 with repo.dirstate.parentchange():
2634 2634 repo.setparents(pctx.node(), pother)
2635 2635 repo.dirstate.write(repo.currenttransaction())
2636 2636 # fix up dirstate for copies and renames
2637 2637 copies.duplicatecopies(repo, repo[None], ctx.rev(), base.rev())
2638 2638 return stats
2639 2639
2640 2640
2641 2641 def purge(
2642 2642 repo,
2643 2643 matcher,
2644 2644 ignored=False,
2645 2645 removeemptydirs=True,
2646 2646 removefiles=True,
2647 2647 abortonerror=False,
2648 2648 noop=False,
2649 2649 ):
2650 2650 """Purge the working directory of untracked files.
2651 2651
2652 2652 ``matcher`` is a matcher configured to scan the working directory -
2653 2653 potentially a subset.
2654 2654
2655 2655 ``ignored`` controls whether ignored files should also be purged.
2656 2656
2657 2657 ``removeemptydirs`` controls whether empty directories should be removed.
2658 2658
2659 2659 ``removefiles`` controls whether files are removed.
2660 2660
2661 2661 ``abortonerror`` causes an exception to be raised if an error occurs
2662 2662 deleting a file or directory.
2663 2663
2664 2664 ``noop`` controls whether this is a dry run. If not set, the removal actions
2665 2665 are actually performed.
2666 2666
2667 2667 Returns an iterable of relative paths in the working directory that were
2668 2668 or would be removed.
2669 2669 """
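# Editor's sketch (not part of the original source): a dry run that only
# reports what would be deleted can be expressed as
#   purge(repo, matcher, noop=True)
# while abortonerror=True turns removal failures into hard aborts.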
2670 2670
2671 2671 def remove(removefn, path):
2672 2672 try:
2673 2673 removefn(path)
2674 2674 except OSError:
2675 2675 m = _(b'%s cannot be removed') % path
2676 2676 if abortonerror:
2677 2677 raise error.Abort(m)
2678 2678 else:
2679 2679 repo.ui.warn(_(b'warning: %s\n') % m)
2680 2680
2681 2681 # There's no API to copy a matcher. So mutate the passed matcher and
2682 2682 # restore it when we're done.
2683 2683 oldtraversedir = matcher.traversedir
2684 2684
2685 2685 res = []
2686 2686
2687 2687 try:
2688 2688 if removeemptydirs:
2689 2689 directories = []
2690 2690 matcher.traversedir = directories.append
2691 2691
2692 2692 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2693 2693
2694 2694 if removefiles:
2695 2695 for f in sorted(status.unknown + status.ignored):
2696 2696 if not noop:
2697 2697 repo.ui.note(_(b'removing file %s\n') % f)
2698 2698 remove(repo.wvfs.unlink, f)
2699 2699 res.append(f)
2700 2700
2701 2701 if removeemptydirs:
2702 2702 for f in sorted(directories, reverse=True):
2703 2703 if matcher(f) and not repo.wvfs.listdir(f):
2704 2704 if not noop:
2705 2705 repo.ui.note(_(b'removing directory %s\n') % f)
2706 2706 remove(repo.wvfs.rmdir, f)
2707 2707 res.append(f)
2708 2708
2709 2709 return res
2710 2710
2711 2711 finally:
2712 2712 matcher.traversedir = oldtraversedir
@@ -1,1144 +1,1146 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 building new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that used changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the version. See
67 67 comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 import hashlib
74 73 import struct
75 74
76 75 from .i18n import _
77 76 from .pycompat import getattr
78 77 from . import (
79 78 encoding,
80 79 error,
81 80 node,
82 81 obsutil,
83 82 phases,
84 83 policy,
85 84 pycompat,
86 85 util,
87 86 )
88 from .utils import dateutil
87 from .utils import (
88 dateutil,
89 hashutil,
90 )
89 91
90 92 parsers = policy.importmod('parsers')
91 93
92 94 _pack = struct.pack
93 95 _unpack = struct.unpack
94 96 _calcsize = struct.calcsize
95 97 propertycache = util.propertycache
96 98
97 99 # Options for obsolescence
98 100 createmarkersopt = b'createmarkers'
99 101 allowunstableopt = b'allowunstable'
100 102 exchangeopt = b'exchange'
101 103
102 104
103 105 def _getoptionvalue(repo, option):
104 106 """Returns True if the given repository has the given obsolete option
105 107 enabled.
106 108 """
107 109 configkey = b'evolution.%s' % option
108 110 newconfig = repo.ui.configbool(b'experimental', configkey)
109 111
110 112 # Return the value only if defined
111 113 if newconfig is not None:
112 114 return newconfig
113 115
114 116 # Fallback on generic option
115 117 try:
116 118 return repo.ui.configbool(b'experimental', b'evolution')
117 119 except (error.ConfigError, AttributeError):
118 120 # Fallback on old-fashioned config
119 121 # inconsistent config: experimental.evolution
120 122 result = set(repo.ui.configlist(b'experimental', b'evolution'))
121 123
122 124 if b'all' in result:
123 125 return True
124 126
125 127 # Temporary hack for next check
126 128 newconfig = repo.ui.config(b'experimental', b'evolution.createmarkers')
127 129 if newconfig:
128 130 result.add(b'createmarkers')
129 131
130 132 return option in result
131 133
132 134
133 135 def getoptions(repo):
134 136 """Returns dicts showing state of obsolescence features."""
135 137
136 138 createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
137 139 unstablevalue = _getoptionvalue(repo, allowunstableopt)
138 140 exchangevalue = _getoptionvalue(repo, exchangeopt)
139 141
140 142 # createmarkers must be enabled if other options are enabled
141 143 if (unstablevalue or exchangevalue) and not createmarkersvalue:
142 144 raise error.Abort(
143 145 _(
144 146 b"'createmarkers' obsolete option must be enabled "
145 147 b"if other obsolete options are enabled"
146 148 )
147 149 )
148 150
149 151 return {
150 152 createmarkersopt: createmarkersvalue,
151 153 allowunstableopt: unstablevalue,
152 154 exchangeopt: exchangevalue,
153 155 }
154 156
155 157
156 158 def isenabled(repo, option):
157 159 """Returns True if the given repository has the given obsolete option
158 160 enabled.
159 161 """
160 162 return getoptions(repo)[option]
161 163
162 164
163 165 # Creating aliases for marker flags because evolve extension looks for
164 166 # bumpedfix in obsolete.py
165 167 bumpedfix = obsutil.bumpedfix
166 168 usingsha256 = obsutil.usingsha256
167 169
168 170 ## Parsing and writing of version "0"
169 171 #
170 172 # The header is followed by the markers. Each marker is made of:
171 173 #
172 174 # - 1 uint8 : number of new changesets "N", can be zero.
173 175 #
174 176 # - 1 uint32: metadata size "M" in bytes.
175 177 #
176 178 # - 1 byte: a bit field. It is reserved for flags used in common
177 179 # obsolete marker operations, to avoid repeated decoding of metadata
178 180 # entries.
179 181 #
180 182 # - 20 bytes: obsoleted changeset identifier.
181 183 #
182 184 # - N*20 bytes: new changesets identifiers.
183 185 #
184 186 # - M bytes: metadata as a sequence of nul-terminated strings. Each
185 187 # string contains a key and a value, separated by a colon ':', without
186 188 # additional encoding. Keys cannot contain '\0' or ':' and values
187 189 # cannot contain '\0'.
188 190 _fm0version = 0
189 191 _fm0fixed = b'>BIB20s'
190 192 _fm0node = b'20s'
191 193 _fm0fsize = _calcsize(_fm0fixed)
192 194 _fm0fnodesize = _calcsize(_fm0node)
193 195
194 196
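# Illustrative sketch added by the editor (not part of the original module):
# decoding the fixed part of a version-0 marker by hand, using the helpers
# and format string defined above. The node id below is made up.
_fm0example = _pack(_fm0fixed, 1, 0, 0, b'\x11' * 20)
assert _fm0fsize == 26  # 1 (numsuc) + 4 (mdsize) + 1 (flags) + 20 (prec)
assert _unpack(_fm0fixed, _fm0example) == (1, 0, 0, b'\x11' * 20)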
195 197 def _fm0readmarkers(data, off, stop):
196 198 # Loop on markers
197 199 while off < stop:
198 200 # read fixed part
199 201 cur = data[off : off + _fm0fsize]
200 202 off += _fm0fsize
201 203 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
202 204 # read replacement
203 205 sucs = ()
204 206 if numsuc:
205 207 s = _fm0fnodesize * numsuc
206 208 cur = data[off : off + s]
207 209 sucs = _unpack(_fm0node * numsuc, cur)
208 210 off += s
209 211 # read metadata
210 212 # (metadata will be decoded on demand)
211 213 metadata = data[off : off + mdsize]
212 214 if len(metadata) != mdsize:
213 215 raise error.Abort(
214 216 _(
215 217 b'parsing obsolete marker: metadata is too '
216 218 b'short, %d bytes expected, got %d'
217 219 )
218 220 % (mdsize, len(metadata))
219 221 )
220 222 off += mdsize
221 223 metadata = _fm0decodemeta(metadata)
222 224 try:
223 225 when, offset = metadata.pop(b'date', b'0 0').split(b' ')
224 226 date = float(when), int(offset)
225 227 except ValueError:
226 228 date = (0.0, 0)
227 229 parents = None
228 230 if b'p2' in metadata:
229 231 parents = (metadata.pop(b'p1', None), metadata.pop(b'p2', None))
230 232 elif b'p1' in metadata:
231 233 parents = (metadata.pop(b'p1', None),)
232 234 elif b'p0' in metadata:
233 235 parents = ()
234 236 if parents is not None:
235 237 try:
236 238 parents = tuple(node.bin(p) for p in parents)
237 239 # if parent content is not a nodeid, drop the data
238 240 for p in parents:
239 241 if len(p) != 20:
240 242 parents = None
241 243 break
242 244 except TypeError:
243 245 # if content cannot be translated to nodeid drop the data.
244 246 parents = None
245 247
246 248 metadata = tuple(sorted(pycompat.iteritems(metadata)))
247 249
248 250 yield (pre, sucs, flags, metadata, date, parents)
249 251
250 252
251 253 def _fm0encodeonemarker(marker):
252 254 pre, sucs, flags, metadata, date, parents = marker
253 255 if flags & usingsha256:
254 256 raise error.Abort(_(b'cannot handle sha256 with old obsstore format'))
255 257 metadata = dict(metadata)
256 258 time, tz = date
257 259 metadata[b'date'] = b'%r %i' % (time, tz)
258 260 if parents is not None:
259 261 if not parents:
260 262 # mark that we explicitly recorded no parents
261 263 metadata[b'p0'] = b''
262 264 for i, p in enumerate(parents, 1):
263 265 metadata[b'p%i' % i] = node.hex(p)
264 266 metadata = _fm0encodemeta(metadata)
265 267 numsuc = len(sucs)
266 268 format = _fm0fixed + (_fm0node * numsuc)
267 269 data = [numsuc, len(metadata), flags, pre]
268 270 data.extend(sucs)
269 271 return _pack(format, *data) + metadata
270 272
271 273
272 274 def _fm0encodemeta(meta):
273 275 """Return encoded metadata string to string mapping.
274 276
275 277 Assume no ':' in keys and no '\0' in either keys or values."""
276 278 for key, value in pycompat.iteritems(meta):
277 279 if b':' in key or b'\0' in key:
278 280 raise ValueError(b"':' and '\0' are forbidden in metadata keys")
279 281 if b'\0' in value:
280 282 raise ValueError(b"'\0' is forbidden in metadata values")
281 283 return b'\0'.join([b'%s:%s' % (k, meta[k]) for k in sorted(meta)])
282 284
283 285
284 286 def _fm0decodemeta(data):
285 287 """Return string to string dictionary from encoded version."""
286 288 d = {}
287 289 for l in data.split(b'\0'):
288 290 if l:
289 291 key, value = l.split(b':', 1)
290 292 d[key] = value
291 293 return d
292 294
293 295
294 296 ## Parsing and writing of version "1"
295 297 #
296 298 # The header is followed by the markers. Each marker is made of:
297 299 #
298 300 # - uint32: total size of the marker (including this field)
299 301 #
300 302 # - float64: date in seconds since epoch
301 303 #
302 304 # - int16: timezone offset in minutes
303 305 #
304 306 # - uint16: a bit field. It is reserved for flags used in common
305 307 # obsolete marker operations, to avoid repeated decoding of metadata
306 308 # entries.
307 309 #
308 310 # - uint8: number of successors "N", can be zero.
309 311 #
310 312 # - uint8: number of parents "P", can be zero.
311 313 #
312 314 # 0: parents data stored but no parent,
313 315 # 1: one parent stored,
314 316 # 2: two parents stored,
315 317 # 3: no parent data stored
316 318 #
317 319 # - uint8: number of metadata entries M
318 320 #
319 321 # - 20 or 32 bytes: predecessor changeset identifier.
320 322 #
321 323 # - N*(20 or 32) bytes: successors changesets identifiers.
322 324 #
323 325 # - P*(20 or 32) bytes: parents of the predecessors changesets.
324 326 #
325 327 # - M*(uint8, uint8): size of all metadata entries (key and value)
326 328 #
327 329 # - remaining bytes: the metadata, each (key, value) pair after the other.
328 330 _fm1version = 1
329 331 _fm1fixed = b'>IdhHBBB20s'
330 332 _fm1nodesha1 = b'20s'
331 333 _fm1nodesha256 = b'32s'
332 334 _fm1nodesha1size = _calcsize(_fm1nodesha1)
333 335 _fm1nodesha256size = _calcsize(_fm1nodesha256)
334 336 _fm1fsize = _calcsize(_fm1fixed)
335 337 _fm1parentnone = 3
336 338 _fm1parentshift = 14
337 339 _fm1parentmask = _fm1parentnone << _fm1parentshift
338 340 _fm1metapair = b'BB'
339 341 _fm1metapairsize = _calcsize(_fm1metapair)
340 342
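# Illustrative sketch added by the editor (not part of the original module):
# the fixed part of a version-1 marker is 39 bytes -- total size, date,
# timezone, flags, three counters and the 20-byte predecessor -- and the
# "parent information not stored" case is signalled by the counter value 3.
assert _fm1fsize == 39  # 4 + 8 + 2 + 2 + 1 + 1 + 1 + 20
assert _fm1parentnone == 3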
341 343
342 344 def _fm1purereadmarkers(data, off, stop):
343 345 # make some global constants local for performance
344 346 noneflag = _fm1parentnone
345 347 sha2flag = usingsha256
346 348 sha1size = _fm1nodesha1size
347 349 sha2size = _fm1nodesha256size
348 350 sha1fmt = _fm1nodesha1
349 351 sha2fmt = _fm1nodesha256
350 352 metasize = _fm1metapairsize
351 353 metafmt = _fm1metapair
352 354 fsize = _fm1fsize
353 355 unpack = _unpack
354 356
355 357 # Loop on markers
356 358 ufixed = struct.Struct(_fm1fixed).unpack
357 359
358 360 while off < stop:
359 361 # read fixed part
360 362 o1 = off + fsize
361 363 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
362 364
363 365 if flags & sha2flag:
364 366 # FIXME: prec was read as a SHA1, needs to be amended
365 367
366 368 # read 0 or more successors
367 369 if numsuc == 1:
368 370 o2 = o1 + sha2size
369 371 sucs = (data[o1:o2],)
370 372 else:
371 373 o2 = o1 + sha2size * numsuc
372 374 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
373 375
374 376 # read parents
375 377 if numpar == noneflag:
376 378 o3 = o2
377 379 parents = None
378 380 elif numpar == 1:
379 381 o3 = o2 + sha2size
380 382 parents = (data[o2:o3],)
381 383 else:
382 384 o3 = o2 + sha2size * numpar
383 385 parents = unpack(sha2fmt * numpar, data[o2:o3])
384 386 else:
385 387 # read 0 or more successors
386 388 if numsuc == 1:
387 389 o2 = o1 + sha1size
388 390 sucs = (data[o1:o2],)
389 391 else:
390 392 o2 = o1 + sha1size * numsuc
391 393 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
392 394
393 395 # read parents
394 396 if numpar == noneflag:
395 397 o3 = o2
396 398 parents = None
397 399 elif numpar == 1:
398 400 o3 = o2 + sha1size
399 401 parents = (data[o2:o3],)
400 402 else:
401 403 o3 = o2 + sha1size * numpar
402 404 parents = unpack(sha1fmt * numpar, data[o2:o3])
403 405
404 406 # read metadata
405 407 off = o3 + metasize * nummeta
406 408 metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
407 409 metadata = []
408 410 for idx in pycompat.xrange(0, len(metapairsize), 2):
409 411 o1 = off + metapairsize[idx]
410 412 o2 = o1 + metapairsize[idx + 1]
411 413 metadata.append((data[off:o1], data[o1:o2]))
412 414 off = o2
413 415
414 416 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
415 417
416 418
417 419 def _fm1encodeonemarker(marker):
418 420 pre, sucs, flags, metadata, date, parents = marker
419 421 # determine node size
420 422 _fm1node = _fm1nodesha1
421 423 if flags & usingsha256:
422 424 _fm1node = _fm1nodesha256
423 425 numsuc = len(sucs)
424 426 numextranodes = numsuc
425 427 if parents is None:
426 428 numpar = _fm1parentnone
427 429 else:
428 430 numpar = len(parents)
429 431 numextranodes += numpar
430 432 formatnodes = _fm1node * numextranodes
431 433 formatmeta = _fm1metapair * len(metadata)
432 434 format = _fm1fixed + formatnodes + formatmeta
433 435 # tz is stored in minutes so we divide by 60
434 436 tz = date[1] // 60
435 437 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
436 438 data.extend(sucs)
437 439 if parents is not None:
438 440 data.extend(parents)
439 441 totalsize = _calcsize(format)
440 442 for key, value in metadata:
441 443 lk = len(key)
442 444 lv = len(value)
443 445 if lk > 255:
444 446 msg = (
445 447 b'obsstore metadata key cannot be longer than 255 bytes'
446 448 b' (key "%s" is %u bytes)'
447 449 ) % (key, lk)
448 450 raise error.ProgrammingError(msg)
449 451 if lv > 255:
450 452 msg = (
451 453 b'obsstore metadata value cannot be longer than 255 bytes'
452 454 b' (value "%s" for key "%s" is %u bytes)'
453 455 ) % (value, key, lv)
454 456 raise error.ProgrammingError(msg)
455 457 data.append(lk)
456 458 data.append(lv)
457 459 totalsize += lk + lv
458 460 data[0] = totalsize
459 461 data = [_pack(format, *data)]
460 462 for key, value in metadata:
461 463 data.append(key)
462 464 data.append(value)
463 465 return b''.join(data)
464 466
465 467
466 468 def _fm1readmarkers(data, off, stop):
467 469 native = getattr(parsers, 'fm1readmarkers', None)
468 470 if not native:
469 471 return _fm1purereadmarkers(data, off, stop)
470 472 return native(data, off, stop)
471 473
472 474
473 475 # mapping to read/write various marker formats
474 476 # <version> -> (decoder, encoder)
475 477 formats = {
476 478 _fm0version: (_fm0readmarkers, _fm0encodeonemarker),
477 479 _fm1version: (_fm1readmarkers, _fm1encodeonemarker),
478 480 }
479 481
480 482
481 483 def _readmarkerversion(data):
482 484 return _unpack(b'>B', data[0:1])[0]
483 485
484 486
485 487 @util.nogc
486 488 def _readmarkers(data, off=None, stop=None):
487 489 """Read and enumerate markers from raw data"""
488 490 diskversion = _readmarkerversion(data)
489 491 if not off:
490 492 off = 1 # skip 1 byte version number
491 493 if stop is None:
492 494 stop = len(data)
493 495 if diskversion not in formats:
494 496 msg = _(b'parsing obsolete marker: unknown version %r') % diskversion
495 497 raise error.UnknownVersion(msg, version=diskversion)
496 498 return diskversion, formats[diskversion][0](data, off, stop)
497 499
498 500
499 501 def encodeheader(version=_fm0version):
500 502 return _pack(b'>B', version)
501 503
502 504
503 505 def encodemarkers(markers, addheader=False, version=_fm0version):
504 506 # Kept separate from flushmarkers(), it will be reused for
505 507 # markers exchange.
506 508 encodeone = formats[version][1]
507 509 if addheader:
508 510 yield encodeheader(version)
509 511 for marker in markers:
510 512 yield encodeone(marker)
511 513
512 514
513 515 @util.nogc
514 516 def _addsuccessors(successors, markers):
515 517 for mark in markers:
516 518 successors.setdefault(mark[0], set()).add(mark)
517 519
518 520
519 521 @util.nogc
520 522 def _addpredecessors(predecessors, markers):
521 523 for mark in markers:
522 524 for suc in mark[1]:
523 525 predecessors.setdefault(suc, set()).add(mark)
524 526
525 527
526 528 @util.nogc
527 529 def _addchildren(children, markers):
528 530 for mark in markers:
529 531 parents = mark[5]
530 532 if parents is not None:
531 533 for p in parents:
532 534 children.setdefault(p, set()).add(mark)
533 535
534 536
535 537 def _checkinvalidmarkers(markers):
536 538 """search for marker with invalid data and raise error if needed
537 539
538 540 Exists as a separate function to allow the evolve extension to provide a
539 541 more subtle handling.
540 542 """
541 543 for mark in markers:
542 544 if node.nullid in mark[1]:
543 545 raise error.Abort(
544 546 _(
545 547 b'bad obsolescence marker detected: '
546 548 b'invalid successors nullid'
547 549 )
548 550 )
549 551
550 552
551 553 class obsstore(object):
552 554 """Store obsolete markers
553 555
554 556 Markers can be accessed with the following mappings:
555 557 - predecessors[x] -> set(markers on predecessor edges of x)
556 558 - successors[x] -> set(markers on successor edges of x)
557 559 - children[x] -> set(markers on predecessor edges of children(x))
558 560 """
559 561
560 562 fields = (b'prec', b'succs', b'flag', b'meta', b'date', b'parents')
561 563 # prec: nodeid, predecessors changesets
562 564 # succs: tuple of nodeid, successor changesets (0-N length)
563 565 # flag: integer, flag field carrying modifier for the markers (see doc)
564 566 # meta: binary blob in UTF-8, encoded metadata dictionary
565 567 # date: (float, int) tuple, date of marker creation
566 568 # parents: (tuple of nodeid) or None, parents of predecessors
567 569 # None is used when no data has been recorded
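# For example (editor's sketch, with made-up node ids and date): a marker
# recording "A was rewritten into A2", as yielded by the format parsers
# above, looks like
# (node_A, (node_A2,), 0, ((b'user', b'alice'),), (1581234567.0, 0), None)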
568 570
569 571 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
570 572 # caches for various obsolescence related cache
571 573 self.caches = {}
572 574 self.svfs = svfs
573 575 self._defaultformat = defaultformat
574 576 self._readonly = readonly
575 577
576 578 def __iter__(self):
577 579 return iter(self._all)
578 580
579 581 def __len__(self):
580 582 return len(self._all)
581 583
582 584 def __nonzero__(self):
583 585 if not self._cached('_all'):
584 586 try:
585 587 return self.svfs.stat(b'obsstore').st_size > 1
586 588 except OSError as inst:
587 589 if inst.errno != errno.ENOENT:
588 590 raise
589 591 # just build an empty _all list if no obsstore exists, which
590 592 # avoids further stat() syscalls
591 593 return bool(self._all)
592 594
593 595 __bool__ = __nonzero__
594 596
595 597 @property
596 598 def readonly(self):
597 599 """True if marker creation is disabled
598 600
599 601 Remove this in the future when obsolete markers are always on."""
600 602 return self._readonly
601 603
602 604 def create(
603 605 self,
604 606 transaction,
605 607 prec,
606 608 succs=(),
607 609 flag=0,
608 610 parents=None,
609 611 date=None,
610 612 metadata=None,
611 613 ui=None,
612 614 ):
613 615 """obsolete: add a new obsolete marker
614 616
615 617 * ensuring it is hashable
616 618 * check mandatory metadata
617 619 * encode metadata
618 620
619 621 If you are a human writing code that creates markers, you want to use the
620 622 `createmarkers` function in this module instead.
621 623
622 624 Return True if a new marker has been added, False if the marker
623 625 already existed (no-op).
624 626 """
625 627 if metadata is None:
626 628 metadata = {}
627 629 if date is None:
628 630 if b'date' in metadata:
629 631 # as a courtesy for out-of-tree extensions
630 632 date = dateutil.parsedate(metadata.pop(b'date'))
631 633 elif ui is not None:
632 634 date = ui.configdate(b'devel', b'default-date')
633 635 if date is None:
634 636 date = dateutil.makedate()
635 637 else:
636 638 date = dateutil.makedate()
637 639 if len(prec) != 20:
638 640 raise ValueError(prec)
639 641 for succ in succs:
640 642 if len(succ) != 20:
641 643 raise ValueError(succ)
642 644 if prec in succs:
643 645 raise ValueError(
644 646 'in-marker cycle with %s' % pycompat.sysstr(node.hex(prec))
645 647 )
646 648
647 649 metadata = tuple(sorted(pycompat.iteritems(metadata)))
648 650 for k, v in metadata:
649 651 try:
650 652 # might be better to reject non-ASCII keys
651 653 k.decode('utf-8')
652 654 v.decode('utf-8')
653 655 except UnicodeDecodeError:
654 656 raise error.ProgrammingError(
655 657 b'obsstore metadata must be valid UTF-8 sequence '
656 658 b'(key = %r, value = %r)'
657 659 % (pycompat.bytestr(k), pycompat.bytestr(v))
658 660 )
659 661
660 662 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
661 663 return bool(self.add(transaction, [marker]))
662 664
663 665 def add(self, transaction, markers):
664 666 """Add new markers to the store
665 667
666 668 Take care of filtering out duplicates.
667 669 Return the number of new markers."""
668 670 if self._readonly:
669 671 raise error.Abort(
670 672 _(b'creating obsolete markers is not enabled on this repo')
671 673 )
672 674 known = set()
673 675 getsuccessors = self.successors.get
674 676 new = []
675 677 for m in markers:
676 678 if m not in getsuccessors(m[0], ()) and m not in known:
677 679 known.add(m)
678 680 new.append(m)
679 681 if new:
680 682 f = self.svfs(b'obsstore', b'ab')
681 683 try:
682 684 offset = f.tell()
683 685 transaction.add(b'obsstore', offset)
684 686 # offset == 0: new file - add the version header
685 687 data = b''.join(encodemarkers(new, offset == 0, self._version))
686 688 f.write(data)
687 689 finally:
688 690 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
689 691 # call 'filecacheentry.refresh()' here
690 692 f.close()
691 693 addedmarkers = transaction.changes.get(b'obsmarkers')
692 694 if addedmarkers is not None:
693 695 addedmarkers.update(new)
694 696 self._addmarkers(new, data)
695 697 # new markers *may* have changed several sets. invalidate the cache.
696 698 self.caches.clear()
697 699 # records the number of new markers for the transaction hooks
698 700 previous = int(transaction.hookargs.get(b'new_obsmarkers', b'0'))
699 701 transaction.hookargs[b'new_obsmarkers'] = b'%d' % (previous + len(new))
700 702 return len(new)
701 703
702 704 def mergemarkers(self, transaction, data):
703 705 """merge a binary stream of markers inside the obsstore
704 706
705 707 Returns the number of new markers added."""
706 708 version, markers = _readmarkers(data)
707 709 return self.add(transaction, markers)
708 710
709 711 @propertycache
710 712 def _data(self):
711 713 return self.svfs.tryread(b'obsstore')
712 714
713 715 @propertycache
714 716 def _version(self):
715 717 if len(self._data) >= 1:
716 718 return _readmarkerversion(self._data)
717 719 else:
718 720 return self._defaultformat
719 721
720 722 @propertycache
721 723 def _all(self):
722 724 data = self._data
723 725 if not data:
724 726 return []
725 727 self._version, markers = _readmarkers(data)
726 728 markers = list(markers)
727 729 _checkinvalidmarkers(markers)
728 730 return markers
729 731
730 732 @propertycache
731 733 def successors(self):
732 734 successors = {}
733 735 _addsuccessors(successors, self._all)
734 736 return successors
735 737
736 738 @propertycache
737 739 def predecessors(self):
738 740 predecessors = {}
739 741 _addpredecessors(predecessors, self._all)
740 742 return predecessors
741 743
742 744 @propertycache
743 745 def children(self):
744 746 children = {}
745 747 _addchildren(children, self._all)
746 748 return children
747 749
748 750 def _cached(self, attr):
749 751 return attr in self.__dict__
750 752
751 753 def _addmarkers(self, markers, rawdata):
752 754 markers = list(markers) # to allow repeated iteration
753 755 self._data = self._data + rawdata
754 756 self._all.extend(markers)
755 757 if self._cached('successors'):
756 758 _addsuccessors(self.successors, markers)
757 759 if self._cached('predecessors'):
758 760 _addpredecessors(self.predecessors, markers)
759 761 if self._cached('children'):
760 762 _addchildren(self.children, markers)
761 763 _checkinvalidmarkers(markers)
762 764
763 765 def relevantmarkers(self, nodes):
764 766 """return a set of all obsolescence markers relevant to a set of nodes.
765 767
766 768 "relevant" to a set of nodes means:
767 769 
768 770 - markers that use this changeset as a successor
769 771 - prune markers of direct children of this changeset
770 772 - recursive application of the two rules on predecessors of these
771 773 markers
772 774
773 775 It is a set so you cannot rely on order."""
774 776
775 777 pendingnodes = set(nodes)
776 778 seenmarkers = set()
777 779 seennodes = set(pendingnodes)
778 780 precursorsmarkers = self.predecessors
779 781 succsmarkers = self.successors
780 782 children = self.children
781 783 while pendingnodes:
782 784 direct = set()
783 785 for current in pendingnodes:
784 786 direct.update(precursorsmarkers.get(current, ()))
785 787 pruned = [m for m in children.get(current, ()) if not m[1]]
786 788 direct.update(pruned)
787 789 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
788 790 direct.update(pruned)
789 791 direct -= seenmarkers
790 792 pendingnodes = {m[0] for m in direct}
791 793 seenmarkers |= direct
792 794 pendingnodes -= seennodes
793 795 seennodes |= pendingnodes
794 796 return seenmarkers
795 797
796 798
797 799 def makestore(ui, repo):
798 800 """Create an obsstore instance from a repo."""
799 801 # read default format for new obsstore.
800 802 # developer config: format.obsstore-version
801 803 defaultformat = ui.configint(b'format', b'obsstore-version')
802 804 # rely on obsstore class default when possible.
803 805 kwargs = {}
804 806 if defaultformat is not None:
805 807 kwargs['defaultformat'] = defaultformat
806 808 readonly = not isenabled(repo, createmarkersopt)
807 809 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
808 810 if store and readonly:
809 811 ui.warn(
810 812 _(b'obsolete feature not enabled but %i markers found!\n')
811 813 % len(list(store))
812 814 )
813 815 return store
814 816
815 817
816 818 def commonversion(versions):
817 819 """Return the newest version listed in both versions and our local formats.
818 820
819 821 Returns None if no common version exists.
820 822 """
821 823 versions.sort(reverse=True)
822 824 # search for the highest version known on both sides
823 825 for v in versions:
824 826 if v in formats:
825 827 return v
826 828 return None
827 829
828 830
829 831 # arbitrarily picked to fit into the 8K limit of the HTTP server
830 832 # you have to take into account:
831 833 # - the version header
832 834 # - the base85 encoding
833 835 _maxpayload = 5300
834 836
835 837
836 838 def _pushkeyescape(markers):
837 839 """encode markers into a dict suitable for pushkey exchange
838 840
839 841 - binary data is base85 encoded
840 842 - split into chunks smaller than 5300 bytes"""
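# Editor's note (not part of the original function): the result maps keys
# b'dump0', b'dump1', ... to base85-encoded blobs; each blob starts with the
# version-0 header byte, and the raw marker data per blob is kept around the
# _maxpayload budget above so the encoded form still fits the 8K HTTP limit.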
841 843 keys = {}
842 844 parts = []
843 845 currentlen = _maxpayload * 2 # ensure we create a new part
844 846 for marker in markers:
845 847 nextdata = _fm0encodeonemarker(marker)
846 848 if len(nextdata) + currentlen > _maxpayload:
847 849 currentpart = []
848 850 currentlen = 0
849 851 parts.append(currentpart)
850 852 currentpart.append(nextdata)
851 853 currentlen += len(nextdata)
852 854 for idx, part in enumerate(reversed(parts)):
853 855 data = b''.join([_pack(b'>B', _fm0version)] + part)
854 856 keys[b'dump%i' % idx] = util.b85encode(data)
855 857 return keys
856 858
857 859
858 860 def listmarkers(repo):
859 861 """List markers over pushkey"""
860 862 if not repo.obsstore:
861 863 return {}
862 864 return _pushkeyescape(sorted(repo.obsstore))
863 865
864 866
865 867 def pushmarker(repo, key, old, new):
866 868 """Push markers over pushkey"""
867 869 if not key.startswith(b'dump'):
868 870 repo.ui.warn(_(b'unknown key: %r') % key)
869 871 return False
870 872 if old:
871 873 repo.ui.warn(_(b'unexpected old value for %r') % key)
872 874 return False
873 875 data = util.b85decode(new)
874 876 with repo.lock(), repo.transaction(b'pushkey: obsolete markers') as tr:
875 877 repo.obsstore.mergemarkers(tr, data)
876 878 repo.invalidatevolatilesets()
877 879 return True
878 880
879 881
880 882 # mapping of 'set-name' -> <function to compute this set>
881 883 cachefuncs = {}
882 884
883 885
884 886 def cachefor(name):
885 887 """Decorator to register a function as computing the cache for a set"""
886 888
887 889 def decorator(func):
888 890 if name in cachefuncs:
889 891 msg = b"duplicated registration for volatileset '%s' (existing: %r)"
890 892 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
891 893 cachefuncs[name] = func
892 894 return func
893 895
894 896 return decorator
895 897
896 898
897 899 def getrevs(repo, name):
898 900 """Return the set of revision that belong to the <name> set
899 901
900 902 Such access may compute the set and cache it for future use"""
901 903 repo = repo.unfiltered()
902 904 with util.timedcm('getrevs %s', name):
903 905 if not repo.obsstore:
904 906 return frozenset()
905 907 if name not in repo.obsstore.caches:
906 908 repo.obsstore.caches[name] = cachefuncs[name](repo)
907 909 return repo.obsstore.caches[name]
908 910
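A standalone sketch of this register-then-compute-lazily pattern (the names ending in _sketch and the 'example-set' name are invented for illustration):

_cachefuncs_sketch = {}

def cachefor_sketch(name):
    # register a function as the computer of the named volatile set
    def decorator(func):
        if name in _cachefuncs_sketch:
            raise RuntimeError('duplicated registration for %r' % name)
        _cachefuncs_sketch[name] = func
        return func
    return decorator

def getrevs_sketch(caches, name):
    # compute the set on first access, then serve it from the cache
    if name not in caches:
        caches[name] = _cachefuncs_sketch[name]()
    return caches[name]

@cachefor_sketch(b'example-set')
def _compute_example_set():
    return frozenset({1, 2, 3})

caches = {}
assert getrevs_sketch(caches, b'example-set') == frozenset({1, 2, 3})
assert b'example-set' in caches  # cached after the first lookup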
909 911
910 912 # To keep it simple we need to invalidate the obsolescence cache when:
911 913 #
912 914 # - a new changeset is added
913 915 # - the public phase is changed
914 916 # - obsolescence markers are added
915 917 # - strip is used on a repo
916 918 def clearobscaches(repo):
917 919 """Remove all obsolescence related cache from a repo
918 920
919 921 This remove all cache in obsstore is the obsstore already exist on the
920 922 repo.
921 923
922 924 (We could be smarter here given the exact event that trigger the cache
923 925 clearing)"""
924 926 # only clear cache is there is obsstore data in this repo
925 927 if b'obsstore' in repo._filecache:
926 928 repo.obsstore.caches.clear()
927 929
928 930
929 931 def _mutablerevs(repo):
930 932 """the set of mutable revision in the repository"""
931 933 return repo._phasecache.getrevset(repo, phases.mutablephases)
932 934
933 935
934 936 @cachefor(b'obsolete')
935 937 def _computeobsoleteset(repo):
936 938 """the set of obsolete revisions"""
937 939 getnode = repo.changelog.node
938 940 notpublic = _mutablerevs(repo)
939 941 isobs = repo.obsstore.successors.__contains__
940 942 obs = set(r for r in notpublic if isobs(getnode(r)))
941 943 return obs
942 944
943 945
944 946 @cachefor(b'orphan')
945 947 def _computeorphanset(repo):
946 948 """the set of non obsolete revisions with obsolete parents"""
947 949 pfunc = repo.changelog.parentrevs
948 950 mutable = _mutablerevs(repo)
949 951 obsolete = getrevs(repo, b'obsolete')
950 952 others = mutable - obsolete
951 953 unstable = set()
952 954 for r in sorted(others):
953 955 # A rev is unstable if one of its parents is obsolete or unstable;
954 956 # this works since we traverse in growing rev order
955 957 for p in pfunc(r):
956 958 if p in obsolete or p in unstable:
957 959 unstable.add(r)
958 960 break
959 961 return unstable
960 962
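A small standalone illustration of that propagation in growing rev order (the toy parent map and obsolete set below are made up):

parents_sketch = {3: (1,), 4: (3,), 5: (2,)}  # rev -> parent revs
obsolete_sketch = {1}
orphan_sketch = set()
for r in sorted(parents_sketch):  # growing rev order is what makes one pass enough
    if any(p in obsolete_sketch or p in orphan_sketch for p in parents_sketch[r]):
        orphan_sketch.add(r)
# 3 is orphan because its parent 1 is obsolete; 4 only through its orphan parent 3
assert orphan_sketch == {3, 4}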
961 963
962 964 @cachefor(b'suspended')
963 965 def _computesuspendedset(repo):
964 966 """the set of obsolete parents with non obsolete descendants"""
965 967 suspended = repo.changelog.ancestors(getrevs(repo, b'orphan'))
966 968 return set(r for r in getrevs(repo, b'obsolete') if r in suspended)
967 969
968 970
969 971 @cachefor(b'extinct')
970 972 def _computeextinctset(repo):
971 973 """the set of obsolete parents without non obsolete descendants"""
972 974 return getrevs(repo, b'obsolete') - getrevs(repo, b'suspended')
973 975
974 976
975 977 @cachefor(b'phasedivergent')
976 978 def _computephasedivergentset(repo):
977 979 """the set of revs trying to obsolete public revisions"""
978 980 bumped = set()
979 981 # util function (avoid attribute lookup in the loop)
980 982 phase = repo._phasecache.phase # would be faster to grab the full list
981 983 public = phases.public
982 984 cl = repo.changelog
983 985 torev = cl.index.get_rev
984 986 tonode = cl.node
985 987 obsstore = repo.obsstore
986 988 for rev in repo.revs(b'(not public()) and (not obsolete())'):
987 989 # We only evaluate mutable, non-obsolete revisions
988 990 node = tonode(rev)
989 991 # (future) A cache of predecessors may be worth it if split is very common
990 992 for pnode in obsutil.allpredecessors(
991 993 obsstore, [node], ignoreflags=bumpedfix
992 994 ):
993 995 prev = torev(pnode) # unfiltered! but so is phasecache
994 996 if (prev is not None) and (phase(repo, prev) <= public):
995 997 # we have a public predecessor
996 998 bumped.add(rev)
997 999 break # Next draft!
998 1000 return bumped
999 1001
1000 1002
1001 1003 @cachefor(b'contentdivergent')
1002 1004 def _computecontentdivergentset(repo):
1003 1005 """the set of rev that compete to be the final successors of some revision.
1004 1006 """
1005 1007 divergent = set()
1006 1008 obsstore = repo.obsstore
1007 1009 newermap = {}
1008 1010 tonode = repo.changelog.node
1009 1011 for rev in repo.revs(b'(not public()) - obsolete()'):
1010 1012 node = tonode(rev)
1011 1013 mark = obsstore.predecessors.get(node, ())
1012 1014 toprocess = set(mark)
1013 1015 seen = set()
1014 1016 while toprocess:
1015 1017 prec = toprocess.pop()[0]
1016 1018 if prec in seen:
1017 1019 continue # emergency cycle hanging prevention
1018 1020 seen.add(prec)
1019 1021 if prec not in newermap:
1020 1022 obsutil.successorssets(repo, prec, cache=newermap)
1021 1023 newer = [n for n in newermap[prec] if n]
1022 1024 if len(newer) > 1:
1023 1025 divergent.add(rev)
1024 1026 break
1025 1027 toprocess.update(obsstore.predecessors.get(prec, ()))
1026 1028 return divergent
1027 1029
1028 1030
1029 1031 def makefoldid(relation, user):
1030 1032
1031 folddigest = hashlib.sha1(user)
1033 folddigest = hashutil.sha1(user)
1032 1034 for p in relation[0] + relation[1]:
1033 1035 folddigest.update(b'%d' % p.rev())
1034 1036 folddigest.update(p.node())
1035 1037 # Since fold only has to compete against fold for the same successors, it
1036 1038 # seems fine to use a small ID. Smaller IDs save space.
1037 1039 return node.hex(folddigest.digest())[:8]
1038 1040
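A standalone sketch of how the fold id is derived (plain hashlib.sha1 stands in for the hashutil.sha1 wrapper used by the real code, and the user/rev/node inputs are faked):

import hashlib

def makefoldid_sketch(user, precursors):
    # hash the user plus the rev number and node of every folded changeset,
    # then keep only 8 hex characters: a fold id only competes with folds
    # targeting the same successors, so a short id is enough
    digest = hashlib.sha1(user)
    for rev, node in precursors:
        digest.update(b'%d' % rev)
        digest.update(node)
    return digest.hexdigest()[:8]

foldid = makefoldid_sketch(b'alice', [(12, b'\x01' * 20), (13, b'\x02' * 20)])
assert len(foldid) == 8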
1039 1041
1040 1042 def createmarkers(
1041 1043 repo, relations, flag=0, date=None, metadata=None, operation=None
1042 1044 ):
1043 1045 """Add obsolete markers between changesets in a repo
1044 1046
1045 1047 <relations> must be an iterable of ((<old>,...), (<new>, ...)[,{metadata}])
1046 1048 tuples. `old` and `news` are changectxs. metadata is an optional dictionary
1047 1049 containing metadata for this marker only. It is merged with the global
1048 1050 metadata specified through the `metadata` argument of this function.
1049 1051 Any string values in metadata must be UTF-8 bytes.
1050 1052
1051 1053 Trying to obsolete a public changeset will raise an exception.
1052 1054
1053 1055 Current user and date are used except if specified otherwise in the
1054 1056 metadata attribute.
1055 1057
1056 1058 This function operates within a transaction of its own, but does
1057 1059 not take any lock on the repo.
1058 1060 """
1059 1061 # prepare metadata
1060 1062 if metadata is None:
1061 1063 metadata = {}
1062 1064 if b'user' not in metadata:
1063 1065 luser = (
1064 1066 repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
1065 1067 )
1066 1068 metadata[b'user'] = encoding.fromlocal(luser)
1067 1069
1068 1070 # Operation metadata handling
1069 1071 useoperation = repo.ui.configbool(
1070 1072 b'experimental', b'evolution.track-operation'
1071 1073 )
1072 1074 if useoperation and operation:
1073 1075 metadata[b'operation'] = operation
1074 1076
1075 1077 # Effect flag metadata handling
1076 1078 saveeffectflag = repo.ui.configbool(
1077 1079 b'experimental', b'evolution.effect-flags'
1078 1080 )
1079 1081
1080 1082 with repo.transaction(b'add-obsolescence-marker') as tr:
1081 1083 markerargs = []
1082 1084 for rel in relations:
1083 1085 predecessors = rel[0]
1084 1086 if not isinstance(predecessors, tuple):
1085 1087 # preserve compat with the old API until all callers are migrated
1086 1088 predecessors = (predecessors,)
1087 1089 if len(predecessors) > 1 and len(rel[1]) != 1:
1088 1090 msg = b'Fold markers can only have 1 successor, not %d'
1089 1091 raise error.ProgrammingError(msg % len(rel[1]))
1090 1092 foldid = None
1091 1093 foldsize = len(predecessors)
1092 1094 if 1 < foldsize:
1093 1095 foldid = makefoldid(rel, metadata[b'user'])
1094 1096 for foldidx, prec in enumerate(predecessors, 1):
1095 1097 sucs = rel[1]
1096 1098 localmetadata = metadata.copy()
1097 1099 if len(rel) > 2:
1098 1100 localmetadata.update(rel[2])
1099 1101 if foldid is not None:
1100 1102 localmetadata[b'fold-id'] = foldid
1101 1103 localmetadata[b'fold-idx'] = b'%d' % foldidx
1102 1104 localmetadata[b'fold-size'] = b'%d' % foldsize
1103 1105
1104 1106 if not prec.mutable():
1105 1107 raise error.Abort(
1106 1108 _(b"cannot obsolete public changeset: %s") % prec,
1107 1109 hint=b"see 'hg help phases' for details",
1108 1110 )
1109 1111 nprec = prec.node()
1110 1112 nsucs = tuple(s.node() for s in sucs)
1111 1113 npare = None
1112 1114 if not nsucs:
1113 1115 npare = tuple(p.node() for p in prec.parents())
1114 1116 if nprec in nsucs:
1115 1117 raise error.Abort(
1116 1118 _(b"changeset %s cannot obsolete itself") % prec
1117 1119 )
1118 1120
1119 1121 # Effect flag can be different by relation
1120 1122 if saveeffectflag:
1121 1123 # The effect flag is saved in a versioned field name for
1122 1124 # future evolution
1123 1125 effectflag = obsutil.geteffectflag(prec, sucs)
1124 1126 localmetadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag
1125 1127
1126 1128 # Creating the marker causes the hidden cache to become
1127 1129 # invalid, which causes recomputation when we ask for
1128 1130 # prec.parents() above. Resulting in n^2 behavior. So let's
1129 1131 # prepare all of the args first, then create the markers.
1130 1132 markerargs.append((nprec, nsucs, npare, localmetadata))
1131 1133
1132 1134 for args in markerargs:
1133 1135 nprec, nsucs, npare, localmetadata = args
1134 1136 repo.obsstore.create(
1135 1137 tr,
1136 1138 nprec,
1137 1139 nsucs,
1138 1140 flag,
1139 1141 parents=npare,
1140 1142 date=date,
1141 1143 metadata=localmetadata,
1142 1144 ui=repo.ui,
1143 1145 )
1144 1146 repo.filteredrevcache.clear()
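For reference, a typical call might look like the sketch below; `repo`, `old` and `new` are assumed to be an existing repository object and two changectxs obtained elsewhere, and the note text is invented:

def obsolete_one_sketch(repo, old, new):
    # record that `old` was rewritten into `new`: one relation, made of one
    # predecessor tuple and one successor tuple, plus per-call metadata
    createmarkers(
        repo,
        [((old,), (new,))],
        metadata={b'note': b'example rewrite'},
        operation=b'amend',
    )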