##// END OF EJS Templates
py3: prevent transformer from adding b'' by adding r'' prefix...
Pulkit Goyal -
r37596:56df2ca5 default
parent child Browse files
Show More
@@ -1,1186 +1,1186 b''
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 [infinitepush]
10 10 # Server-side and client-side option. Pattern of the infinitepush bookmark
11 11 branchpattern = PATTERN
12 12
13 13 # Server or client
14 14 server = False
15 15
16 16 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
17 17 indextype = disk
18 18
19 19 # Server-side option. Used only if indextype=sql.
20 20 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
21 21 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
22 22
23 23 # Server-side option. Used only if indextype=disk.
24 24 # Filesystem path to the index store
25 25 indexpath = PATH
26 26
27 27 # Server-side option. Possible values: 'disk' or 'external'
28 28 # Fails if not set
29 29 storetype = disk
30 30
31 31 # Server-side option.
32 32 # Path to the binary that will save bundle to the bundlestore
33 33 # Formatted cmd line will be passed to it (see `put_args`)
34 34 put_binary = put
35 35
36 36 # Server-side option. Used only if storetype=external.
37 37 # Format cmd-line string for put binary. Placeholder: {filename}
38 38 put_args = {filename}
39 39
40 40 # Server-side option.
41 41 # Path to the binary that get bundle from the bundlestore.
42 42 # Formatted cmd line will be passed to it (see `get_args`)
43 43 get_binary = get
44 44
45 45 # Server-side option. Used only if storetype=external.
46 46 # Format cmd-line string for get binary. Placeholders: {filename} {handle}
47 47 get_args = {filename} {handle}
48 48
49 49 # Server-side option
50 50 logfile = FILE
51 51
52 52 # Server-side option
53 53 loglevel = DEBUG
54 54
55 55 # Server-side option. Used only if indextype=sql.
56 56 # Sets mysql wait_timeout option.
57 57 waittimeout = 300
58 58
59 59 # Server-side option. Used only if indextype=sql.
60 60 # Sets mysql innodb_lock_wait_timeout option.
61 61 locktimeout = 120
62 62
63 63 # Server-side option. Used only if indextype=sql.
64 64 # Name of the repository
65 65 reponame = ''
66 66
67 67 # Client-side option. Used by --list-remote option. List of remote scratch
68 68 # patterns to list if no patterns are specified.
69 69 defaultremotepatterns = ['*']
70 70
71 71 # Instructs infinitepush to forward all received bundle2 parts to the
72 72 # bundle for storage. Defaults to False.
73 73 storeallparts = True
74 74
75 75 # routes each incoming push to the bundlestore. defaults to False
76 76 pushtobundlestore = True
77 77
78 78 [remotenames]
79 79 # Client-side option
80 80 # This option should be set only if remotenames extension is enabled.
81 81 # Whether remote bookmarks are tracked by remotenames extension.
82 82 bookmarks = True
83 83 """
84 84
85 85 from __future__ import absolute_import
86 86
87 87 import collections
88 88 import contextlib
89 89 import errno
90 90 import functools
91 91 import logging
92 92 import os
93 93 import random
94 94 import re
95 95 import socket
96 96 import subprocess
97 97 import tempfile
98 98 import time
99 99
100 100 from mercurial.node import (
101 101 bin,
102 102 hex,
103 103 )
104 104
105 105 from mercurial.i18n import _
106 106
107 107 from mercurial.utils import (
108 108 procutil,
109 109 stringutil,
110 110 )
111 111
112 112 from mercurial import (
113 113 bundle2,
114 114 changegroup,
115 115 commands,
116 116 discovery,
117 117 encoding,
118 118 error,
119 119 exchange,
120 120 extensions,
121 121 hg,
122 122 localrepo,
123 123 peer,
124 124 phases,
125 125 pushkey,
126 126 pycompat,
127 127 registrar,
128 128 util,
129 129 wireproto,
130 130 )
131 131
132 132 from . import (
133 133 bundleparts,
134 134 common,
135 135 )
136 136
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# Declare every config knob this extension reads, with its default value.
configitem('infinitepush', 'server',
    default=False,
)
configitem('infinitepush', 'storetype',
    default='',
)
configitem('infinitepush', 'indextype',
    default='',
)
configitem('infinitepush', 'indexpath',
    default='',
)
configitem('infinitepush', 'storeallparts',
    default=False,
)
configitem('infinitepush', 'reponame',
    default='',
)
configitem('scratchbranch', 'storepath',
    default='',
)
configitem('infinitepush', 'branchpattern',
    default='',
)
configitem('infinitepush', 'pushtobundlestore',
    default=False,
)
configitem('experimental', 'server-bundlestore-bookmark',
    default='',
)
configitem('experimental', 'infinitepush-scratchpush',
    default=False,
)

# Shorthands for the experimental config section/items used throughout.
experimental = 'experimental'
configbookmark = 'server-bundlestore-bookmark'
configscratchpush = 'infinitepush-scratchpush'

scratchbranchparttype = bundleparts.scratchbranchparttype
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
# Default matcher matches nothing; commonsetup() replaces it when
# infinitepush.branchpattern is configured.
_scratchbranchmatcher = lambda x: False
_maybehash = re.compile(r'^[a-f0-9]+$').search
189 189
def _buildexternalbundlestore(ui):
    """Build an externalbundlestore from [infinitepush] put/get settings.

    Aborts when either the put or the get binary is not configured.
    """
    put_binary = ui.config('infinitepush', 'put_binary')
    if not put_binary:
        raise error.Abort('put binary is not specified')
    get_binary = ui.config('infinitepush', 'get_binary')
    if not get_binary:
        raise error.Abort('get binary is not specified')
    put_args = ui.configlist('infinitepush', 'put_args', [])
    get_args = ui.configlist('infinitepush', 'get_args', [])
    # imported lazily so client-only setups never load the store module
    from . import store
    return store.externalbundlestore(put_binary, put_args, get_binary, get_args)
201 201
def _buildsqlindex(ui):
    """Construct a sqlindexapi from the [infinitepush] sql settings.

    Requires infinitepush.sqlhost ('IP:PORT:DB_NAME:USER:PASSWORD') and
    infinitepush.reponame; aborts when either is missing.
    """
    sqlhost = ui.config('infinitepush', 'sqlhost')
    if not sqlhost:
        raise error.Abort(_('please set infinitepush.sqlhost'))
    host, port, db, user, password = sqlhost.split(':')
    reponame = ui.config('infinitepush', 'reponame')
    if not reponame:
        raise error.Abort(_('please set infinitepush.reponame'))

    logfile = ui.config('infinitepush', 'logfile', '')
    waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
    locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
    # imported lazily: the sql index is server-only
    from . import sqlindexapi
    return sqlindexapi.sqlindexapi(
        reponame, host, port, db, user, password,
        logfile, _getloglevel(ui), waittimeout=waittimeout,
        locktimeout=locktimeout)
219 219
220 220 def _getloglevel(ui):
221 221 loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
222 222 numeric_loglevel = getattr(logging, loglevel.upper(), None)
223 223 if not isinstance(numeric_loglevel, int):
224 224 raise error.Abort(_('invalid log level %s') % loglevel)
225 225 return numeric_loglevel
226 226
def _tryhoist(ui, remotebookmark):
    '''returns a bookmarks with hoisted part removed

    Remotenames extension has a 'hoist' config that allows to use remote
    bookmarks without specifying remote path. For example, 'hg update master'
    works as well as 'hg update remote/master'. We want to allow the same in
    infinitepush.
    '''

    if not common.isremotebooksenabled(ui):
        return remotebookmark
    prefix = ui.config('remotenames', 'hoistedpeer') + '/'
    if not remotebookmark.startswith(prefix):
        return remotebookmark
    return remotebookmark[len(prefix):]
241 241
class bundlestore(object):
    """Server-side storage for infinitepush bundles.

    Pairs a blob store (`self.store`, holding raw bundle contents) with an
    index (`self.index`, mapping bookmarks/nodes to stored bundles).  Both
    backends are selected via config; unknown values abort.
    """
    def __init__(self, repo):
        self._repo = repo
        # blob store backend: 'disk' (files under the repo) or 'external'
        # (put/get helper binaries)
        storetype = self._repo.ui.config('infinitepush', 'storetype')
        if storetype == 'disk':
            from . import store
            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == 'external':
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush store type specified %s') % storetype)

        # index backend: 'disk' (file based) or 'sql'
        indextype = self._repo.ui.config('infinitepush', 'indextype')
        if indextype == 'disk':
            from . import fileindexapi
            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == 'sql':
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush index type specified %s') % indextype)
264 264
265 265 def _isserver(ui):
266 266 return ui.configbool('infinitepush', 'server')
267 267
def reposetup(ui, repo):
    """Attach a bundlestore to local repositories when acting as a server."""
    if _isserver(ui) and repo.local():
        repo.bundlestore = bundlestore(repo)
271 271
def extsetup(ui):
    """Install the hooks shared by both roles, then the role-specific ones."""
    commonsetup(ui)
    if _isserver(ui):
        serverextsetup(ui)
    else:
        clientextsetup(ui)
278 278
def commonsetup(ui):
    """Setup shared by client and server.

    Registers the 'listkeyspatterns' wire command and compiles the
    scratch-branch matcher from infinitepush.branchpattern.
    """
    wireproto.commands['listkeyspatterns'] = (
        wireprotolistkeyspatterns, 'namespace patterns')
    scratchbranchpat = ui.config('infinitepush', 'branchpattern')
    if scratchbranchpat:
        global _scratchbranchmatcher
        # stringmatcher returns (kind, pattern, matcher); only the matcher
        # is kept, replacing the module default that matches nothing
        kind, pat, _scratchbranchmatcher = \
            stringutil.stringmatcher(scratchbranchpat)
287 287
def serverextsetup(ui):
    """Server-side hook installation.

    Wraps the bundle2 'pushkey' and 'phase-heads' part handlers, repo
    listkeys, the 'lookup' wire command, getbundlechunks and processparts
    so scratch content is served from / stored to the bundlestore.
    """
    origpushkeyhandler = bundle2.parthandlermapping['pushkey']

    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)
    # bundle2 dispatch reads .params off the handler, so carry them over
    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping['pushkey'] = newpushkeyhandler

    orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: \
        bundle2handlephases(orighandlephasehandler, *args, **kwargs)
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler

    extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                            localrepolistkeys)
    wireproto.commands['lookup'] = (
        _lookupwrap(wireproto.commands['lookup'][0]), 'key')
    extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, 'processparts', processparts)
309 309
def clientextsetup(ui):
    """Client-side hook installation.

    Wraps push/pull, relaxes the heads check for scratch pushes, exposes
    listkeyspatterns on peers, and moves the scratch part ahead of the
    'changeset' part in bundle2 generation order.
    """
    entry = extensions.wrapcommand(commands.table, 'push', _push)

    entry[1].append(
        ('', 'bundle-store', None,
         _('force push to go to bundle store (EXPERIMENTAL)')))

    extensions.wrapcommand(commands.table, 'pull', _pull)

    extensions.wrapfunction(discovery, 'checkheads', _checkheads)

    wireproto.wirepeer.listkeyspatterns = listkeyspatterns

    # the scratch part must be generated before the changegroup part
    partorder = exchange.b2partsgenorder
    index = partorder.index('changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype)))
327 327
def _checkheads(orig, pushop):
    """Skip the new-remote-heads check when pushing to the bundlestore."""
    if pushop.ui.configbool(experimental, configscratchpush, False):
        return
    return orig(pushop)
332 332
def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    """Wire command handler: listkeys restricted to the given patterns."""
    patterns = wireproto.decodelist(patterns)
    d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
    return pushkey.encodekeys(d)
337 337
def localrepolistkeys(orig, self, namespace, patterns=None):
    """listkeys wrapper that also consults the bundlestore index.

    For pattern-restricted bookmark queries, merges scratch bookmarks from
    the index with matching regular bookmarks; everything else defers to
    the original implementation.
    """
    if namespace == 'bookmarks' and patterns:
        index = self.bundlestore.index
        results = {}
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith('*'):
                # rewrite glob tails into the regex form stringmatcher expects
                pattern = 're:^' + pattern[:-1] + '.*'
            kind, pat, matcher = stringutil.stringmatcher(pattern)
            for bookmark, node in bookmarks.iteritems():
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)
354 354
@peer.batchable
def listkeyspatterns(self, namespace, patterns):
    """Peer method: batched listkeys limited to the given patterns.

    Follows the peer.batchable protocol: first yield the encoded arguments
    with a future, then yield the decoded reply.  Yields an empty result
    when the server lacks the pushkey capability.
    """
    if not self.capable('pushkey'):
        yield {}, None
    f = peer.future()
    self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
                  (namespace, patterns))
    yield {
        'namespace': encoding.fromlocal(namespace),
        'patterns': wireproto.encodelist(patterns)
    }, f
    d = f.value
    self.ui.debug('received listkey for "%s": %i bytes\n'
                  % (namespace, len(d)))
    yield pushkey.decodekeys(d)
370 370
371 371 def _readbundlerevs(bundlerepo):
372 372 return list(bundlerepo.revs('bundle()'))
373 373
def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
    '''Tells remotefilelog to include all changed files to the changegroup

    By default remotefilelog doesn't include file content to the changegroup.
    But we need to include it if we are fetching from bundlestore.
    '''
    changedfiles = set()
    cl = bundlerepo.changelog
    for r in bundlerevs:
        # [3] means changed files
        changedfiles.update(cl.read(r)[3])
    # nothing changed: return caps untouched (may be None)
    if not changedfiles:
        return bundlecaps

    changedfiles = '\0'.join(changedfiles)
    newcaps = []
    appended = False
    for cap in (bundlecaps or []):
        if cap.startswith('excludepattern='):
            # extend the existing excludepattern cap with our file list
            newcaps.append('\0'.join((cap, changedfiles)))
            appended = True
        else:
            newcaps.append(cap)
    if not appended:
        # Not found excludepattern cap. Just append it
        newcaps.append('excludepattern=' + changedfiles)

    return newcaps
402 402
def _rebundle(bundlerepo, bundleroots, unknownhead):
    '''
    Bundle may include more revision then user requested. For example,
    if user asks for revision but bundle also consists its descendants.
    This function will filter out all revision that user is not requested.
    '''
    parts = []

    version = '02'
    # regenerate a changegroup for just unknownhead and its ancestors back
    # to the roots the client already has
    outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
                                  missingheads=[unknownhead])
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart('changegroup', data=cgstream)
    cgpart.addparam('version', version)
    parts.append(cgpart)

    return parts
421 421
422 422 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
423 423 cl = bundlerepo.changelog
424 424 bundleroots = []
425 425 for rev in bundlerevs:
426 426 node = cl.node(rev)
427 427 parents = cl.parents(node)
428 428 for parent in parents:
429 429 # include all revs that exist in the main repo
430 430 # to make sure that bundle may apply client-side
431 431 if parent in oldrepo:
432 432 bundleroots.append(parent)
433 433 return bundleroots
434 434
435 435 def _needsrebundling(head, bundlerepo):
436 436 bundleheads = list(bundlerepo.revs('heads(bundle())'))
437 437 return not (len(bundleheads) == 1 and
438 438 bundlerepo[bundleheads[0]].node() == head)
439 439
def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''generates bundle that will be send to the user

    returns tuple with raw bundle string and bundle type
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        # the stored bundle already has exactly the requested head: reuse
        # its parts verbatim instead of regenerating a changegroup
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart('changegroup',
                                          data=unbundler._stream.read())
                part.addparam('version', '01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == 'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        'unexpected bundle without changegroup part, ' +
                        'head: %s' % hex(head),
                        hint='report to administrator')
            else:
                raise error.Abort('unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts
475 475
def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    """getbundle wrapper that serves scratch heads from the bundlestore.

    Heads unknown to the repo are downloaded from the bundlestore and
    opened as overlay bundle repos; the response is the normal bundle plus
    the scratch parts, with the phases of scratch commits reported as
    draft.  Downloaded bundle files are always cleaned up.
    """
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if head not in repo.changelog.nodemap:
                if head not in nodestobundle:
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui)
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        # scratch commits must stay draft on the client
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (bundlerepo, bundleroots,
                                               newbundlefile)

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head]))
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't cleanup the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
    try:
        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            origvalues = orig(self, namespace)
            if namespace == 'phases' and pullfrombundlestore:
                if origvalues.get('publishing') == 'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues['publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                                _listkeys)
        wrappedlistkeys = True
        # replace scratch heads by the roots the client can pull normally
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(repo, source, heads=heads,
                      bundlecaps=bundlecaps, **kwargs)
    finally:
        # always restore the wrapped entry points, even on error
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
                                      _listkeys)
    return result
559 559
def _lookupwrap(orig):
    """Wrap the 'lookup' wire command to resolve scratch names.

    Scratch bookmarks are resolved via the bundlestore index; other keys
    fall through to the normal lookup, with a last-chance check for nodes
    that only exist in stored bundles.
    """
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        # NOTE(review): isinstance(..., str) looks py2-flavoured; on py3
        # wire keys would be bytes — confirm against the py3 porting state
        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return "%s %s\n" % (1, scratchnode)
            else:
                return "%s %s\n" % (0, 'scratch branch %s not found' % localkey)
        else:
            try:
                r = hex(repo.lookup(localkey))
                return "%s %s\n" % (1, r)
            except Exception as inst:
                # node may exist only inside a stored bundle
                if repo.bundlestore.index.getbundle(localkey):
                    return "%s %s\n" % (1, localkey)
                else:
                    r = str(inst)
                    return "%s %s\n" % (0, r)
    return _lookup
581 581
def _pull(orig, ui, repo, source="default", **opts):
    """pull wrapper that resolves scratch bookmarks via the bundlestore.

    Scratch bookmark names are translated to nodes with listkeyspatterns,
    discovery is forced to report incoming changes for unknown nodes, and
    remote scratch bookmarks are preserved across the pull.
    """
    opts = pycompat.byteskwargs(opts)
    # Copy paste from `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get('rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get('bookmark'):
        bookmarks = []
        revs = opts.get('rev') or []
        for bookmark in opts.get('bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = 'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                'bookmarks', patterns=scratchbookmarks)
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort('remote bookmark %s not found!' %
                                      bookmark)
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        # pull the resolved nodes instead of the scratch names
        opts['bookmark'] = bookmarks
        opts['rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(discovery, 'findcommonincoming',
                                _findcommonincoming)
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before pull and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **pycompat.strkwargs(opts))
        # TODO(stash): race condition is possible
        # if scratch bookmarks was updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, 'findcommonincoming')
638 638
def _readscratchremotebookmarks(ui, repo, other):
    """Return {name: hexnode} of remote scratch bookmarks for path `other`.

    Returns an empty dict when the remotenames extension is not enabled.
    """
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find('remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure we have it up to date
        # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
        # and it results in deleting scratch bookmarks. Our best guess how to
        # fix it is to use `clearnames()`
        repo._remotenames.clearnames()
        for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names['remotebookmarks'].nodes(repo,
                                                            remotebookmark)
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}
659 659
def _saveremotebookmarks(repo, newbookmarks, remote):
    """Merge `newbookmarks` into remotenames' saved state for `remote`.

    Existing bookmarks/branches for the path are preserved; a new entry
    that clashes with an existing bookmark keeps the current node.
    """
    remotenamesext = extensions.find('remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == 'bookmarks':
            if rname in newbookmarks:
                # It's possible if we have a normal bookmark that matches
                # scratch branch pattern. In this case just use the current
                # bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == 'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in newbookmarks.iteritems():
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
683 683
def _savelocalbookmarks(repo, bookmarks):
    """Create/update local bookmarks {name: node} in a single transaction."""
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
        changes = []
        for scratchbook, node in bookmarks.iteritems():
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)
693 693
694 694 def _findcommonincoming(orig, *args, **kwargs):
695 695 common, inc, remoteheads = orig(*args, **kwargs)
696 696 return common, True, remoteheads
697 697
def _push(orig, ui, repo, dest=None, *args, **opts):
    """push wrapper routing scratch-bookmark pushes to the bundlestore."""

    bookmark = opts.get(r'bookmark')
    # we only support pushing one infinitepush bookmark at once
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = ''

    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}

    with ui.configoverride(overrides, 'infinitepush'):
        scratchpush = opts.get('bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # bundle2 can be sent back after push (for example, bundle2
            # containing `pushkey` part to update bookmarks)
            ui.setconfig(experimental, 'bundle2.pushback', True)

        if scratchpush:
            # this is an infinitepush, we don't want the bookmark to be applied
            # rather that should be stored in the bundlestore
            opts[r'bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = extensions.wrapfunction(exchange,
                                                   '_localphasemove',
                                                   _phasemove)
        # Copy-paste from `push` command
        path = ui.paths.getpath(dest, default=('default-push', 'default'))
        if not path:
            raise error.Abort(_('default repository not configured!'),
                              hint=_("see 'hg help config.paths'"))
        destpath = path.pushloc or path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before push and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
        result = orig(ui, repo, dest, *args, **opts)
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                other = hg.peer(repo, opts, destpath)
                fetchedbookmarks = other.listkeyspatterns('bookmarks',
                                                          patterns=[bookmark])
                remotescratchbookmarks.update(fetchedbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result
746 746
def _deleteinfinitepushbookmarks(ui, repo, path, names):
    """Prune remote names by removing the bookmarks we don't want anymore,
    then writing the result back to disk
    """
    remotenamesext = extensions.find('remotenames')

    # remotename format is:
    # (node, nametype ("branches" or "bookmarks"), remote, name)
    nametype_idx = 1
    remote_idx = 2
    name_idx = 3
    remotenames = [remotename for remotename in \
                   remotenamesext.readremotenames(repo) \
                   if remotename[remote_idx] == path]
    remote_bm_names = [remotename[name_idx] for remotename in \
                       remotenames if remotename[nametype_idx] == "bookmarks"]

    # refuse to delete names that do not exist for this path
    for name in names:
        if name not in remote_bm_names:
            raise error.Abort(_("infinitepush bookmark '{}' does not exist "
                                "in path '{}'").format(name, path))

    bookmarks = {}
    branches = collections.defaultdict(list)
    for node, nametype, remote, name in remotenames:
        # keep everything except the bookmarks slated for deletion
        if nametype == "bookmarks" and name not in names:
            bookmarks[name] = node
        elif nametype == "branches":
            # saveremotenames wants binary nodes for branches
            branches[name].append(bin(node))

    remotenamesext.saveremotenames(repo, path, branches, bookmarks)
779 779
def _phasemove(orig, pushop, nodes, phase=phases.public):
    """prevent commits from being marked public

    Since these are going to a scratch branch, they aren't really being
    published."""

    # drop public moves entirely; draft/secret moves pass through
    if phase != phases.public:
        orig(pushop, nodes, phase)
788 788
@exchange.b2partsgenerator(scratchbranchparttype)
def partgen(pushop, bundler):
    """bundle2 part generator emitting the scratch-branch parts.

    For scratch pushes it replaces the normal changegroup step (marking
    'changesets' done); regular pushes and servers without the capability
    are left untouched.
    """
    bookmark = pushop.ui.config(experimental, configbookmark)
    scratchpush = pushop.ui.configbool(experimental, configscratchpush)
    if 'changesets' in pushop.stepsdone or not scratchpush:
        return

    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    # claim the changesets step so the default changegroup part is skipped
    pushop.stepsdone.add('changesets')
    if not pushop.outgoing.missing:
        pushop.ui.status(_('no changes found\n'))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This let's it switch the part processing to our infinitepush
    # code path.
    bundler.addparam("infinitepush", "True")

    scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
                                                     pushop.remote,
                                                     pushop.outgoing,
                                                     pushop.ui,
                                                     bookmark)

    for scratchpart in scratchparts:
        bundler.addpart(scratchpart)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply
824 824
# advertise support for the scratchbranch part to pushing clients
bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
826 826
def _getrevs(bundle, oldnode, force, bookmark):
    'extracts and validates the revs to be imported'
    revs = [bundle[r] for r in bundle.revs('sort(bundle())')]

    # new bookmark
    if oldnode is None:
        return revs

    # Fast forward update
    if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
        return revs

    # NOTE(review): non-fast-forward updates also fall through to returning
    # revs, so `force` is unused and nothing is actually rejected here —
    # confirm whether a non-ff validation was intentionally removed
    return revs
840 840
@contextlib.contextmanager
def logservicecall(logger, service, **kwargs):
    """Context manager logging 'start' then 'success'/'failure' events.

    Elapsed time is reported in milliseconds; on failure the error message
    is logged and the exception re-raised.
    """
    started = time.time()
    logger(service, eventtype='start', **kwargs)
    def _elapsedms():
        return (time.time() - started) * 1000
    try:
        yield
        logger(service, eventtype='success', elapsedms=_elapsedms(), **kwargs)
    except Exception as e:
        logger(service, eventtype='failure', elapsedms=_elapsedms(),
               errormsg=str(e), **kwargs)
        raise
854 854
def _getorcreateinfinitepushlogger(op):
    """Return the infinitepush logger cached on this bundle operation,
    creating and caching one on first use."""
    cached = op.records['infinitepushlogger']
    if cached:
        return cached[0]

    ui = op.repo.ui
    try:
        username = procutil.getuser()
    except Exception:
        username = 'unknown'
    # Generate random request id to be able to find all logged entries
    # for the same request. Since requestid is pseudo-generated it may
    # not be unique, but we assume that (hostname, username, requestid)
    # is unique.
    random.seed()
    requestid = random.randint(0, 2000000000)
    logger = functools.partial(ui.log, 'infinitepush',
                               user=username,
                               requestid=requestid,
                               hostname=socket.gethostname(),
                               reponame=ui.config('infinitepush',
                                                  'reponame'))
    op.records.add('infinitepushlogger', logger)
    return logger
878 878
def storetobundlestore(orig, repo, op, unbundler):
    """stores the incoming bundle coming from push command to the bundlestore
    instead of applying on the revlogs"""

    repo.ui.status(_("storing changesets on the bundlestore\n"))
    newbundle = bundle2.bundle20(repo.ui)

    # repackage each incoming part into ``newbundle`` instead of applying it
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            repackedpart = None
            if part.type == 'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                repackedpart = bundle2.bundlepart(part.type,
                                                  data=part.read())
                for key, value in part.params.iteritems():
                    repackedpart.addparam(key, value)

                # Certain parts require a response
                if part.type in ('pushkey', 'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart('reply:%s' % part.type)
                        rpart.addparam('in-reply-to', str(part.id),
                                       mandatory=False)
                        rpart.addparam('return', '1', mandatory=False)

            op.records.add(part.type, {
                'return': 1,
            })
            if repackedpart:
                newbundle.addpart(repackedpart)

    # storing the bundle in the bundlestore
    chunks = util.chunkbuffer(newbundle.getchunks())
    fd, bundlefile = tempfile.mkstemp()
    try:
        try:
            # r'' keeps the py3 source transformer from turning the mode
            # string into bytes
            fh = os.fdopen(fd, r'wb')
            fh.write(chunks.read())
        finally:
            fh.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass
928 928
def processparts(orig, repo, op, unbundler):
    """Route infinitepush pushes into the bundlestore.

    Delegates to the wrapped ``orig`` for `hg unbundle` and for bundles
    that are not marked as infinitepush.
    """
    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == 'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to bundle store
    if repo.ui.configbool('infinitepush', 'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    if unbundler.params.get('infinitepush') != 'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')

    newbundle = bundle2.bundle20(repo.ui)
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            storedpart = None
            if part.type == 'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get('cgversion', '01')
                storedpart = bundle2.bundlepart('changegroup',
                                                data=part.read())
                storedpart.addparam('version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need
                # to alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(scratchbranchparttype + '_skippushkey',
                                   True)
                    op.records.add(scratchbranchparttype + '_skipphaseheads',
                                   True)
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind
                    # a config flag for incremental rollout.
                    storedpart = bundle2.bundlepart(part.type,
                                                    data=part.read())
                    for key, value in part.params.iteritems():
                        storedpart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == 'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart('reply:pushkey')
                            rpart.addparam('in-reply-to', str(part.id),
                                           mandatory=False)
                            rpart.addparam('return', '1', mandatory=False)
                else:
                    bundle2._processpart(op, part)

            if handleallparts:
                op.records.add(part.type, {
                    'return': 1,
                })
            if storedpart:
                newbundle.addpart(storedpart)

    # If commits were sent, store them
    if cgparams:
        chunks = util.chunkbuffer(newbundle.getchunks())
        fd, bundlefile = tempfile.mkstemp()
        try:
            try:
                # r'' keeps the py3 source transformer from turning the
                # mode string into bytes
                fh = os.fdopen(fd, r'wb')
                fh.write(chunks.read())
            finally:
                fh.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass
1014 1014
def storebundle(op, params, bundlefile):
    """Validate the bundle at ``bundlefile`` and record it in the
    bundlestore and index, placing the scratch bookmark if one was given.

    ``params`` are the scratch part parameters ('bookmark',
    'bookprevnode', 'force', ...).  Raises error.Abort on an invalid or
    oversized bundle; logs start/success/failure around the operation.
    """
    log = _getorcreateinfinitepushlogger(op)
    parthandlerstart = time.time()
    log(scratchbranchparttype, eventtype='start')
    index = op.repo.bundlestore.index
    store = op.repo.bundlestore.store
    op.records.add(scratchbranchparttype + '_skippushkey', True)

    bundle = None
    try: # guards bundle
        bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
        bundle = hg.repository(op.repo.ui, bundlepath)

        bookmark = params.get('bookmark')
        bookprevnode = params.get('bookprevnode', '')
        force = params.get('force')

        if bookmark:
            oldnode = index.getnode(bookmark)
        else:
            oldnode = None
        bundleheads = bundle.revs('heads(bundle())')
        if bookmark and len(bundleheads) > 1:
            raise error.Abort(
                _('cannot push more than one head to a scratch branch'))

        revs = _getrevs(bundle, oldnode, force, bookmark)

        # Notify the user of what is being pushed
        plural = 's' if len(revs) > 1 else ''
        op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
        maxoutput = 10
        for i in range(0, min(len(revs), maxoutput)):
            firstline = bundle[revs[i]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[i], firstline))

        if len(revs) > maxoutput + 1:
            op.repo.ui.warn(("    ...\n"))
            firstline = bundle[revs[-1]].description().split('\n')[0][:50]
            op.repo.ui.warn(("    %s  %s\n") % (revs[-1], firstline))

        nodesctx = [bundle[rev] for rev in revs]
        inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
        if bundleheads:
            newheadscount = sum(not inindex(rev) for rev in bundleheads)
        else:
            newheadscount = 0
        # If there's a bookmark specified, there should be only one head,
        # so we choose the last node, which will be that head.
        # If a bug or malicious client allows there to be a bookmark
        # with multiple heads, we will place the bookmark on the last head.
        bookmarknode = nodesctx[-1].hex() if nodesctx else None
        key = None
        if newheadscount:
            # the bundle file is binary data: read it in binary mode, not
            # text mode ('r'), which would corrupt it on Windows and is
            # wrong on Python 3
            with open(bundlefile, 'rb') as f:
                bundledata = f.read()
            with logservicecall(log, 'bundlestore',
                                bundlesize=len(bundledata)):
                bundlesizelimit = 100 * 1024 * 1024 # 100 MB
                if len(bundledata) > bundlesizelimit:
                    error_msg = ('bundle is too big: %d bytes. ' +
                                 'max allowed size is 100 MB')
                    raise error.Abort(error_msg % (len(bundledata),))
                key = store.write(bundledata)

        with logservicecall(log, 'index', newheadscount=newheadscount), index:
            if key:
                index.addbundle(key, nodesctx)
            if bookmark:
                index.addbookmark(bookmark, bookmarknode)
                _maybeaddpushbackpart(op, bookmark, bookmarknode,
                                      bookprevnode, params)
        log(scratchbranchparttype, eventtype='success',
            elapsedms=(time.time() - parthandlerstart) * 1000)

    except Exception as e:
        log(scratchbranchparttype, eventtype='failure',
            elapsedms=(time.time() - parthandlerstart) * 1000,
            errormsg=str(e))
        raise
    finally:
        if bundle:
            bundle.close()
1098 1098
@bundle2.parthandler(scratchbranchparttype,
                     ('bookmark', 'bookprevnode', 'force',
                      'pushbackbookmarks', 'cgversion'))
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store'''

    # wrap the raw changegroup payload in a fresh bundle2 envelope
    wrapper = bundle2.bundle20(op.repo.ui)
    cg = bundle2.bundlepart('changegroup', data=part.read())
    cg.addparam('version', part.params.get('cgversion', '01'))
    wrapper.addpart(cg)
    payload = util.chunkbuffer(wrapper.getchunks())

    fd, bundlefile = tempfile.mkstemp()
    try:
        try:
            # r'' keeps the py3 source transformer from turning the mode
            # string into bytes
            fh = os.fdopen(fd, r'wb')
            fh.write(payload.read())
        finally:
            fh.close()
        storebundle(op, part.params, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except OSError as e:
            # a missing temp file is fine; anything else is fatal
            if e.errno != errno.ENOENT:
                raise

    return 1
1128 1128
def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
    """Attach a pushkey part moving the client's scratch bookmark, when
    the client asked for pushback and advertises the capability."""
    if not params.get('pushbackbookmarks'):
        return
    if not (op.reply and 'pushback' in op.reply.capabilities):
        return
    mandatory = {
        'namespace': 'bookmarks',
        'key': bookmark,
        'new': newnode,
        'old': oldnode,
    }
    op.reply.newpart('pushkey', mandatoryparams=mandatory.iteritems())
1139 1139
def bundle2pushkey(orig, op, part):
    '''Wrapper of bundle2.handlepushkey()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.
    '''
    if not op.records[scratchbranchparttype + '_skippushkey']:
        return orig(op, part)

    # infinitepush handled the bookmark itself: reply success directly
    if op.reply is not None:
        rpart = op.reply.newpart('reply:pushkey')
        rpart.addparam('in-reply-to', str(part.id), mandatory=False)
        rpart.addparam('return', '1', mandatory=False)
    return 1
1154 1154
def bundle2handlephases(orig, op, part):
    '''Wrapper of bundle2.handlephases()

    The only goal is to skip calling the original function if flag is set.
    It's set if infinitepush push is happening.
    '''
    skip = op.records[scratchbranchparttype + '_skipphaseheads']
    if skip:
        return
    return orig(op, part)
1166 1166
def _asyncsavemetadata(root, nodes):
    '''starts a separate process that fills metadata for the nodes

    The child process is fired and forgotten (its exit code is never
    read) so that pushes are not slowed down.  Skipped entirely for
    pushes of more than 50 nodes.
    '''
    maxnodes = 50
    if len(nodes) > maxnodes:
        return
    nodesargs = []
    for n in nodes:
        nodesargs.extend(['--node', n])
    with open(os.devnull, 'w+b') as devnull:
        cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
                   '-R', root] + nodesargs
        # Process will run in background. We don't care about the return code
        subprocess.Popen(cmdline, close_fds=True, shell=False,
                         stdin=devnull, stdout=devnull, stderr=devnull)
@@ -1,1814 +1,1814 b''
1 1 # subrepo.py - sub-repository classes and factory
2 2 #
3 3 # Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import posixpath
15 15 import re
16 16 import stat
17 17 import subprocess
18 18 import sys
19 19 import tarfile
20 20 import xml.dom.minidom
21 21
22 22 from .i18n import _
23 23 from . import (
24 24 cmdutil,
25 25 encoding,
26 26 error,
27 27 exchange,
28 28 logcmdutil,
29 29 match as matchmod,
30 30 node,
31 31 pathutil,
32 32 phases,
33 33 pycompat,
34 34 scmutil,
35 35 subrepoutil,
36 36 util,
37 37 vfs as vfsmod,
38 38 )
39 39 from .utils import (
40 40 dateutil,
41 41 procutil,
42 42 stringutil,
43 43 )
44 44
# 'hg' is imported lazily in subrepo()/nullsubrepo() to avoid a circular
# import; until then the module-level name is just a placeholder.
hg = None
# convenience aliases for helpers that live in subrepoutil/util
reporelpath = subrepoutil.reporelpath
subrelpath = subrepoutil.subrelpath
_abssource = subrepoutil._abssource
propertycache = util.propertycache
50 50
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expanded = util.urllocalpath(util.expandpath(path))
    u = util.url(expanded)
    if u.scheme:
        # a real URL: hand it back untouched
        return path
    return util.normpath(os.path.abspath(u.path))
60 60
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    digest = hashlib.sha1(_expandedabspath(remotepath)).digest()
    return node.hex(digest)[0:12]
64 64
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kw):
        # subrepo: path of the subrepository the error originated in
        self.subrepo = kw.pop(r'subrepo', None)
        # cause: sys.exc_info() of the original exception, if any
        self.cause = kw.pop(r'cause', None)
        error.Abort.__init__(self, *args, **kw)
71 71
def annotatesubrepoerror(func):
    """Decorator rewrapping error.Abort raised by ``func`` into
    SubrepoAbort, annotating the message with the subrepo path.

    A SubrepoAbort passing through is re-raised untouched so the same
    error is never annotated twice."""
    def decoratedmethod(self, *args, **kargs):
        try:
            return func(self, *args, **kargs)
        except SubrepoAbort as ex:
            # This exception has already been handled
            raise ex
        except error.Abort as ex:
            subrepo = subrelpath(self)
            errormsg = (stringutil.forcebytestr(ex) + ' '
                        + _('(in subrepository "%s")') % subrepo)
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
    return decoratedmethod
88 88
def _updateprompt(ui, sub, dirty, local, remote):
    """Ask whether to keep the (l)ocal or take the (r)emote subrepo
    revision; returns ui.promptchoice's index (0 = local)."""
    if dirty:
        template = _(' subrepository sources for %s differ\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    else:
        template = _(' subrepository sources for %s differ (in checked out '
                     'version)\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    msg = template % (subrelpath(sub), local, remote)
    return ui.promptchoice(msg, 0)
102 102
def _sanitize(ui, vfs, ignore):
    # Delete any 'hgrc' found inside a '.hg' directory of the tree (except
    # under the directory named by ``ignore``): an hgrc smuggled in through
    # a non-hg subrepo could carry hostile configuration.
    for dirname, dirs, names in vfs.walk():
        for i, d in enumerate(dirs):
            if d.lower() == ignore:
                # prune this subtree from the walk — presumably vfs.walk,
                # like os.walk, honors in-place mutation of ``dirs``
                del dirs[i]
                break
        if vfs.basename(dirname).lower() != '.hg':
            continue
        for f in names:
            if f.lower() == 'hgrc':
                ui.warn(_("warning: removing potentially hostile 'hgrc' "
                          "in '%s'\n") % vfs.join(dirname))
                vfs.unlink(vfs.reljoin(dirname, f))
116 116
def _auditsubrepopath(repo, path):
    # Reject subrepo paths that escape the repository or traverse a
    # symlink.  The auditor doesn't check if the path itself is a symlink,
    # so that case is checked explicitly afterwards.
    pathutil.pathauditor(repo.root)(path)
    if repo.wvfs.islink(path):
        raise error.Abort(_("subrepo '%s' traverses symbolic link") % path)
122 122
# Default for 'subrepos.<type>:allowed' when the user has not configured
# it: only Mercurial subrepos are enabled out of the box.
SUBREPO_ALLOWED_DEFAULTS = {
    'hg': True,
    'git': False,
    'svn': False,
}
128 128
def _checktype(ui, kind):
    """Abort unless subrepos of type ``kind`` are enabled by config."""
    helphint = _("see 'hg help config.subrepos' for details")
    # subrepos.allowed is a master kill switch. If disabled, subrepos are
    # disabled period.
    if not ui.configbool('subrepos', 'allowed', True):
        raise error.Abort(_('subrepos not enabled'), hint=helphint)

    # per-type opt-in; unknown kinds default to disabled
    default = SUBREPO_ALLOWED_DEFAULTS.get(kind, False)
    if not ui.configbool('subrepos', '%s:allowed' % kind, default):
        raise error.Abort(_('%s subrepos not allowed') % kind,
                          hint=helphint)

    if kind not in types:
        raise error.Abort(_('unknown subrepo type %s') % kind)
143 143
def subrepo(ctx, path, allowwdir=False, allowcreate=True):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules because it
    # wants to make repo objects from deep inside the stack, so we
    # manually delay the circular import to not break scripts that don't
    # use our demand-loading
    global hg
    from . import hg as h
    hg = h

    repo = ctx.repo()
    _auditsubrepopath(repo, path)
    source, rev, kind = ctx.substate[path]
    _checktype(repo.ui, kind)
    if allowwdir:
        rev = ctx.subrev(path)
    return types[kind](ctx, path, (source, rev), allowcreate)
161 161
def nullsubrepo(ctx, path, pctx):
    """return an empty subrepo in pctx for the extant subrepo in ctx"""
    # subrepo inherently violates our import layering rules because it
    # wants to make repo objects from deep inside the stack, so we
    # manually delay the circular import to not break scripts that don't
    # use our demand-loading
    global hg
    from . import hg as h
    hg = h

    repo = ctx.repo()
    _auditsubrepopath(repo, path)
    state = ctx.substate[path]
    kind = state[2]
    _checktype(repo.ui, kind)
    # hg subrepos use the null node as their "empty" revision; other
    # kinds use an empty string
    subrev = "0" * 40 if kind == 'hg' else ''
    return types[kind](pctx, path, (state[0], subrev), True)
180 180
181 181 # subrepo classes need to implement the following abstract class:
182 182
class abstractsubrepo(object):
    """Base class for subrepo implementations.

    Concrete subclasses (one per subrepo kind) override the
    NotImplementedError methods; the remaining methods provide safe
    defaults (no-op, warning, or "nothing to report").
    """

    def __init__(self, ctx, path):
        """Initialize abstractsubrepo part

        ``ctx`` is the context referring this subrepository in the
        parent repository.

        ``path`` is the path to this subrepository as seen from
        innermost repository.
        """
        self.ui = ctx.repo().ui
        self._ctx = ctx
        self._path = path

    def addwebdirpath(self, serverpath, webconf):
        """Add the hgwebdir entries for this subrepo, and any of its subrepos.

        ``serverpath`` is the path component of the URL for this repo.

        ``webconf`` is the dictionary of hgwebdir entries.
        """
        pass

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False, missing=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate. If missing
        is true, check for deleted files.
        """
        raise NotImplementedError

    def dirtyreason(self, ignoreupdate=False, missing=False):
        """return reason string if it is ``dirty()``

        Returned string should have enough information for the message
        of exception.

        This returns None, otherwise.
        """
        if self.dirty(ignoreupdate=ignoreupdate, missing=missing):
            return _('uncommitted changes in subrepository "%s"'
                     ) % subrelpath(self)

    def bailifchanged(self, ignoreupdate=False, hint=None):
        """raise Abort if subrepository is ``dirty()``
        """
        dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate,
                                       missing=True)
        if dirtyreason:
            raise error.Abort(dirtyreason, hint=hint)

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        # non-hg subrepos have no phase concept: treat them as public
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, prefix, explicitonly, **opts):
        """schedule files for addition; default: nothing added"""
        return []

    def addremove(self, matcher, prefix, opts):
        # default: unsupported — warn and report failure (1)
        self.ui.warn("%s: %s" % (prefix, _("addremove is not supported")))
        return 1

    def cat(self, match, fm, fntemplate, prefix, **opts):
        """write matched file contents; default: report failure (1)"""
        return 1

    def status(self, rev2, **opts):
        # default: an empty status (no changes to report)
        return scmutil.status([], [], [], [], [], [], [])

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        """emit a diff for the subrepo; default: no output"""
        pass

    def outgoing(self, ui, dest, opts):
        """report outgoing changes; default: nothing outgoing (1)"""
        return 1

    def incoming(self, ui, source, opts):
        """report incoming changes; default: nothing incoming (1)"""
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name, decode):
        """return file data, optionally passed through repo decoders"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def getfileset(self, expr):
        """Resolve the fileset expression for this repo"""
        return set()

    def printfiles(self, ui, m, fm, fmt, subrepos):
        """handle the files command for this subrepo"""
        return 1

    def archive(self, archiver, prefix, match=None, decode=True):
        """add this subrepo's files to ``archiver``; returns file count"""
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        self.ui.progress(_('archiving (%s)') % relpath, 0,
                         unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            # pre-ternary and/or idiom: 0o755 if executable else 0o644
            mode = 'x' in flags and 0o755 or 0o644
            symlink = 'l' in flags
            archiver.addfile(prefix + self._path + '/' + name,
                             mode, symlink, self.filedata(name, decode))
            self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                             unit=_('files'), total=total)
        self.ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''

    def forget(self, match, prefix, dryrun):
        """forget matched files; default: (forgotten, rejected) both empty"""
        return ([], [])

    def removefiles(self, matcher, prefix, after, force, subrepos,
                    dryrun, warnings):
        """remove the matched files from the subrepository and the filesystem,
        possibly by force and/or after the file has been removed from the
        filesystem. Return 0 on success, 1 on any warning.
        """
        warnings.append(_("warning: removefiles not implemented (%s)")
                        % self._path)
        return 1

    def revert(self, substate, *pats, **opts):
        # default: unsupported — warn and revert nothing
        self.ui.warn(_('%s: reverting %s subrepos is unsupported\n') \
                     % (substate[0], substate[2]))
        return []

    def shortid(self, revid):
        """return an abbreviated form of ``revid``; default: unchanged"""
        return revid

    def unshare(self):
        '''
        convert this repository from shared to normal storage.
        '''

    def verify(self):
        '''verify the integrity of the repository. Return 0 on success or
        warning, 1 on any error.
        '''
        return 0

    @propertycache
    def wvfs(self):
        """return vfs to access the working directory of this subrepository
        """
        return vfsmod.vfs(self._ctx.repo().wvfs.join(self._path))

    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path)
398 398
399 399 class hgsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state, allowcreate):
        # ``state`` is the (source, revid) pair recorded in .hgsubstate
        super(hgsubrepo, self).__init__(ctx, path)
        self._state = state
        r = ctx.repo()
        root = r.wjoin(path)
        # only create on disk when allowed and nothing is checked out yet
        create = allowcreate and not r.wvfs.exists('%s/.hg' % path)
        self._repo = hg.repository(r.baseui, root, create=create)

        # Propagate the parent's --hidden option
        if r is r.unfiltered():
            self._repo = self._repo.unfiltered()

        self.ui = self._repo.ui
        # forward selected parent config items into the subrepo's ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self.ui.setconfig(s, k, v, 'subrepo')
        # internal config: ui._usedassubrepo
        self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
        self._initrepo(r, state[0], create)
420 420
    @annotatesubrepoerror
    def addwebdirpath(self, serverpath, webconf):
        """Register this subrepo (and any nested ones) with hgwebdir."""
        cmdutil.addwebdirpath(self._repo, subrelpath(self), webconf)
424 424
    def storeclean(self, path):
        # take the repo lock so the store hash is computed on a stable view
        with self._repo.lock():
            return self._storeclean(path)
428 428
429 429 def _storeclean(self, path):
430 430 clean = True
431 431 itercache = self._calcstorehash(path)
432 432 for filehash in self._readstorehashcache(path):
433 433 if filehash != next(itercache, None):
434 434 clean = False
435 435 break
436 436 if clean:
437 437 # if not empty:
438 438 # the cached and current pull states have a different size
439 439 clean = next(itercache, None) is None
440 440 return clean
441 441
442 442 def _calcstorehash(self, remotepath):
443 443 '''calculate a unique "store hash"
444 444
445 445 This method is used to to detect when there are changes that may
446 446 require a push to a given remote path.'''
447 447 # sort the files that will be hashed in increasing (likely) file size
448 448 filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
449 449 yield '# %s\n' % _expandedabspath(remotepath)
450 450 vfs = self._repo.vfs
451 451 for relname in filelist:
452 452 filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest())
453 453 yield '%s = %s\n' % (relname, filehash)
454 454
    @propertycache
    def _cachestorehashvfs(self):
        # vfs rooted at .hg/cache/storehash, one cache file per remote
        return vfsmod.vfs(self._repo.vfs.join('cache/storehash'))
458 458
    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = _getstorehashcachename(remotepath)
        # the 'try' variant tolerates a missing cache file
        return self._cachestorehashvfs.tryreadlines(cachefile, 'r')
463 463
464 464 def _cachestorehash(self, remotepath):
465 465 '''cache the current store hash
466 466
467 467 Each remote repo requires its own store hash cache, because a subrepo
468 468 store may be "clean" versus a given remote repo, but not versus another
469 469 '''
470 470 cachefile = _getstorehashcachename(remotepath)
471 471 with self._repo.lock():
472 472 storehash = list(self._calcstorehash(remotepath))
473 473 vfs = self._cachestorehashvfs
474 474 vfs.writelines(cachefile, storehash, mode='wb', notindexed=True)
475 475
476 476 def _getctx(self):
477 477 '''fetch the context for this subrepo revision, possibly a workingctx
478 478 '''
479 479 if self._ctx.rev() is None:
480 480 return self._repo[None] # workingctx if parent is workingctx
481 481 else:
482 482 rev = self._state[1]
483 483 return self._repo[rev]
484 484
    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        # link this repo to its parent and record where it was cloned from
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            # seed a minimal hgrc with default/default-push paths so the
            # fresh subrepo knows where to pull from and push to
            lines = ['[paths]\n']

            def addpathconfig(key, value):
                if value:
                    lines.append('%s = %s\n' % (key, value))
                    self.ui.setconfig('paths', key, value, 'subrepo')

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)

            self._repo.vfs.write('hgrc', util.tonativeeol(''.join(lines)))
505 505
    @annotatesubrepoerror
    def add(self, ui, match, prefix, explicitonly, **opts):
        """Schedule matched files for addition via cmdutil.add."""
        return cmdutil.add(ui, self._repo, match,
                           self.wvfs.reljoin(prefix, self._path),
                           explicitonly, **opts)
511 511
512 512 @annotatesubrepoerror
513 513 def addremove(self, m, prefix, opts):
514 514 # In the same way as sub directories are processed, once in a subrepo,
515 515 # always entry any of its subrepos. Don't corrupt the options that will
516 516 # be used to process sibling subrepos however.
517 517 opts = copy.copy(opts)
518 518 opts['subrepos'] = True
519 519 return scmutil.addremove(self._repo, m,
520 520 self.wvfs.reljoin(prefix, self._path), opts)
521 521
    @annotatesubrepoerror
    def cat(self, match, fm, fntemplate, prefix, **opts):
        """Write matched file contents at this subrepo's pinned revision."""
        rev = self._state[1]
        ctx = self._repo[rev]
        return cmdutil.cat(self.ui, self._repo, ctx, match, fm, fntemplate,
                           prefix, **opts)
528 528
529 529 @annotatesubrepoerror
530 530 def status(self, rev2, **opts):
531 531 try:
532 532 rev1 = self._state[1]
533 533 ctx1 = self._repo[rev1]
534 534 ctx2 = self._repo[rev2]
535 535 return self._repo.status(ctx1, ctx2, **opts)
536 536 except error.RepoLookupError as inst:
537 537 self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
538 538 % (inst, subrelpath(self)))
539 539 return scmutil.status([], [], [], [], [], [], [])
540 540
    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        """Show a diff inside the subrepo between its recorded revision and
        node2; lookup errors warn instead of aborting."""
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            logcmdutil.diffordiffstat(ui, self._repo, diffopts,
                                      node1, node2, match,
                                      prefix=posixpath.join(prefix, self._path),
                                      listsubrepos=True, **opts)
        except error.RepoLookupError as inst:
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))
556 556
    @annotatesubrepoerror
    def archive(self, archiver, prefix, match=None, decode=True):
        """Archive this subrepo at its recorded revision, recursing into
        nested subrepos; returns the total number of files archived."""
        # make sure the working copy is at the recorded revision first
        self._get(self._state + ('hg',))
        files = self.files()
        if match:
            files = [f for f in files if match(f)]
        rev = self._state[1]
        ctx = self._repo[rev]
        scmutil.fileprefetchhooks(self._repo, ctx, files)
        total = abstractsubrepo.archive(self, archiver, prefix, match)
        # recurse into this subrepo's own subrepos
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath, True)
            submatch = matchmod.subdirmatcher(subpath, match)
            total += s.archive(archiver, prefix + self._path + '/', submatch,
                               decode)
        return total
573 573
574 574 @annotatesubrepoerror
575 575 def dirty(self, ignoreupdate=False, missing=False):
576 576 r = self._state[1]
577 577 if r == '' and not ignoreupdate: # no state recorded
578 578 return True
579 579 w = self._repo[None]
580 580 if r != w.p1().hex() and not ignoreupdate:
581 581 # different version checked out
582 582 return True
583 583 return w.dirty(missing=missing) # working directory changed
584 584
585 585 def basestate(self):
586 586 return self._repo['.'].hex()
587 587
588 588 def checknested(self, path):
589 589 return self._repo._checknested(self._repo.wjoin(path))
590 590
    @annotatesubrepoerror
    def commit(self, text, user, date):
        """Commit the subrepo and return the new (or unchanged) hex node."""
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)
602 602
603 603 @annotatesubrepoerror
604 604 def phase(self, state):
605 605 return self._repo[state or '.'].phase()
606 606
    @annotatesubrepoerror
    def remove(self):
        """Empty the subrepo working directory without deleting the repo."""
        # we can't fully delete the repository as it may contain
        # local-only history
        self.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)
613 613
    def _get(self, state):
        """Ensure *state*'s revision is available locally.

        Returns True when the revision was already present (caller can
        update directly); otherwise clones/shares/pulls as needed and
        returns False.
        """
        source, revision, kind = state
        parentrepo = self._repo._subparent

        if revision in self._repo.unfiltered():
            # Allow shared subrepos tracked at null to setup the sharedpath
            if len(self._repo) != 0 or not parentrepo.shared():
                return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            # empty repo: replace it with a fresh share or clone
            # use self._repo.vfs instead of self.wvfs to remove .hg only
            self._repo.vfs.rmtree()

            # A remote subrepo could be shared if there is a local copy
            # relative to the parent's share source. But clone pooling doesn't
            # assemble the repos in a tree, so that can't be consistently done.
            # A simpler option is for the user to configure clone pooling, and
            # work with that.
            if parentrepo.shared() and hg.islocal(srcurl):
                self.ui.status(_('sharing subrepo %s from %s\n')
                               % (subrelpath(self), srcurl))
                shared = hg.share(self._repo._subparent.baseui,
                                  other, self._repo.root,
                                  update=False, bookmarks=False)
                self._repo = shared.local()
            else:
                # TODO: find a common place for this and this code in the
                # share.py wrap of the clone command.
                if parentrepo.shared():
                    pool = self.ui.config('share', 'pool')
                    if pool:
                        pool = util.expandpath(pool)

                    shareopts = {
                        'pool': pool,
                        'mode': self.ui.config('share', 'poolnaming'),
                    }
                else:
                    shareopts = {}

                self.ui.status(_('cloning subrepo %s from %s\n')
                               % (subrelpath(self), srcurl))
                other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                         other, self._repo.root,
                                         update=False, shareopts=shareopts)
                self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self.ui.status(_('pulling subrepo %s from %s\n')
                           % (subrelpath(self), srcurl))
            cleansub = self.storeclean(srcurl)
            exchange.pull(self._repo, other)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False
673 673
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Update the subrepo working directory to *state*'s revision."""
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            # revision is already local; warn (and use the unfiltered view)
            # when it is hidden
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                urepo.ui.warn(
                    _('revision %s in subrepository "%s" is hidden\n') \
                    % (revision[0:12], self._path))
            repo = urepo
        hg.updaterepo(repo, revision, overwrite)
689 689
    @annotatesubrepoerror
    def merge(self, state):
        """Merge (or fast-forward) the subrepo to *state*'s revision,
        prompting first when the working directory is dirty."""
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            if anc == cur and dst.branch() == cur.branch():
                # destination descends from the checkout: plain update
                self.ui.debug('updating subrepository "%s"\n'
                              % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                # destination is an ancestor: nothing to do
                self.ui.debug('skipping subrepository "%s"\n'
                              % subrelpath(self))
            else:
                self.ui.debug('merging subrepository "%s"\n' % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                # dirty and diverged: ask before touching the working dir
                if _updateprompt(self.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()
718 718
    @annotatesubrepoerror
    def push(self, opts):
        """Push the subrepo and its committed subrepos.

        Returns False when a nested push fails, None when the store is
        already clean at the destination, or the push's cgresult.
        """
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['.']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                self.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self.ui.status(_('pushing subrepo %s to %s\n') %
                       (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = exchange.push(self._repo, other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res.cgresult
747 747
748 748 @annotatesubrepoerror
749 749 def outgoing(self, ui, dest, opts):
750 750 if 'rev' in opts or 'branch' in opts:
751 751 opts = copy.copy(opts)
752 752 opts.pop('rev', None)
753 753 opts.pop('branch', None)
754 754 return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
755 755
756 756 @annotatesubrepoerror
757 757 def incoming(self, ui, source, opts):
758 758 if 'rev' in opts or 'branch' in opts:
759 759 opts = copy.copy(opts)
760 760 opts.pop('rev', None)
761 761 opts.pop('branch', None)
762 762 return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
763 763
764 764 @annotatesubrepoerror
765 765 def files(self):
766 766 rev = self._state[1]
767 767 ctx = self._repo[rev]
768 768 return ctx.manifest().keys()
769 769
770 770 def filedata(self, name, decode):
771 771 rev = self._state[1]
772 772 data = self._repo[rev][name].data()
773 773 if decode:
774 774 data = self._repo.wwritedata(name, data)
775 775 return data
776 776
777 777 def fileflags(self, name):
778 778 rev = self._state[1]
779 779 ctx = self._repo[rev]
780 780 return ctx.flags(name)
781 781
782 782 @annotatesubrepoerror
783 783 def printfiles(self, ui, m, fm, fmt, subrepos):
784 784 # If the parent context is a workingctx, use the workingctx here for
785 785 # consistency.
786 786 if self._ctx.rev() is None:
787 787 ctx = self._repo[None]
788 788 else:
789 789 rev = self._state[1]
790 790 ctx = self._repo[rev]
791 791 return cmdutil.files(ui, ctx, m, fm, fmt, subrepos)
792 792
    @annotatesubrepoerror
    def getfileset(self, expr):
        """Evaluate fileset *expr* in this subrepo and all nested ones,
        skipping (with a message) subrepos whose revision is missing."""
        if self._ctx.rev() is None:
            ctx = self._repo[None]
        else:
            rev = self._state[1]
            ctx = self._repo[rev]

        files = ctx.getfileset(expr)

        for subpath in ctx.substate:
            sub = ctx.sub(subpath)

            try:
                # prefix nested results with the nested subrepo's path
                files.extend(subpath + '/' + f for f in sub.getfileset(expr))
            except error.LookupError:
                self.ui.status(_("skipping missing subrepository: %s\n")
                               % self.wvfs.reljoin(reporelpath(self), subpath))
        return files
812 812
813 813 def walk(self, match):
814 814 ctx = self._repo[None]
815 815 return ctx.walk(match)
816 816
817 817 @annotatesubrepoerror
818 818 def forget(self, match, prefix, dryrun):
819 819 return cmdutil.forget(self.ui, self._repo, match,
820 820 self.wvfs.reljoin(prefix, self._path),
821 821 True, dryrun=dryrun)
822 822
    @annotatesubrepoerror
    def removefiles(self, matcher, prefix, after, force, subrepos,
                    dryrun, warnings):
        """Delegate 'remove' to cmdutil inside the subrepo."""
        # NOTE(review): 'warnings' is accepted for interface parity with
        # other subrepo types but is not forwarded here -- confirm intended.
        return cmdutil.remove(self.ui, self._repo, matcher,
                              self.wvfs.reljoin(prefix, self._path),
                              after, force, subrepos, dryrun)
829 829
    @annotatesubrepoerror
    def revert(self, substate, *pats, **opts):
        """Revert the subrepo to the state recorded in *substate*."""
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        self.ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get(r'no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts[r'date'] = None
            opts[r'rev'] = substate[1]

            self.filerevert(*pats, **opts)

        # Update the repo to the revision specified in the given substate
        if not opts.get(r'dry_run'):
            self.get(substate, overwrite=True)
851 851
    def filerevert(self, *pats, **opts):
        """Revert files in the subrepo to the revision in opts['rev']."""
        ctx = self._repo[opts[r'rev']]
        parents = self._repo.dirstate.parents()
        if opts.get(r'all'):
            # no explicit patterns: revert everything that is modified
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)
860 860
861 861 def shortid(self, revid):
862 862 return revid[:12]
863 863
    @annotatesubrepoerror
    def unshare(self):
        """Convert a shared subrepo into a full standalone repository."""
        # subrepo inherently violates our import layering rules
        # because it wants to make repo objects from deep inside the stack
        # so we manually delay the circular imports to not break
        # scripts that don't use our demand-loading
        global hg
        from . import hg as h
        hg = h

        # Nothing prevents a user from sharing in a repo, and then making that a
        # subrepo. Alternately, the previous unshare attempt may have failed
        # part way through. So recurse whether or not this layer is shared.
        if self._repo.shared():
            self.ui.status(_("unsharing subrepo '%s'\n") % self._relpath)

        hg.unshare(self.ui, self._repo)
881 881
    def verify(self):
        """Sanity-check the recorded revision; warn when it is hidden or
        missing. Always returns 0 (warnings, never errors)."""
        try:
            rev = self._state[1]
            ctx = self._repo.unfiltered()[rev]
            if ctx.hidden():
                # Since hidden revisions aren't pushed/pulled, it seems worth an
                # explicit warning.
                ui = self._repo.ui
                ui.warn(_("subrepo '%s' is hidden in revision %s\n") %
                        (self._relpath, node.short(self._ctx.node())))
            return 0
        except error.RepoLookupError:
            # A missing subrepo revision may be a case of needing to pull it, so
            # don't treat this as an error.
            self._repo.ui.warn(_("subrepo '%s' not found in revision %s\n") %
                               (self._relpath, node.short(self._ctx.node())))
            return 0
899 899
    @propertycache
    def wvfs(self):
        """return own wvfs for efficiency and consistency
        """
        # the subrepo's working directory vfs is the repo's own wvfs
        return self._repo.wvfs
905 905
    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        # Keep consistent dir separators by avoiding vfs.join(self._path)
        return reporelpath(self._repo)
912 912
class svnsubrepo(abstractsubrepo):
    """Subversion subrepository, driven entirely by shelling out to the
    'svn' command-line client."""

    def __init__(self, ctx, path, state, allowcreate):
        super(svnsubrepo, self).__init__(ctx, path)
        self._state = state
        self._exe = procutil.findexe('svn')
        if not self._exe:
            raise error.Abort(_("'svn' executable not found for subrepo '%s'")
                              % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        """Run svn with *commands* (plus *filename* unless it is None) and
        return (stdout, stderr); abort on failure unless failok is set."""
        cmd = [self._exe]
        extrakw = {}
        if not self.ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw[r'stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = self.wvfs.reljoin(self._ctx.repo().origroot,
                                     self._path, filename)
            cmd.append(path)
        env = dict(encoding.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=procutil.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise error.Abort(stderr or 'exited with code %d'
                                  % p.returncode)
            if stderr:
                self.ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        # (major, minor) of the installed svn client
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(br'^(\d+)\.(\d+)', output)
        if not m:
            raise error.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _svnmissing(self):
        # no .svn directory means the working copy was never checked out
        return not self.wvfs.exists('.svn')

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        # last-committed revision of the working copy
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + pycompat.ossep):
                    # a change inside an external entry flags extchanges
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False, missing=False):
        if self._svnmissing():
            # never checked out: dirty only when a state is recorded
            return self._state[1] != ''
        wcchanged = self._wcchanged()
        changed = wcchanged[0] or (missing and wcchanged[2])
        if not changed:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise error.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise error.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self.ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise error.Abort(_('failed to commit svn changes'))
            raise error.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self.ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        if self.dirty():
            self.ui.warn(_('not removing repo %s because '
                           'it has changes.\n') % self._path)
            return
        self.ui.note(_('removing subrepo %s\n') % self._path)

        self.wvfs.rmtree(forcibly=True)
        try:
            # best effort: prune now-empty parent directories
            pwvfs = self._ctx.repo().wvfs
            pwvfs.removedirs(pwvfs.dirname(self._path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))

        # SEC: check that the ssh url is safe
        util.checksafessh(state[0])

        status, err = self._svncommand(args, failok=True)
        _sanitize(self.ui, self.wvfs, '.svn')
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise error.Abort((status or err).splitlines()[-1])
        self.ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            # prompt when the working copy changed or was never updated
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self.ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        """List all versioned files via 'svn list --recursive --xml'."""
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = pycompat.bytestr(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name, decode):
        # NOTE(review): 'decode' is accepted for interface parity but the
        # svn cat output is returned verbatim -- confirm intended.
        return self._svncommand(['cat'], name)[0]
1143 1143
1144 1144
1145 1145 class gitsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state, allowcreate):
        super(gitsubrepo, self).__init__(ctx, path)
        self._state = state
        # absolute path of the checkout inside the parent working directory
        self._abspath = ctx.repo().wjoin(path)
        self._subparent = ctx.repo()
        # locate a usable git binary up front; aborts if none is found
        self._ensuregit()
1152 1152
    def _ensuregit(self):
        """Find a usable git executable and validate its version.

        Tries 'git', then 'git.cmd' on Windows; aborts when neither can be
        executed or when the reported version is too old.
        """
        try:
            self._gitexecutable = 'git'
            out, err = self._gitnodir(['--version'])
        except OSError as e:
            genericerror = _("error executing git for subrepo '%s': %s")
            notfoundhint = _("check git is installed and in your PATH")
            if e.errno != errno.ENOENT:
                raise error.Abort(genericerror % (
                    self._path, encoding.strtolocal(e.strerror)))
            elif pycompat.iswindows:
                try:
                    self._gitexecutable = 'git.cmd'
                    out, err = self._gitnodir(['--version'])
                except OSError as e2:
                    if e2.errno == errno.ENOENT:
                        raise error.Abort(_("couldn't find 'git' or 'git.cmd'"
                                            " for subrepo '%s'") % self._path,
                                          hint=notfoundhint)
                    else:
                        raise error.Abort(genericerror % (self._path,
                                          encoding.strtolocal(e2.strerror)))
            else:
                raise error.Abort(_("couldn't find git for subrepo '%s'")
                                  % self._path, hint=notfoundhint)
        versionstatus = self._checkversion(out)
        if versionstatus == 'unknown':
            self.ui.warn(_('cannot retrieve git version\n'))
        elif versionstatus == 'abort':
            raise error.Abort(_('git subrepo requires at least 1.6.0 or later'))
        elif versionstatus == 'warning':
            self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
1185 1185
1186 1186 @staticmethod
1187 1187 def _gitversion(out):
1188 1188 m = re.search(br'^git version (\d+)\.(\d+)\.(\d+)', out)
1189 1189 if m:
1190 1190 return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
1191 1191
1192 1192 m = re.search(br'^git version (\d+)\.(\d+)', out)
1193 1193 if m:
1194 1194 return (int(m.group(1)), int(m.group(2)), 0)
1195 1195
1196 1196 return -1
1197 1197
    @staticmethod
    def _checkversion(out):
        '''ensure git version is new enough

        >>> _checkversion = gitsubrepo._checkversion
        >>> _checkversion(b'git version 1.6.0')
        'ok'
        >>> _checkversion(b'git version 1.8.5')
        'ok'
        >>> _checkversion(b'git version 1.4.0')
        'abort'
        >>> _checkversion(b'git version 1.5.0')
        'warning'
        >>> _checkversion(b'git version 1.9-rc0')
        'ok'
        >>> _checkversion(b'git version 1.9.0.265.g81cdec2')
        'ok'
        >>> _checkversion(b'git version 1.9.0.GIT')
        'ok'
        >>> _checkversion(b'git version 12345')
        'unknown'
        >>> _checkversion(b'no')
        'unknown'
        '''
        # _gitversion returns a (major, minor, patch) tuple, or -1 on
        # unparseable output
        version = gitsubrepo._gitversion(out)
        # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
        # despite the docstring comment.  For now, error on 1.4.0, warn on
        # 1.5.0 but attempt to continue.
        if version == -1:
            return 'unknown'
        if version < (1, 5, 0):
            return 'abort'
        elif version < (1, 6, 0):
            return 'warning'
        return 'ok'
1233 1233
1234 1234 def _gitcommand(self, commands, env=None, stream=False):
1235 1235 return self._gitdir(commands, env=env, stream=stream)[0]
1236 1236
    def _gitdir(self, commands, env=None, stream=False):
        """Run git with cwd set to the subrepo checkout directory."""
        return self._gitnodir(commands, env=env, stream=stream,
                              cwd=self._abspath)
1240 1240
1241 1241 def _gitnodir(self, commands, env=None, stream=False, cwd=None):
1242 1242 """Calls the git command
1243 1243
1244 1244 The methods tries to call the git command. versions prior to 1.6.0
1245 1245 are not supported and very probably fail.
1246 1246 """
1247 1247 self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
1248 1248 if env is None:
1249 1249 env = encoding.environ.copy()
1250 1250 # disable localization for Git output (issue5176)
1251 1251 env['LC_ALL'] = 'C'
1252 1252 # fix for Git CVE-2015-7545
1253 1253 if 'GIT_ALLOW_PROTOCOL' not in env:
1254 1254 env['GIT_ALLOW_PROTOCOL'] = 'file:git:http:https:ssh'
1255 1255 # unless ui.quiet is set, print git's stderr,
1256 1256 # which is mostly progress and useful info
1257 1257 errpipe = None
1258 1258 if self.ui.quiet:
1259 1259 errpipe = open(os.devnull, 'w')
1260 1260 if self.ui._colormode and len(commands) and commands[0] == "diff":
1261 1261 # insert the argument in the front,
1262 1262 # the end of git diff arguments is used for paths
1263 1263 commands.insert(1, '--color')
1264 1264 p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
1265 1265 cwd=cwd, env=env, close_fds=procutil.closefds,
1266 1266 stdout=subprocess.PIPE, stderr=errpipe)
1267 1267 if stream:
1268 1268 return p.stdout, None
1269 1269
1270 1270 retdata = p.stdout.read().strip()
1271 1271 # wait for the child to exit to avoid race condition.
1272 1272 p.wait()
1273 1273
1274 1274 if p.returncode != 0 and p.returncode != 1:
1275 1275 # there are certain error codes that are ok
1276 1276 command = commands[0]
1277 1277 if command in ('cat-file', 'symbolic-ref'):
1278 1278 return retdata, p.returncode
1279 1279 # for all others, abort
1280 1280 raise error.Abort(_('git %s error %d in %s') %
1281 1281 (command, p.returncode, self._relpath))
1282 1282
1283 1283 return retdata, p.returncode
1284 1284
1285 1285 def _gitmissing(self):
1286 1286 return not self.wvfs.exists('.git')
1287 1287
1288 1288 def _gitstate(self):
1289 1289 return self._gitcommand(['rev-parse', 'HEAD'])
1290 1290
1291 1291 def _gitcurrentbranch(self):
1292 1292 current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
1293 1293 if err:
1294 1294 current = None
1295 1295 return current
1296 1296
1297 1297 def _gitremote(self, remote):
1298 1298 out = self._gitcommand(['remote', 'show', '-n', remote])
1299 1299 line = out.split('\n')[1]
1300 1300 i = line.index('URL: ') + len('URL: ')
1301 1301 return line[i:]
1302 1302
1303 1303 def _githavelocally(self, revision):
1304 1304 out, code = self._gitdir(['cat-file', '-e', revision])
1305 1305 return code == 0
1306 1306
1307 1307 def _gitisancestor(self, r1, r2):
1308 1308 base = self._gitcommand(['merge-base', r1, r2])
1309 1309 return base == r1
1310 1310
1311 1311 def _gitisbare(self):
1312 1312 return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
1313 1313
    def _gitupdatestat(self):
        """This must be run before git diff-index.
        diff-index only looks at changes to file stat;
        this command looks at file contents and updates the stat."""
        self._gitcommand(['update-index', '-q', '--refresh'])
1319 1319
    def _gitbranchmap(self):
        '''returns 2 things:
        a map from git branch to revision
        a map from revision to branches'''
        branch2rev = {}
        rev2branch = {}

        out = self._gitcommand(['for-each-ref', '--format',
                                '%(objectname) %(refname)'])
        for line in out.split('\n'):
            revision, ref = line.split(' ')
            # only local heads and remote-tracking branches are of interest
            if (not ref.startswith('refs/heads/') and
                not ref.startswith('refs/remotes/')):
                continue
            if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
                continue # ignore remote/HEAD redirects
            branch2rev[ref] = revision
            rev2branch.setdefault(revision, []).append(ref)
        return branch2rev, rev2branch
1339 1339
    def _gittracking(self, branches):
        """Return a map of remote branch to local tracking branch."""
        # assumes no more than one local tracking branch for each remote
        tracking = {}
        for b in branches:
            if b.startswith('refs/remotes/'):
                continue
            # strip the 'refs/heads/' prefix to get the short branch name
            bname = b.split('/', 2)[2]
            remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
            if remote:
                ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
                tracking['refs/remotes/%s/%s' %
                         (remote, ref.split('/', 2)[2])] = b
        return tracking
1354 1354
    def _abssource(self, source):
        """Resolve *source* relative to the parent repo unless it is
        already an absolute URL or an scp-style address."""
        if '://' not in source:
            # recognize the scp syntax as an absolute source
            colon = source.find(':')
            if colon != -1 and '/' not in source[:colon]:
                return source
        self._subsource = source
        return _abssource(self)
1363 1363
    def _fetch(self, source, revision):
        """Clone or fetch until *revision* is present locally; abort if the
        revision still cannot be found afterwards."""
        if self._gitmissing():
            # SEC: check for safe ssh url
            util.checksafessh(source)

            source = self._abssource(source)
            self.ui.status(_('cloning subrepo %s from %s\n') %
                           (self._relpath, source))
            self._gitnodir(['clone', source, self._abspath])
        if self._githavelocally(revision):
            return
        self.ui.status(_('pulling subrepo %s from %s\n') %
                       (self._relpath, self._gitremote('origin')))
        # try only origin: the originally cloned repo
        self._gitcommand(['fetch'])
        if not self._githavelocally(revision):
            raise error.Abort(_('revision %s does not exist in subrepository '
                                '"%s"\n') % (revision, self._relpath))
1382 1382
    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False, missing=False):
        # Report whether the subrepo has uncommitted changes or (unless
        # ignoreupdate) a checkout differing from the recorded state.
        # NOTE(review): 'missing' is accepted for interface parity with
        # other subrepo types but is not consulted here.
        if self._gitmissing():
            # repo absent: dirty only if a revision was recorded for it
            return self._state[1] != ''
        if self._gitisbare():
            return True
        if not ignoreupdate and self._state[1] != self._gitstate():
            # different version checked out
            return True
        # check for staged changes or modified files; ignore untracked files
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        # git diff-index --quiet exits with 1 when differences were found
        return code == 1
1396 1396
1397 1397 def basestate(self):
1398 1398 return self._gitstate()
1399 1399
    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        """Update the working directory to *state* (source, revision, kind).

        Tries to land on an appropriate local branch for the target
        revision (preferring master, then any local branch, then a
        tracked remote branch); falls back to a detached HEAD checkout
        when no branch matches.
        """
        source, revision, kind = state
        if not revision:
            # empty recorded state means the subrepo should be removed
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
            self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            # run 'git checkout' with the given extra arguments, forcing
            # when overwrite was requested
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)
            _sanitize(self.ui, self.wvfs, '.git')

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self.ui.warn(_('checking out detached HEAD in '
                           'subrepository "%s"\n') % self._relpath)
            self.ui.warn(_('check out a git branch if you intend '
                           'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 3)[3]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
            _sanitize(self.ui, self.wvfs, '.git')
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()
1485 1485
1486 1486 @annotatesubrepoerror
1487 1487 def commit(self, text, user, date):
1488 1488 if self._gitmissing():
1489 1489 raise error.Abort(_("subrepo %s is missing") % self._relpath)
1490 1490 cmd = ['commit', '-a', '-m', text]
1491 1491 env = encoding.environ.copy()
1492 1492 if user:
1493 1493 cmd += ['--author', user]
1494 1494 if date:
1495 1495 # git's date parser silently ignores when seconds < 1e9
1496 1496 # convert to ISO8601
1497 1497 env['GIT_AUTHOR_DATE'] = dateutil.datestr(date,
1498 1498 '%Y-%m-%dT%H:%M:%S %1%2')
1499 1499 self._gitcommand(cmd, env=env)
1500 1500 # make sure commit works otherwise HEAD might not exist under certain
1501 1501 # circumstances
1502 1502 return self._gitstate()
1503 1503
    @annotatesubrepoerror
    def merge(self, state):
        """Merge the revision recorded in *state* into the working copy.

        Fast-forwards (via get()) when possible, otherwise runs a
        no-commit git merge. A dirty working copy triggers an
        interactive prompt first.
        """
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

        def mergefunc():
            # fast forward when the target already contains our base;
            # otherwise perform a real (no-commit) merge
            if base == revision:
                self.get(state) # fast forward merge
            elif base != self._state[1]:
                self._gitcommand(['merge', '--no-commit', revision])
                _sanitize(self.ui, self.wvfs, '.git')

        if self.dirty():
            if self._gitstate() != revision:
                # not already at the target: confirm before merging over
                # local changes
                dirty = self._gitstate() == self._state[1] or code != 0
                if _updateprompt(self.ui, self, dirty,
                                 self._state[1][:7], revision[:7]):
                    mergefunc()
        else:
            mergefunc()
1527 1527
    @annotatesubrepoerror
    def push(self, opts):
        """Push the recorded revision to origin.

        Returns True when the revision is already reachable from (or was
        successfully pushed to) origin, False otherwise.
        """
        force = opts.get('force')

        if not self._state[1]:
            # no revision recorded: nothing to push
            return True
        if self._gitmissing():
            raise error.Abort(_("subrepo %s is missing") % self._relpath)
        # if a branch in origin contains the revision, nothing to do
        branch2rev, rev2branch = self._gitbranchmap()
        if self._state[1] in rev2branch:
            for b in rev2branch[self._state[1]]:
                if b.startswith('refs/remotes/origin/'):
                    return True
        for b, revision in branch2rev.iteritems():
            if b.startswith('refs/remotes/origin/'):
                if self._gitisancestor(self._state[1], revision):
                    return True
        # otherwise, try to push the currently checked out branch
        cmd = ['push']
        if force:
            cmd.append('--force')

        current = self._gitcurrentbranch()
        if current:
            # determine if the current branch is even useful
            if not self._gitisancestor(self._state[1], current):
                self.ui.warn(_('unrelated git branch checked out '
                               'in subrepository "%s"\n') % self._relpath)
                return False
            self.ui.status(_('pushing branch %s of subrepository "%s"\n') %
                           (current.split('/', 2)[2], self._relpath))
            ret = self._gitdir(cmd + ['origin', current])
            return ret[1] == 0
        else:
            self.ui.warn(_('no branch checked out in subrepository "%s"\n'
                           'cannot push revision %s\n') %
                         (self._relpath, self._state[1]))
            return False
1567 1567
    @annotatesubrepoerror
    def add(self, ui, match, prefix, explicitonly, **opts):
        """Schedule files matched by *match* for git tracking.

        Returns the list of explicitly named files that were rejected
        because they are already tracked.
        """
        if self._gitmissing():
            return []

        (modified, added, removed,
         deleted, unknown, ignored, clean) = self.status(None, unknown=True,
                                                         clean=True)

        tracked = set()
        # dirstates 'amn' warn, 'r' is added again
        for l in (modified, added, deleted, clean):
            tracked.update(l)

        # Unknown files not of interest will be rejected by the matcher
        files = unknown
        files.extend(match.files())

        rejected = []

        files = [f for f in sorted(set(files)) if match(f)]
        for f in files:
            exact = match.exact(f)
            command = ["add"]
            if exact:
                command.append("-f") #should be added, even if ignored
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

            if f in tracked: # hg prints 'adding' even if already tracked
                if exact:
                    rejected.append(f)
                continue
            if not opts.get(r'dry_run'):
                self._gitcommand(command + [f])

        for f in rejected:
            ui.warn(_("%s already tracked!\n") % match.abs(f))

        return rejected
1608 1608
1609 1609 @annotatesubrepoerror
1610 1610 def remove(self):
1611 1611 if self._gitmissing():
1612 1612 return
1613 1613 if self.dirty():
1614 1614 self.ui.warn(_('not removing repo %s because '
1615 1615 'it has changes.\n') % self._relpath)
1616 1616 return
1617 1617 # we can't fully delete the repository as it may contain
1618 1618 # local-only history
1619 1619 self.ui.note(_('removing subrepo %s\n') % self._relpath)
1620 1620 self._gitcommand(['config', 'core.bare', 'true'])
1621 1621 for f, kind in self.wvfs.readdir():
1622 1622 if f == '.git':
1623 1623 continue
1624 1624 if kind == stat.S_IFDIR:
1625 1625 self.wvfs.rmtree(f)
1626 1626 else:
1627 1627 self.wvfs.unlink(f)
1628 1628
1629 1629 def archive(self, archiver, prefix, match=None, decode=True):
1630 1630 total = 0
1631 1631 source, revision = self._state
1632 1632 if not revision:
1633 1633 return total
1634 1634 self._fetch(source, revision)
1635 1635
1636 1636 # Parse git's native archive command.
1637 1637 # This should be much faster than manually traversing the trees
1638 1638 # and objects with many subprocess calls.
1639 1639 tarstream = self._gitcommand(['archive', revision], stream=True)
1640 tar = tarfile.open(fileobj=tarstream, mode='r|')
1640 tar = tarfile.open(fileobj=tarstream, mode=r'r|')
1641 1641 relpath = subrelpath(self)
1642 1642 self.ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
1643 1643 for i, info in enumerate(tar):
1644 1644 if info.isdir():
1645 1645 continue
1646 1646 if match and not match(info.name):
1647 1647 continue
1648 1648 if info.issym():
1649 1649 data = info.linkname
1650 1650 else:
1651 1651 data = tar.extractfile(info).read()
1652 1652 archiver.addfile(prefix + self._path + '/' + info.name,
1653 1653 info.mode, info.issym(), data)
1654 1654 total += 1
1655 1655 self.ui.progress(_('archiving (%s)') % relpath, i + 1,
1656 1656 unit=_('files'))
1657 1657 self.ui.progress(_('archiving (%s)') % relpath, None)
1658 1658 return total
1659 1659
1660 1660
    @annotatesubrepoerror
    def cat(self, match, fm, fntemplate, prefix, **opts):
        """Write out the contents of matched files at the recorded revision.

        Returns 0 on success, 1 when nothing could be done
        (include/exclude patterns are not supported for git subrepos).
        """
        rev = self._state[1]
        if match.anypats():
            return 1 #No support for include/exclude yet

        if not match.files():
            return 1

        # TODO: add support for non-plain formatter (see cmdutil.cat())
        for f in match.files():
            output = self._gitcommand(["show", "%s:%s" % (rev, f)])
            fp = cmdutil.makefileobj(self._ctx, fntemplate,
                                     pathname=self.wvfs.reljoin(prefix, f))
            fp.write(output)
            fp.close()
        return 0
1678 1678
1679 1679
    @annotatesubrepoerror
    def status(self, rev2, **opts):
        """Return an scmutil.status tuple for changes between the recorded
        revision and *rev2* (or the working directory when rev2 is None).
        """
        rev1 = self._state[1]
        if self._gitmissing() or not rev1:
            # if the repo is missing, return no results
            return scmutil.status([], [], [], [], [], [], [])
        modified, added, removed = [], [], []
        self._gitupdatestat()
        if rev2:
            command = ['diff-tree', '--no-renames', '-r', rev1, rev2]
        else:
            command = ['diff-index', '--no-renames', rev1]
        out = self._gitcommand(command)
        for line in out.split('\n'):
            tab = line.find('\t')
            if tab == -1:
                continue
            # the single status letter immediately precedes the tab in
            # diff-index/diff-tree output
            status, f = line[tab - 1], line[tab + 1:]
            if status == 'M':
                modified.append(f)
            elif status == 'A':
                added.append(f)
            elif status == 'D':
                removed.append(f)

        deleted, unknown, ignored, clean = [], [], [], []

        # 'git status -z' gives NUL-separated records for untracked and
        # ignored files
        command = ['status', '--porcelain', '-z']
        if opts.get(r'unknown'):
            command += ['--untracked-files=all']
        if opts.get(r'ignored'):
            command += ['--ignored']
        out = self._gitcommand(command)

        changedfiles = set()
        changedfiles.update(modified)
        changedfiles.update(added)
        changedfiles.update(removed)
        for line in out.split('\0'):
            if not line:
                continue
            st = line[0:2]
            #moves and copies show 2 files on one line
            # NOTE(review): after splitting on '\0' above, 'line' can no
            # longer contain '\0', so this rename branch appears
            # unreachable -- confirm against 'git status -z' rename output
            if line.find('\0') >= 0:
                filename1, filename2 = line[3:].split('\0')
            else:
                filename1 = line[3:]
                filename2 = None

            changedfiles.add(filename1)
            if filename2:
                changedfiles.add(filename2)

            if st == '??':
                unknown.append(filename1)
            elif st == '!!':
                ignored.append(filename1)

        if opts.get(r'clean'):
            # clean = tracked files with no recorded change
            out = self._gitcommand(['ls-files'])
            for f in out.split('\n'):
                if not f in changedfiles:
                    clean.append(f)

        return scmutil.status(modified, added, removed, deleted,
                              unknown, ignored, clean)
1746 1746
    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        """Write a git diff between the recorded revision and *node2*
        (or the working directory when node2 is None) to *ui*,
        honoring *diffopts* and the file matcher.
        """
        node1 = self._state[1]
        cmd = ['diff', '--no-renames']
        if opts[r'stat']:
            cmd.append('--stat')
        else:
            # for Git, this also implies '-p'
            cmd.append('-U%d' % diffopts.context)

        gitprefix = self.wvfs.reljoin(prefix, self._path)

        if diffopts.noprefix:
            cmd.extend(['--src-prefix=%s/' % gitprefix,
                        '--dst-prefix=%s/' % gitprefix])
        else:
            cmd.extend(['--src-prefix=a/%s/' % gitprefix,
                        '--dst-prefix=b/%s/' % gitprefix])

        if diffopts.ignorews:
            cmd.append('--ignore-all-space')
        if diffopts.ignorewsamount:
            cmd.append('--ignore-space-change')
        # --ignore-blank-lines requires git 1.8.4 or newer
        if self._gitversion(self._gitcommand(['--version'])) >= (1, 8, 4) \
            and diffopts.ignoreblanklines:
            cmd.append('--ignore-blank-lines')

        cmd.append(node1)
        if node2:
            cmd.append(node2)

        output = ""
        if match.always():
            output += self._gitcommand(cmd) + '\n'
        else:
            # diff only the files selected by the matcher, one at a time
            st = self.status(node2)[:3]
            files = [f for sublist in st for f in sublist]
            for f in files:
                if match(f):
                    output += self._gitcommand(cmd + ['--', f]) + '\n'

        if output.strip():
            ui.write(output)
1790 1790
1791 1791 @annotatesubrepoerror
1792 1792 def revert(self, substate, *pats, **opts):
1793 1793 self.ui.status(_('reverting subrepo %s\n') % substate[0])
1794 1794 if not opts.get(r'no_backup'):
1795 1795 status = self.status(None)
1796 1796 names = status.modified
1797 1797 for name in names:
1798 1798 bakname = scmutil.origpath(self.ui, self._subparent, name)
1799 1799 self.ui.note(_('saving current version of %s as %s\n') %
1800 1800 (name, bakname))
1801 1801 self.wvfs.rename(name, bakname)
1802 1802
1803 1803 if not opts.get(r'dry_run'):
1804 1804 self.get(substate, overwrite=True)
1805 1805 return []
1806 1806
1807 1807 def shortid(self, revid):
1808 1808 return revid[:7]
1809 1809
# map of subrepo kind (as recorded in .hgsub/.hgsubstate) to the class
# implementing that kind of subrepository
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
    }
General Comments 0
You need to be logged in to leave comments. Login now