##// END OF EJS Templates
infinitepush: drop the scratchbookmarksparttype bundle2 part...
Pulkit Goyal -
r37210:5a9692d0 default
parent child Browse files
Show More
@@ -1,1349 +1,1298 b''
1 1 # Infinite push
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """ store some pushes in a remote blob store on the server (EXPERIMENTAL)
8 8
9 9 [infinitepush]
10 10 # Server-side and client-side option. Pattern of the infinitepush bookmark
11 11 branchpattern = PATTERN
12 12
13 13 # Server or client
14 14 server = False
15 15
16 16 # Server-side option. Possible values: 'disk' or 'sql'. Fails if not set
17 17 indextype = disk
18 18
19 19 # Server-side option. Used only if indextype=sql.
20 20 # Format: 'IP:PORT:DB_NAME:USER:PASSWORD'
21 21 sqlhost = IP:PORT:DB_NAME:USER:PASSWORD
22 22
23 23 # Server-side option. Used only if indextype=disk.
24 24 # Filesystem path to the index store
25 25 indexpath = PATH
26 26
27 27 # Server-side option. Possible values: 'disk' or 'external'
28 28 # Fails if not set
29 29 storetype = disk
30 30
31 31 # Server-side option.
32 32 # Path to the binary that will save bundle to the bundlestore
33 33 # Formatted cmd line will be passed to it (see `put_args`)
34 34 put_binary = put
35 35
36 36     # Serser-side option. Used only if storetype=external.
37 37     # Format cmd-line string for put binary. Placeholder: {filename}
38 38     put_args = {filename}
39 39
40 40 # Server-side option.
41 41 # Path to the binary that get bundle from the bundlestore.
42 42 # Formatted cmd line will be passed to it (see `get_args`)
43 43 get_binary = get
44 44
45 45     # Serser-side option. Used only if storetype=external.
46 46     # Format cmd-line string for get binary. Placeholders: {filename} {handle}
47 47     get_args = {filename} {handle}
48 48
49 49 # Server-side option
50 50     logfile = FILE
51 51
52 52 # Server-side option
53 53 loglevel = DEBUG
54 54
55 55 # Server-side option. Used only if indextype=sql.
56 56 # Sets mysql wait_timeout option.
57 57 waittimeout = 300
58 58
59 59 # Server-side option. Used only if indextype=sql.
60 60 # Sets mysql innodb_lock_wait_timeout option.
61 61 locktimeout = 120
62 62
63 63 # Server-side option. Used only if indextype=sql.
64 64 # Name of the repository
65 65 reponame = ''
66 66
67 67 # Client-side option. Used by --list-remote option. List of remote scratch
68 68 # patterns to list if no patterns are specified.
69 69 defaultremotepatterns = ['*']
70 70
71 71 # Server-side option. If bookmark that was pushed matches
72 72 # `fillmetadatabranchpattern` then background
73 73 # `hg debugfillinfinitepushmetadata` process will save metadata
74 74 # in infinitepush index for nodes that are ancestor of the bookmark.
75 75 fillmetadatabranchpattern = ''
76 76
77 77 # Instructs infinitepush to forward all received bundle2 parts to the
78 78 # bundle for storage. Defaults to False.
79 79 storeallparts = True
80 80
81 81 [remotenames]
82 82 # Client-side option
83 83 # This option should be set only if remotenames extension is enabled.
84 84 # Whether remote bookmarks are tracked by remotenames extension.
85 85 bookmarks = True
86 86 """
87 87
88 88 from __future__ import absolute_import
89 89
90 90 import collections
91 91 import contextlib
92 92 import errno
93 93 import functools
94 import json
95 94 import logging
96 95 import os
97 96 import random
98 97 import re
99 98 import socket
100 import struct
101 99 import subprocess
102 100 import sys
103 101 import tempfile
104 102 import time
105 103
106 104 from mercurial.node import (
107 105 bin,
108 106 hex,
109 107 )
110 108
111 109 from mercurial.i18n import _
112 110
113 111 from mercurial import (
114 112 bundle2,
115 113 changegroup,
116 114 commands,
117 115 discovery,
118 116 encoding,
119 117 error,
120 118 exchange,
121 119 extensions,
122 120 hg,
123 121 localrepo,
124 122 peer,
125 123 phases,
126 124 pushkey,
127 125 registrar,
128 126 util,
129 127 wireproto,
130 128 )
131 129
132 130 from . import (
133 131 bundleparts,
134 132 common,
135 133 infinitepushcommands,
136 134 )
137 135
138 136 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
139 137 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
140 138 # be specifying the version(s) of Mercurial they are tested with, or
141 139 # leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Declarations of every config option this extension reads, so that
# Mercurial's config validation knows about them and their defaults.
configtable = {}
configitem = registrar.configitem(configtable)

configitem('infinitepush', 'server',
    default=False,
)
configitem('infinitepush', 'storetype',
    default='',
)
configitem('infinitepush', 'indextype',
    default='',
)
configitem('infinitepush', 'indexpath',
    default='',
)
configitem('infinitepush', 'fillmetadatabranchpattern',
    default='',
)
configitem('infinitepush', 'storeallparts',
    default=False,
)
configitem('infinitepush', 'reponame',
    default='',
)
configitem('scratchbranch', 'storepath',
    default='',
)
configitem('infinitepush', 'branchpattern',
    default='',
)
configitem('infinitepush', 'metadatafilelimit',
    default=100,
)
configitem('experimental', 'server-bundlestore-bookmark',
    default='',
)
configitem('experimental', 'server-bundlestore-create',
    default='',
)
configitem('experimental', 'infinitepush-scratchpush',
    default=False,
)
configitem('experimental', 'non-forward-move',
    default=False,
)

# Shorthand names for config sections/options used repeatedly below.
experimental = 'experimental'
configbookmark = 'server-bundlestore-bookmark'
configcreate = 'server-bundlestore-create'
configscratchpush = 'infinitepush-scratchpush'
confignonforwardmove = 'non-forward-move'

scratchbranchparttype = bundleparts.scratchbranchparttype
cmdtable = infinitepushcommands.cmdtable
revsetpredicate = registrar.revsetpredicate()
templatekeyword = registrar.templatekeyword()
# Matcher for scratch bookmark names; replaced in commonsetup() when
# infinitepush.branchpattern is configured.
_scratchbranchmatcher = lambda x: False
# Cheap predicate for strings that could be (a prefix of) a hex hash.
_maybehash = re.compile(r'^[a-f0-9]+$').search
202 200
def _buildexternalbundlestore(ui):
    """Build a store.externalbundlestore from [infinitepush] config.

    Requires infinitepush.put_binary and infinitepush.get_binary to be
    set; aborts otherwise.  put_args/get_args are optional cmd-line
    templates passed through to the store.
    """
    putbinary = ui.config('infinitepush', 'put_binary')
    if not putbinary:
        raise error.Abort('put binary is not specified')
    getbinary = ui.config('infinitepush', 'get_binary')
    if not getbinary:
        raise error.Abort('get binary is not specified')
    putargs = ui.configlist('infinitepush', 'put_args', [])
    getargs = ui.configlist('infinitepush', 'get_args', [])
    from . import store
    return store.externalbundlestore(putbinary, putargs, getbinary, getargs)
214 212
def _buildsqlindex(ui):
    """Build a sqlindexapi index from the [infinitepush] sql settings.

    Aborts when infinitepush.sqlhost or infinitepush.reponame is unset.
    """
    sqlhost = ui.config('infinitepush', 'sqlhost')
    if not sqlhost:
        raise error.Abort(_('please set infinitepush.sqlhost'))
    # sqlhost format is IP:PORT:DB_NAME:USER:PASSWORD (see module docstring)
    host, port, db, user, password = sqlhost.split(':')
    reponame = ui.config('infinitepush', 'reponame')
    if not reponame:
        raise error.Abort(_('please set infinitepush.reponame'))

    logfile = ui.config('infinitepush', 'logfile', '')
    waittimeout = ui.configint('infinitepush', 'waittimeout', 300)
    locktimeout = ui.configint('infinitepush', 'locktimeout', 120)
    from . import sqlindexapi
    return sqlindexapi.sqlindexapi(
        reponame, host, port, db, user, password,
        logfile, _getloglevel(ui), waittimeout=waittimeout,
        locktimeout=locktimeout)
232 230
233 231 def _getloglevel(ui):
234 232 loglevel = ui.config('infinitepush', 'loglevel', 'DEBUG')
235 233 numeric_loglevel = getattr(logging, loglevel.upper(), None)
236 234 if not isinstance(numeric_loglevel, int):
237 235 raise error.Abort(_('invalid log level %s') % loglevel)
238 236 return numeric_loglevel
239 237
def _tryhoist(ui, remotebookmark):
    """Return ``remotebookmark`` with any hoisted prefix stripped.

    The remotenames extension has a 'hoist' config that allows using
    remote bookmarks without the remote path: 'hg update master' works
    like 'hg update remote/master'.  Allow the same in infinitepush.
    """
    if not common.isremotebooksenabled(ui):
        return remotebookmark
    prefix = ui.config('remotenames', 'hoist') + '/'
    if remotebookmark.startswith(prefix):
        remotebookmark = remotebookmark[len(prefix):]
    return remotebookmark
254 252
class bundlestore(object):
    """Couples a bundle store (bundle contents) with an index (metadata).

    The storage backend is chosen by infinitepush.storetype ('disk' or
    'external'); the index backend by infinitepush.indextype ('disk' or
    'sql').  Any other value aborts.
    """
    def __init__(self, repo):
        self._repo = repo
        storetype = self._repo.ui.config('infinitepush', 'storetype', '')
        if storetype == 'disk':
            from . import store
            self.store = store.filebundlestore(self._repo.ui, self._repo)
        elif storetype == 'external':
            self.store = _buildexternalbundlestore(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush store type specified %s') % storetype)

        indextype = self._repo.ui.config('infinitepush', 'indextype', '')
        if indextype == 'disk':
            from . import fileindexapi
            self.index = fileindexapi.fileindexapi(self._repo)
        elif indextype == 'sql':
            self.index = _buildsqlindex(self._repo.ui)
        else:
            raise error.Abort(
                _('unknown infinitepush index type specified %s') % indextype)
277 275
def _isserver(ui):
    """Return True when infinitepush.server is enabled for this ui."""
    return ui.configbool('infinitepush', 'server')
280 278
def reposetup(ui, repo):
    # Only server repos need a bundlestore, and only local repo objects
    # can carry one.
    if _isserver(ui) and repo.local():
        repo.bundlestore = bundlestore(repo)
284 282
def extsetup(ui):
    """Extension setup: shared wiring plus the side-specific setup."""
    commonsetup(ui)
    if not _isserver(ui):
        clientextsetup(ui)
    else:
        serverextsetup(ui)
291 289
def commonsetup(ui):
    """Setup shared by server and client.

    Registers the 'listkeyspatterns' wire command and compiles the
    scratch bookmark matcher from infinitepush.branchpattern.
    """
    wireproto.commands['listkeyspatterns'] = (
        wireprotolistkeyspatterns, 'namespace patterns')
    scratchbranchpat = ui.config('infinitepush', 'branchpattern')
    if scratchbranchpat:
        # replace the module-level default (match nothing) with the
        # configured pattern matcher
        global _scratchbranchmatcher
        kind, pat, _scratchbranchmatcher = util.stringmatcher(scratchbranchpat)
299 297
def serverextsetup(ui):
    """Server-side setup: intercept bundle2 parts and wire commands."""
    origpushkeyhandler = bundle2.parthandlermapping['pushkey']

    # route pushkey parts through our handler (defined elsewhere in this
    # file) while preserving the original handler's declared params
    def newpushkeyhandler(*args, **kwargs):
        bundle2pushkey(origpushkeyhandler, *args, **kwargs)
    newpushkeyhandler.params = origpushkeyhandler.params
    bundle2.parthandlermapping['pushkey'] = newpushkeyhandler

    # same wrapping pattern for phase-heads parts
    orighandlephasehandler = bundle2.parthandlermapping['phase-heads']
    newphaseheadshandler = lambda *args, **kwargs: \
        bundle2handlephases(orighandlephasehandler, *args, **kwargs)
    newphaseheadshandler.params = orighandlephasehandler.params
    bundle2.parthandlermapping['phase-heads'] = newphaseheadshandler

    extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                            localrepolistkeys)
    # make 'lookup' also resolve scratch bookmarks and stored bundles
    wireproto.commands['lookup'] = (
        _lookupwrap(wireproto.commands['lookup'][0]), 'key')
    extensions.wrapfunction(exchange, 'getbundlechunks', getbundlechunks)

    extensions.wrapfunction(bundle2, 'processparts', processparts)
321 319
def clientextsetup(ui):
    """Client-side setup: extend push/pull/update/bookmarks commands."""
    entry = extensions.wrapcommand(commands.table, 'push', _push)
    # Don't add the 'to' arg if it already exists
    if not any(a for a in entry[1] if a[1] == 'to'):
        entry[1].append(('', 'to', '', _('push revs to this bookmark')))

    if not any(a for a in entry[1] if a[1] == 'non-forward-move'):
        entry[1].append(('', 'non-forward-move', None,
                         _('allows moving a remote bookmark to an '
                           'arbitrary place')))

    if not any(a for a in entry[1] if a[1] == 'create'):
        entry[1].append(
            ('', 'create', None, _('create a new remote bookmark')))

    entry[1].append(
        ('', 'bundle-store', None,
         _('force push to go to bundle store (EXPERIMENTAL)')))

    bookcmd = extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks)
    bookcmd[1].append(
        ('', 'list-remote', None,
         'list remote bookmarks. '
         'Positional arguments are interpreted as wildcard patterns. '
         'Only allowed wildcard is \'*\' in the end of the pattern. '
         'If no positional arguments are specified then it will list '
         'the most "important" remote bookmarks. '
         'Otherwise it will list remote bookmarks '
         'that match at least one pattern '
         ''))
    bookcmd[1].append(
        ('', 'remote-path', '',
         'name of the remote path to list the bookmarks'))

    extensions.wrapcommand(commands.table, 'pull', _pull)
    extensions.wrapcommand(commands.table, 'update', _update)

    extensions.wrapfunction(discovery, 'checkheads', _checkheads)

    wireproto.wirepeer.listkeyspatterns = listkeyspatterns

    # move the scratch part so it is generated before the changeset part
    partorder = exchange.b2partsgenorder
    index = partorder.index('changeset')
    partorder.insert(
        index, partorder.pop(partorder.index(scratchbranchparttype)))
367 365
def _showbookmarks(ui, bookmarks, **opts):
    """Format and print a {bookmark: node} mapping, sorted by name."""
    # Copy-paste from commands.py
    fm = ui.formatter('bookmarks', opts)
    for bmark, n in sorted(bookmarks.iteritems()):
        fm.startitem()
        if not ui.quiet:
            fm.plain(' ')
        fm.write('bookmark', '%s', bmark)
        # pad so nodes line up in a column (25-char bookmark field)
        pad = ' ' * (25 - encoding.colwidth(bmark))
        fm.condwrite(not ui.quiet, 'node', pad + ' %s', n)
        fm.plain('\n')
    fm.end()
380 378
def exbookmarks(orig, ui, repo, *names, **opts):
    """Wrapped 'hg bookmarks' supporting --list-remote and scratch deletion.

    With --list-remote, queries the peer for bookmarks matching the given
    patterns.  With --delete (and remotenames loaded), scratch bookmarks
    are deleted remotely while the rest go to the original command.
    """
    pattern = opts.get('list_remote')
    delete = opts.get('delete')
    remotepath = opts.get('remote_path')
    path = ui.paths.getpath(remotepath or None, default=('default'))
    if pattern:
        destpath = path.pushloc or path.loc
        other = hg.peer(repo, opts, destpath)
        if not names:
            raise error.Abort(
                '--list-remote requires a bookmark pattern',
                hint='use "hg book" to get a list of your local bookmarks')
        else:
            fetchedbookmarks = other.listkeyspatterns('bookmarks',
                                                      patterns=names)
            _showbookmarks(ui, fetchedbookmarks, **opts)
        return
    elif delete and 'remotenames' in extensions._extensions:
        # split names into scratch bookmarks (not existing locally) and
        # everything else
        existing_local_bms = set(repo._bookmarks.keys())
        scratch_bms = []
        other_bms = []
        for name in names:
            if _scratchbranchmatcher(name) and name not in existing_local_bms:
                scratch_bms.append(name)
            else:
                other_bms.append(name)

        if len(scratch_bms) > 0:
            if remotepath == '':
                remotepath = 'default'
            _deleteinfinitepushbookmarks(ui,
                                         repo,
                                         remotepath,
                                         scratch_bms)

        if len(other_bms) > 0 or len(scratch_bms) == 0:
            return orig(ui, repo, *other_bms, **opts)
    else:
        return orig(ui, repo, *names, **opts)
420 418
def _checkheads(orig, pushop):
    """Skip the new-remote-heads check when this is a scratch push."""
    if pushop.ui.configbool(experimental, configscratchpush, False):
        # scratch pushes may create new heads on the server side
        return None
    return orig(pushop)
425 423
def wireprotolistkeyspatterns(repo, proto, namespace, patterns):
    """Wire protocol command: listkeys restricted to the given patterns."""
    patterns = wireproto.decodelist(patterns)
    d = repo.listkeys(encoding.tolocal(namespace), patterns).iteritems()
    return pushkey.encodekeys(d)
430 428
def localrepolistkeys(orig, self, namespace, patterns=None):
    """listkeys variant that honors patterns for the bookmarks namespace.

    Matches both scratch bookmarks from the infinitepush index and
    ordinary bookmarks returned by the original listkeys.
    """
    if namespace == 'bookmarks' and patterns:
        index = self.bundlestore.index
        results = {}
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith('*'):
                # rewrite the trailing-star glob as an equivalent regexp
                pattern = 're:^' + pattern[:-1] + '.*'
            kind, pat, matcher = util.stringmatcher(pattern)
            for bookmark, node in bookmarks.iteritems():
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)
447 445
@peer.batchable
def listkeyspatterns(self, namespace, patterns):
    """Peer method: listkeys limited to the given patterns.

    Batchable generator protocol: the first yield sends the encoded
    request arguments with a future; the final yield produces the
    decoded result.
    """
    if not self.capable('pushkey'):
        yield {}, None
    f = peer.future()
    self.ui.debug('preparing listkeys for "%s" with pattern "%s"\n' %
                  (namespace, patterns))
    yield {
        'namespace': encoding.fromlocal(namespace),
        'patterns': wireproto.encodelist(patterns)
    }, f
    d = f.value
    self.ui.debug('received listkey for "%s": %i bytes\n'
                  % (namespace, len(d)))
    yield pushkey.decodekeys(d)
463 461
464 462 def _readbundlerevs(bundlerepo):
465 463 return list(bundlerepo.revs('bundle()'))
466 464
467 465 def _includefilelogstobundle(bundlecaps, bundlerepo, bundlerevs, ui):
468 466 '''Tells remotefilelog to include all changed files to the changegroup
469 467
470 468 By default remotefilelog doesn't include file content to the changegroup.
471 469 But we need to include it if we are fetching from bundlestore.
472 470 '''
473 471 changedfiles = set()
474 472 cl = bundlerepo.changelog
475 473 for r in bundlerevs:
476 474 # [3] means changed files
477 475 changedfiles.update(cl.read(r)[3])
478 476 if not changedfiles:
479 477 return bundlecaps
480 478
481 479 changedfiles = '\0'.join(changedfiles)
482 480 newcaps = []
483 481 appended = False
484 482 for cap in (bundlecaps or []):
485 483 if cap.startswith('excludepattern='):
486 484 newcaps.append('\0'.join((cap, changedfiles)))
487 485 appended = True
488 486 else:
489 487 newcaps.append(cap)
490 488 if not appended:
491 489 # Not found excludepattern cap. Just append it
492 490 newcaps.append('excludepattern=' + changedfiles)
493 491
494 492 return newcaps
495 493
def _rebundle(bundlerepo, bundleroots, unknownhead):
    '''
    A bundle may include more revisions than the user requested, for
    example when it also contains descendants of the requested revision.
    Build bundle2 parts covering only the revisions between the known
    roots and ``unknownhead``.
    '''
    parts = []

    version = '02'
    outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
                                  missingheads=[unknownhead])
    cgstream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
    cgstream = util.chunkbuffer(cgstream).read()
    cgpart = bundle2.bundlepart('changegroup', data=cgstream)
    cgpart.addparam('version', version)
    parts.append(cgpart)

    # also send a tree pack part when the treemanifest extension is loaded
    try:
        treemod = extensions.find('treemanifest')
    except KeyError:
        pass
    else:
        if treemod._cansendtrees(bundlerepo, outgoing.missing):
            treepart = treemod.createtreepackpart(bundlerepo, outgoing,
                                                  treemod.TREEGROUP_PARTTYPE2)
            parts.append(treepart)

    return parts
524 522
525 523 def _getbundleroots(oldrepo, bundlerepo, bundlerevs):
526 524 cl = bundlerepo.changelog
527 525 bundleroots = []
528 526 for rev in bundlerevs:
529 527 node = cl.node(rev)
530 528 parents = cl.parents(node)
531 529 for parent in parents:
532 530 # include all revs that exist in the main repo
533 531 # to make sure that bundle may apply client-side
534 532 if parent in oldrepo:
535 533 bundleroots.append(parent)
536 534 return bundleroots
537 535
538 536 def _needsrebundling(head, bundlerepo):
539 537 bundleheads = list(bundlerepo.revs('heads(bundle())'))
540 538 return not (len(bundleheads) == 1 and
541 539 bundlerepo[bundleheads[0]].node() == head)
542 540
def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''Generate the bundle2 parts that will be sent to the user.

    When the stored bundle has exactly the requested head, reuse its raw
    parts; otherwise re-bundle so only the requested revisions are sent.
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                # plain changegroup-v1 bundle: wrap it in a bundle2 part
                part = bundle2.bundlepart('changegroup',
                                          data=unbundler._stream.read())
                part.addparam('version', '01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                # bundle2: copy every part through, preserving params
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == 'changegroup':
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        'unexpected bundle without changegroup part, ' +
                        'head: %s' % hex(head),
                        hint='report to administrator')
            else:
                raise error.Abort('unknown bundle type')
    else:
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts
578 576
def getbundlechunks(orig, repo, source, heads=None, bundlecaps=None, **kwargs):
    """Wrapped exchange.getbundlechunks that can also serve scratch heads.

    Heads unknown to the repo are looked up in the bundlestore; their
    bundles are downloaded, re-bundled if necessary, and appended after
    the regular changegroup part.  Scratch nodes are reported as draft
    via a temporary listkeys wrapper.
    """
    heads = heads or []
    # newheads are parents of roots of scratch bundles that were requested
    newphases = {}
    scratchbundles = []
    newheads = []
    scratchheads = []
    nodestobundle = {}
    allbundlestocleanup = []
    try:
        for head in heads:
            if head not in repo.changelog.nodemap:
                if head not in nodestobundle:
                    # fetch the stored bundle and open it as a repo overlay
                    newbundlefile = common.downloadbundle(repo, head)
                    bundlepath = "bundle:%s+%s" % (repo.root, newbundlefile)
                    bundlerepo = hg.repository(repo.ui, bundlepath)

                    allbundlestocleanup.append((bundlerepo, newbundlefile))
                    bundlerevs = set(_readbundlerevs(bundlerepo))
                    bundlecaps = _includefilelogstobundle(
                        bundlecaps, bundlerepo, bundlerevs, repo.ui)
                    cl = bundlerepo.changelog
                    bundleroots = _getbundleroots(repo, bundlerepo, bundlerevs)
                    for rev in bundlerevs:
                        node = cl.node(rev)
                        newphases[hex(node)] = str(phases.draft)
                        nodestobundle[node] = (bundlerepo, bundleroots,
                                               newbundlefile)

                scratchbundles.append(
                    _generateoutputparts(head, *nodestobundle[head]))
                newheads.extend(bundleroots)
                scratchheads.append(head)
    finally:
        for bundlerepo, bundlefile in allbundlestocleanup:
            bundlerepo.close()
            try:
                os.unlink(bundlefile)
            except (IOError, OSError):
                # if we can't cleanup the file then just ignore the error,
                # no need to fail
                pass

    pullfrombundlestore = bool(scratchbundles)
    wrappedchangegrouppart = False
    wrappedlistkeys = False
    oldchangegrouppart = exchange.getbundle2partsmapping['changegroup']
    try:
        def _changegrouppart(bundler, *args, **kwargs):
            # Order is important here. First add non-scratch part
            # and only then add parts with scratch bundles because
            # non-scratch part contains parents of roots of scratch bundles.
            result = oldchangegrouppart(bundler, *args, **kwargs)
            for bundle in scratchbundles:
                for part in bundle:
                    bundler.addpart(part)
            return result

        exchange.getbundle2partsmapping['changegroup'] = _changegrouppart
        wrappedchangegrouppart = True

        def _listkeys(orig, self, namespace):
            # report draft phase for scratch nodes during this pull
            origvalues = orig(self, namespace)
            if namespace == 'phases' and pullfrombundlestore:
                if origvalues.get('publishing') == 'True':
                    # Make repo non-publishing to preserve draft phase
                    del origvalues['publishing']
                origvalues.update(newphases)
            return origvalues

        extensions.wrapfunction(localrepo.localrepository, 'listkeys',
                                _listkeys)
        wrappedlistkeys = True
        # ask the normal path for the scratch bundles' roots instead of
        # the scratch heads themselves
        heads = list((set(newheads) | set(heads)) - set(scratchheads))
        result = orig(repo, source, heads=heads,
                      bundlecaps=bundlecaps, **kwargs)
    finally:
        # always restore the wrapped entry points
        if wrappedchangegrouppart:
            exchange.getbundle2partsmapping['changegroup'] = oldchangegrouppart
        if wrappedlistkeys:
            extensions.unwrapfunction(localrepo.localrepository, 'listkeys',
                                      _listkeys)
    return result
662 660
def _lookupwrap(orig):
    """Return a 'lookup' wire command handler that resolves scratch names."""
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        if isinstance(localkey, str) and _scratchbranchmatcher(localkey):
            # scratch bookmark: resolve through the infinitepush index
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return "%s %s\n" % (1, scratchnode)
            else:
                return "%s %s\n" % (0, 'scratch branch %s not found' % localkey)
        else:
            try:
                r = hex(repo.lookup(localkey))
                return "%s %s\n" % (1, r)
            except Exception as inst:
                # not in the repo proper; succeed if a stored bundle has it
                if repo.bundlestore.index.getbundle(localkey):
                    return "%s %s\n" % (1, localkey)
                else:
                    r = str(inst)
                    return "%s %s\n" % (0, r)
    return _lookup
684 682
def _decodebookmarks(stream):
    """Decode a {bookmark: node} dict from a length-prefixed JSON stream.

    Reads a big-endian 4-byte size, then that many bytes of JSON.
    NOTE(review): this line is shown as removed in this revision's diff
    (the scratchbookmarksparttype part was dropped), together with the
    json/struct imports it depends on.
    """
    sizeofjsonsize = struct.calcsize('>i')
    size = struct.unpack('>i', stream.read(sizeofjsonsize))[0]
    unicodedict = json.loads(stream.read(size))
    # python json module always returns unicode strings. We need to convert
    # it back to bytes string
    result = {}
    for bookmark, node in unicodedict.iteritems():
        bookmark = bookmark.encode('ascii')
        node = node.encode('ascii')
        result[bookmark] = node
    return result
697
def _update(orig, ui, repo, node=None, rev=None, **opts):
    """Wrapped 'hg update' that can pull missing scratch targets on demand.

    If the target looks like a scratch bookmark or a 40-char hex hash not
    present locally, try pulling it from the remote before updating.
    """
    if rev and node:
        raise error.Abort(_("please specify just one revision"))

    if not opts.get('date') and (rev or node) not in repo:
        mayberemote = rev or node
        mayberemote = _tryhoist(ui, mayberemote)
        dopull = False
        kwargs = {}
        if _scratchbranchmatcher(mayberemote):
            dopull = True
            kwargs['bookmark'] = [mayberemote]
        elif len(mayberemote) == 40 and _maybehash(mayberemote):
            dopull = True
            kwargs['rev'] = [mayberemote]

        if dopull:
            ui.warn(
                _("'%s' does not exist locally - looking for it " +
                  "remotely...\n") % mayberemote)
            # Try pulling node from remote repo
            try:
                cmdname = '^pull'
                pullcmd = commands.table[cmdname][0]
                pullopts = dict(opt[1:3] for opt in commands.table[cmdname][1])
                pullopts.update(kwargs)
                pullcmd(ui, repo, **pullopts)
            except Exception:
                ui.warn(_('pull failed: %s\n') % sys.exc_info()[1])
            else:
                ui.warn(_("'%s' found remotely\n") % mayberemote)
    return orig(ui, repo, node, rev, **opts)
730 715
def _pull(orig, ui, repo, source="default", **opts):
    """Wrapped 'hg pull' that resolves scratch bookmarks and unknown nodes.

    Scratch bookmark names are resolved to nodes via listkeyspatterns
    before the real pull; discovery is forced to report incoming changes
    for unknown nodes; remote scratch bookmarks are preserved across the
    pull.
    """
    # Copy paste from `pull` command
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))

    scratchbookmarks = {}
    unfi = repo.unfiltered()
    unknownnodes = []
    for rev in opts.get('rev', []):
        if rev not in unfi:
            unknownnodes.append(rev)
    if opts.get('bookmark'):
        bookmarks = []
        revs = opts.get('rev') or []
        for bookmark in opts.get('bookmark'):
            if _scratchbranchmatcher(bookmark):
                # rev is not known yet
                # it will be fetched with listkeyspatterns next
                scratchbookmarks[bookmark] = 'REVTOFETCH'
            else:
                bookmarks.append(bookmark)

        if scratchbookmarks:
            other = hg.peer(repo, opts, source)
            fetchedbookmarks = other.listkeyspatterns(
                'bookmarks', patterns=scratchbookmarks)
            for bookmark in scratchbookmarks:
                if bookmark not in fetchedbookmarks:
                    raise error.Abort('remote bookmark %s not found!' %
                                      bookmark)
                scratchbookmarks[bookmark] = fetchedbookmarks[bookmark]
                revs.append(fetchedbookmarks[bookmark])
        opts['bookmark'] = bookmarks
        opts['rev'] = revs

    if scratchbookmarks or unknownnodes:
        # Set anyincoming to True
        extensions.wrapfunction(discovery, 'findcommonincoming',
                                _findcommonincoming)
    try:
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before pull and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source)
        result = orig(ui, repo, source, **opts)
        # TODO(stash): race condition is possible
        # if scratch bookmarks was updated right after orig.
        # But that's unlikely and shouldn't be harmful.
        if common.isremotebooksenabled(ui):
            remotescratchbookmarks.update(scratchbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, source)
        else:
            _savelocalbookmarks(repo, scratchbookmarks)
        return result
    finally:
        if scratchbookmarks:
            extensions.unwrapfunction(discovery, 'findcommonincoming')
786 771
def _readscratchremotebookmarks(ui, repo, other):
    """Return {name: hexnode} of scratch bookmarks tracked by remotenames.

    Returns an empty dict when remotenames bookmark tracking is not
    enabled.
    """
    if common.isremotebooksenabled(ui):
        remotenamesext = extensions.find('remotenames')
        remotepath = remotenamesext.activepath(repo.ui, other)
        result = {}
        # Let's refresh remotenames to make sure we have it up to date
        # Seems that `repo.names['remotebookmarks']` may return stale bookmarks
        # and it results in deleting scratch bookmarks. Our best guess how to
        # fix it is to use `clearnames()`
        repo._remotenames.clearnames()
        for remotebookmark in repo.names['remotebookmarks'].listnames(repo):
            path, bookname = remotenamesext.splitremotename(remotebookmark)
            if path == remotepath and _scratchbranchmatcher(bookname):
                nodes = repo.names['remotebookmarks'].nodes(repo,
                                                            remotebookmark)
                if nodes:
                    result[bookname] = hex(nodes[0])
        return result
    else:
        return {}
807 792
def _saveremotebookmarks(repo, newbookmarks, remote):
    """Record ``newbookmarks`` in remotenames for ``remote``.

    Existing remote bookmarks and branches for the same path are kept;
    an entry in ``newbookmarks`` that already exists remotely keeps its
    current node.  Mutates ``newbookmarks``.
    """
    remotenamesext = extensions.find('remotenames')
    remotepath = remotenamesext.activepath(repo.ui, remote)
    branches = collections.defaultdict(list)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == 'bookmarks':
            if rname in newbookmarks:
                # It's possible if we have a normal bookmark that matches
                # scratch branch pattern. In this case just use the current
                # bookmark node
                del newbookmarks[rname]
            bookmarks[rname] = hexnode
        elif nametype == 'branches':
            # saveremotenames expects 20 byte binary nodes for branches
            branches[rname].append(bin(hexnode))

    for bookmark, hexnode in newbookmarks.iteritems():
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, remotepath, branches, bookmarks)
831 816
def _savelocalbookmarks(repo, bookmarks):
    """Create local bookmarks for the given {name: node} mapping."""
    if not bookmarks:
        return
    # take both locks and run inside a transaction so the bookmark
    # changes are applied atomically
    with repo.wlock(), repo.lock(), repo.transaction('bookmark') as tr:
        changes = []
        for scratchbook, node in bookmarks.iteritems():
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)
841 826
842 827 def _findcommonincoming(orig, *args, **kwargs):
843 828 common, inc, remoteheads = orig(*args, **kwargs)
844 829 return common, True, remoteheads
845 830
846 831 def _push(orig, ui, repo, dest=None, *args, **opts):
847 832 bookmark = opts.get('to') or ''
848 833 create = opts.get('create') or False
849 834
850 835 oldphasemove = None
851 836 overrides = {(experimental, configbookmark): bookmark,
852 837 (experimental, configcreate): create}
853 838
854 839 with ui.configoverride(overrides, 'infinitepush'):
855 840 scratchpush = opts.get('bundle_store')
856 841 if _scratchbranchmatcher(bookmark):
857 842 # Hack to fix interaction with remotenames. Remotenames push
858 843 # '--to' bookmark to the server but we don't want to push scratch
859 844 # bookmark to the server. Let's delete '--to' and '--create' and
860 845 # also set allow_anon to True (because if --to is not set
861 846 # remotenames will think that we are pushing anonymoush head)
862 847 if 'to' in opts:
863 848 del opts['to']
864 849 if 'create' in opts:
865 850 del opts['create']
866 851 opts['allow_anon'] = True
867 852 scratchpush = True
868 853 # bundle2 can be sent back after push (for example, bundle2
869 854 # containing `pushkey` part to update bookmarks)
870 855 ui.setconfig(experimental, 'bundle2.pushback', True)
871 856
872 857 ui.setconfig(experimental, confignonforwardmove,
873 858 opts.get('non_forward_move'), '--non-forward-move')
874 859 if scratchpush:
875 860 ui.setconfig(experimental, configscratchpush, True)
876 861 oldphasemove = extensions.wrapfunction(exchange,
877 862 '_localphasemove',
878 863 _phasemove)
879 864 # Copy-paste from `push` command
880 865 path = ui.paths.getpath(dest, default=('default-push', 'default'))
881 866 if not path:
882 867 raise error.Abort(_('default repository not configured!'),
883 868 hint=_("see 'hg help config.paths'"))
884 869 destpath = path.pushloc or path.loc
885 870 if destpath.startswith('svn+') and scratchpush:
886 871 raise error.Abort('infinite push does not work with svn repo',
887 872 hint='did you forget to `hg push default`?')
888 873 # Remote scratch bookmarks will be deleted because remotenames doesn't
889 874 # know about them. Let's save it before push and restore after
890 875 remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, destpath)
891 876 result = orig(ui, repo, dest, *args, **opts)
892 877 if common.isremotebooksenabled(ui):
893 878 if bookmark and scratchpush:
894 879 other = hg.peer(repo, opts, destpath)
895 880 fetchedbookmarks = other.listkeyspatterns('bookmarks',
896 881 patterns=[bookmark])
897 882 remotescratchbookmarks.update(fetchedbookmarks)
898 883 _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
899 884 if oldphasemove:
900 885 exchange._localphasemove = oldphasemove
901 886 return result
902 887
903 888 def _deleteinfinitepushbookmarks(ui, repo, path, names):
904 889 """Prune remote names by removing the bookmarks we don't want anymore,
905 890 then writing the result back to disk
906 891 """
907 892 remotenamesext = extensions.find('remotenames')
908 893
909 894 # remotename format is:
910 895 # (node, nametype ("branches" or "bookmarks"), remote, name)
911 896 nametype_idx = 1
912 897 remote_idx = 2
913 898 name_idx = 3
914 899 remotenames = [remotename for remotename in \
915 900 remotenamesext.readremotenames(repo) \
916 901 if remotename[remote_idx] == path]
917 902 remote_bm_names = [remotename[name_idx] for remotename in \
918 903 remotenames if remotename[nametype_idx] == "bookmarks"]
919 904
920 905 for name in names:
921 906 if name not in remote_bm_names:
922 907 raise error.Abort(_("infinitepush bookmark '{}' does not exist "
923 908 "in path '{}'").format(name, path))
924 909
925 910 bookmarks = {}
926 911 branches = collections.defaultdict(list)
927 912 for node, nametype, remote, name in remotenames:
928 913 if nametype == "bookmarks" and name not in names:
929 914 bookmarks[name] = node
930 915 elif nametype == "branches":
931 916 # saveremotenames wants binary nodes for branches
932 917 branches[name].append(bin(node))
933 918
934 919 remotenamesext.saveremotenames(repo, path, branches, bookmarks)
935 920
936 921 def _phasemove(orig, pushop, nodes, phase=phases.public):
937 922 """prevent commits from being marked public
938 923
939 924 Since these are going to a scratch branch, they aren't really being
940 925 published."""
941 926
942 927 if phase != phases.public:
943 928 orig(pushop, nodes, phase)
944 929
945 930 @exchange.b2partsgenerator(scratchbranchparttype)
946 931 def partgen(pushop, bundler):
947 932 bookmark = pushop.ui.config(experimental, configbookmark)
948 933 create = pushop.ui.configbool(experimental, configcreate)
949 934 scratchpush = pushop.ui.configbool(experimental, configscratchpush)
950 935 if 'changesets' in pushop.stepsdone or not scratchpush:
951 936 return
952 937
953 938 if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
954 939 return
955 940
956 941 pushop.stepsdone.add('changesets')
957 942 pushop.stepsdone.add('treepack')
958 943 if not pushop.outgoing.missing:
959 944 pushop.ui.status(_('no changes found\n'))
960 945 pushop.cgresult = 0
961 946 return
962 947
963 948 # This parameter tells the server that the following bundle is an
964 949 # infinitepush. This let's it switch the part processing to our infinitepush
965 950 # code path.
966 951 bundler.addparam("infinitepush", "True")
967 952
968 953 nonforwardmove = pushop.force or pushop.ui.configbool(experimental,
969 954 confignonforwardmove)
970 955 scratchparts = bundleparts.getscratchbranchparts(pushop.repo,
971 956 pushop.remote,
972 957 pushop.outgoing,
973 958 nonforwardmove,
974 959 pushop.ui,
975 960 bookmark,
976 961 create)
977 962
978 963 for scratchpart in scratchparts:
979 964 bundler.addpart(scratchpart)
980 965
981 966 def handlereply(op):
982 967 # server either succeeds or aborts; no code to read
983 968 pushop.cgresult = 1
984 969
985 970 return handlereply
986 971
987 972 bundle2.capabilities[bundleparts.scratchbranchparttype] = ()
988 bundle2.capabilities[bundleparts.scratchbookmarksparttype] = ()
989 973
990 974 def _getrevs(bundle, oldnode, force, bookmark):
991 975 'extracts and validates the revs to be imported'
992 976 revs = [bundle[r] for r in bundle.revs('sort(bundle())')]
993 977
994 978 # new bookmark
995 979 if oldnode is None:
996 980 return revs
997 981
998 982 # Fast forward update
999 983 if oldnode in bundle and list(bundle.set('bundle() & %s::', oldnode)):
1000 984 return revs
1001 985
1002 986 # Forced non-fast forward update
1003 987 if force:
1004 988 return revs
1005 989 else:
1006 990 raise error.Abort(_('non-forward push'),
1007 991 hint=_('use --non-forward-move to override'))
1008 992
1009 993 @contextlib.contextmanager
1010 994 def logservicecall(logger, service, **kwargs):
1011 995 start = time.time()
1012 996 logger(service, eventtype='start', **kwargs)
1013 997 try:
1014 998 yield
1015 999 logger(service, eventtype='success',
1016 1000 elapsedms=(time.time() - start) * 1000, **kwargs)
1017 1001 except Exception as e:
1018 1002 logger(service, eventtype='failure',
1019 1003 elapsedms=(time.time() - start) * 1000, errormsg=str(e),
1020 1004 **kwargs)
1021 1005 raise
1022 1006
1023 1007 def _getorcreateinfinitepushlogger(op):
1024 1008 logger = op.records['infinitepushlogger']
1025 1009 if not logger:
1026 1010 ui = op.repo.ui
1027 1011 try:
1028 1012 username = util.getuser()
1029 1013 except Exception:
1030 1014 username = 'unknown'
1031 1015 # Generate random request id to be able to find all logged entries
1032 1016 # for the same request. Since requestid is pseudo-generated it may
1033 1017 # not be unique, but we assume that (hostname, username, requestid)
1034 1018 # is unique.
1035 1019 random.seed()
1036 1020 requestid = random.randint(0, 2000000000)
1037 1021 hostname = socket.gethostname()
1038 1022 logger = functools.partial(ui.log, 'infinitepush', user=username,
1039 1023 requestid=requestid, hostname=hostname,
1040 1024 reponame=ui.config('infinitepush',
1041 1025 'reponame'))
1042 1026 op.records.add('infinitepushlogger', logger)
1043 1027 else:
1044 1028 logger = logger[0]
1045 1029 return logger
1046 1030
1047 1031 def processparts(orig, repo, op, unbundler):
1048 1032 if unbundler.params.get('infinitepush') != 'True':
1049 1033 return orig(repo, op, unbundler)
1050 1034
1051 1035 handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')
1052 1036
1053 1037 partforwardingwhitelist = []
1054 1038 try:
1055 1039 treemfmod = extensions.find('treemanifest')
1056 1040 partforwardingwhitelist.append(treemfmod.TREEGROUP_PARTTYPE2)
1057 1041 except KeyError:
1058 1042 pass
1059 1043
1060 1044 bundler = bundle2.bundle20(repo.ui)
1061 1045 cgparams = None
1062 scratchbookpart = None
1063 1046 with bundle2.partiterator(repo, op, unbundler) as parts:
1064 1047 for part in parts:
1065 1048 bundlepart = None
1066 1049 if part.type == 'replycaps':
1067 1050 # This configures the current operation to allow reply parts.
1068 1051 bundle2._processpart(op, part)
1069 1052 elif part.type == bundleparts.scratchbranchparttype:
1070 1053 # Scratch branch parts need to be converted to normal
1071 1054 # changegroup parts, and the extra parameters stored for later
1072 1055 # when we upload to the store. Eventually those parameters will
1073 1056 # be put on the actual bundle instead of this part, then we can
1074 1057 # send a vanilla changegroup instead of the scratchbranch part.
1075 1058 cgversion = part.params.get('cgversion', '01')
1076 1059 bundlepart = bundle2.bundlepart('changegroup', data=part.read())
1077 1060 bundlepart.addparam('version', cgversion)
1078 1061 cgparams = part.params
1079 1062
1080 1063 # If we're not dumping all parts into the new bundle, we need to
1081 1064 # alert the future pushkey and phase-heads handler to skip
1082 1065 # the part.
1083 1066 if not handleallparts:
1084 1067 op.records.add(scratchbranchparttype + '_skippushkey', True)
1085 1068 op.records.add(scratchbranchparttype + '_skipphaseheads',
1086 1069 True)
1087 elif part.type == bundleparts.scratchbookmarksparttype:
1088 # Save this for later processing. Details below.
1089 #
1090 # Upstream https://phab.mercurial-scm.org/D1389 and its
1091 # follow-ups stop part.seek support to reduce memory usage
1092 # (https://bz.mercurial-scm.org/5691). So we need to copy
1093 # the part so it can be consumed later.
1094 scratchbookpart = bundleparts.copiedpart(part)
1095 1070 else:
1096 1071 if handleallparts or part.type in partforwardingwhitelist:
1097 1072 # Ideally we would not process any parts, and instead just
1098 1073 # forward them to the bundle for storage, but since this
1099 1074 # differs from previous behavior, we need to put it behind a
1100 1075 # config flag for incremental rollout.
1101 1076 bundlepart = bundle2.bundlepart(part.type, data=part.read())
1102 1077 for key, value in part.params.iteritems():
1103 1078 bundlepart.addparam(key, value)
1104 1079
1105 1080 # Certain parts require a response
1106 1081 if part.type == 'pushkey':
1107 1082 if op.reply is not None:
1108 1083 rpart = op.reply.newpart('reply:pushkey')
1109 1084 rpart.addparam('in-reply-to', str(part.id),
1110 1085 mandatory=False)
1111 1086 rpart.addparam('return', '1', mandatory=False)
1112 1087 else:
1113 1088 bundle2._processpart(op, part)
1114 1089
1115 1090 if handleallparts:
1116 1091 op.records.add(part.type, {
1117 1092 'return': 1,
1118 1093 })
1119 1094 if bundlepart:
1120 1095 bundler.addpart(bundlepart)
1121 1096
1122 1097 # If commits were sent, store them
1123 1098 if cgparams:
1124 1099 buf = util.chunkbuffer(bundler.getchunks())
1125 1100 fd, bundlefile = tempfile.mkstemp()
1126 1101 try:
1127 1102 try:
1128 1103 fp = os.fdopen(fd, 'wb')
1129 1104 fp.write(buf.read())
1130 1105 finally:
1131 1106 fp.close()
1132 1107 storebundle(op, cgparams, bundlefile)
1133 1108 finally:
1134 1109 try:
1135 1110 os.unlink(bundlefile)
1136 1111 except Exception:
1137 1112 # we would rather see the original exception
1138 1113 pass
1139 1114
1140 # The scratch bookmark part is sent as part of a push backup. It needs to be
1141 # processed after the main bundle has been stored, so that any commits it
1142 # references are available in the store.
1143 if scratchbookpart:
1144 bundle2._processpart(op, scratchbookpart)
1145
1146 1115 def storebundle(op, params, bundlefile):
1147 1116 log = _getorcreateinfinitepushlogger(op)
1148 1117 parthandlerstart = time.time()
1149 1118 log(scratchbranchparttype, eventtype='start')
1150 1119 index = op.repo.bundlestore.index
1151 1120 store = op.repo.bundlestore.store
1152 1121 op.records.add(scratchbranchparttype + '_skippushkey', True)
1153 1122
1154 1123 bundle = None
1155 1124 try: # guards bundle
1156 1125 bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
1157 1126 bundle = hg.repository(op.repo.ui, bundlepath)
1158 1127
1159 1128 bookmark = params.get('bookmark')
1160 1129 bookprevnode = params.get('bookprevnode', '')
1161 1130 create = params.get('create')
1162 1131 force = params.get('force')
1163 1132
1164 1133 if bookmark:
1165 1134 oldnode = index.getnode(bookmark)
1166 1135
1167 1136 if not oldnode and not create:
1168 1137 raise error.Abort("unknown bookmark %s" % bookmark,
1169 1138 hint="use --create if you want to create one")
1170 1139 else:
1171 1140 oldnode = None
1172 1141 bundleheads = bundle.revs('heads(bundle())')
1173 1142 if bookmark and len(bundleheads) > 1:
1174 1143 raise error.Abort(
1175 1144 _('cannot push more than one head to a scratch branch'))
1176 1145
1177 1146 revs = _getrevs(bundle, oldnode, force, bookmark)
1178 1147
1179 1148 # Notify the user of what is being pushed
1180 1149 plural = 's' if len(revs) > 1 else ''
1181 1150 op.repo.ui.warn(_("pushing %s commit%s:\n") % (len(revs), plural))
1182 1151 maxoutput = 10
1183 1152 for i in range(0, min(len(revs), maxoutput)):
1184 1153 firstline = bundle[revs[i]].description().split('\n')[0][:50]
1185 1154 op.repo.ui.warn((" %s %s\n") % (revs[i], firstline))
1186 1155
1187 1156 if len(revs) > maxoutput + 1:
1188 1157 op.repo.ui.warn((" ...\n"))
1189 1158 firstline = bundle[revs[-1]].description().split('\n')[0][:50]
1190 1159 op.repo.ui.warn((" %s %s\n") % (revs[-1], firstline))
1191 1160
1192 1161 nodesctx = [bundle[rev] for rev in revs]
1193 1162 inindex = lambda rev: bool(index.getbundle(bundle[rev].hex()))
1194 1163 if bundleheads:
1195 1164 newheadscount = sum(not inindex(rev) for rev in bundleheads)
1196 1165 else:
1197 1166 newheadscount = 0
1198 1167 # If there's a bookmark specified, there should be only one head,
1199 1168 # so we choose the last node, which will be that head.
1200 1169 # If a bug or malicious client allows there to be a bookmark
1201 1170 # with multiple heads, we will place the bookmark on the last head.
1202 1171 bookmarknode = nodesctx[-1].hex() if nodesctx else None
1203 1172 key = None
1204 1173 if newheadscount:
1205 1174 with open(bundlefile, 'r') as f:
1206 1175 bundledata = f.read()
1207 1176 with logservicecall(log, 'bundlestore',
1208 1177 bundlesize=len(bundledata)):
1209 1178 bundlesizelimit = 100 * 1024 * 1024 # 100 MB
1210 1179 if len(bundledata) > bundlesizelimit:
1211 1180 error_msg = ('bundle is too big: %d bytes. ' +
1212 1181 'max allowed size is 100 MB')
1213 1182 raise error.Abort(error_msg % (len(bundledata),))
1214 1183 key = store.write(bundledata)
1215 1184
1216 1185 with logservicecall(log, 'index', newheadscount=newheadscount), index:
1217 1186 if key:
1218 1187 index.addbundle(key, nodesctx)
1219 1188 if bookmark:
1220 1189 index.addbookmark(bookmark, bookmarknode)
1221 1190 _maybeaddpushbackpart(op, bookmark, bookmarknode,
1222 1191 bookprevnode, params)
1223 1192 log(scratchbranchparttype, eventtype='success',
1224 1193 elapsedms=(time.time() - parthandlerstart) * 1000)
1225 1194
1226 1195 fillmetadatabranchpattern = op.repo.ui.config(
1227 1196 'infinitepush', 'fillmetadatabranchpattern', '')
1228 1197 if bookmark and fillmetadatabranchpattern:
1229 1198 __, __, matcher = util.stringmatcher(fillmetadatabranchpattern)
1230 1199 if matcher(bookmark):
1231 1200 _asyncsavemetadata(op.repo.root,
1232 1201 [ctx.hex() for ctx in nodesctx])
1233 1202 except Exception as e:
1234 1203 log(scratchbranchparttype, eventtype='failure',
1235 1204 elapsedms=(time.time() - parthandlerstart) * 1000,
1236 1205 errormsg=str(e))
1237 1206 raise
1238 1207 finally:
1239 1208 if bundle:
1240 1209 bundle.close()
1241 1210
1242 1211 @bundle2.parthandler(scratchbranchparttype,
1243 1212 ('bookmark', 'bookprevnode' 'create', 'force',
1244 1213 'pushbackbookmarks', 'cgversion'))
1245 1214 def bundle2scratchbranch(op, part):
1246 1215 '''unbundle a bundle2 part containing a changegroup to store'''
1247 1216
1248 1217 bundler = bundle2.bundle20(op.repo.ui)
1249 1218 cgversion = part.params.get('cgversion', '01')
1250 1219 cgpart = bundle2.bundlepart('changegroup', data=part.read())
1251 1220 cgpart.addparam('version', cgversion)
1252 1221 bundler.addpart(cgpart)
1253 1222 buf = util.chunkbuffer(bundler.getchunks())
1254 1223
1255 1224 fd, bundlefile = tempfile.mkstemp()
1256 1225 try:
1257 1226 try:
1258 1227 fp = os.fdopen(fd, 'wb')
1259 1228 fp.write(buf.read())
1260 1229 finally:
1261 1230 fp.close()
1262 1231 storebundle(op, part.params, bundlefile)
1263 1232 finally:
1264 1233 try:
1265 1234 os.unlink(bundlefile)
1266 1235 except OSError as e:
1267 1236 if e.errno != errno.ENOENT:
1268 1237 raise
1269 1238
1270 1239 return 1
1271 1240
1272 @bundle2.parthandler(bundleparts.scratchbookmarksparttype)
1273 def bundle2scratchbookmarks(op, part):
1274 '''Handler deletes bookmarks first then adds new bookmarks.
1275 '''
1276 index = op.repo.bundlestore.index
1277 decodedbookmarks = _decodebookmarks(part)
1278 toinsert = {}
1279 todelete = []
1280 for bookmark, node in decodedbookmarks.iteritems():
1281 if node:
1282 toinsert[bookmark] = node
1283 else:
1284 todelete.append(bookmark)
1285 log = _getorcreateinfinitepushlogger(op)
1286 with logservicecall(log, bundleparts.scratchbookmarksparttype), index:
1287 if todelete:
1288 index.deletebookmarks(todelete)
1289 if toinsert:
1290 index.addmanybookmarks(toinsert)
1291
1292 1241 def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
1293 1242 if params.get('pushbackbookmarks'):
1294 1243 if op.reply and 'pushback' in op.reply.capabilities:
1295 1244 params = {
1296 1245 'namespace': 'bookmarks',
1297 1246 'key': bookmark,
1298 1247 'new': newnode,
1299 1248 'old': oldnode,
1300 1249 }
1301 1250 op.reply.newpart('pushkey', mandatoryparams=params.iteritems())
1302 1251
1303 1252 def bundle2pushkey(orig, op, part):
1304 1253 '''Wrapper of bundle2.handlepushkey()
1305 1254
1306 1255 The only goal is to skip calling the original function if flag is set.
1307 1256 It's set if infinitepush push is happening.
1308 1257 '''
1309 1258 if op.records[scratchbranchparttype + '_skippushkey']:
1310 1259 if op.reply is not None:
1311 1260 rpart = op.reply.newpart('reply:pushkey')
1312 1261 rpart.addparam('in-reply-to', str(part.id), mandatory=False)
1313 1262 rpart.addparam('return', '1', mandatory=False)
1314 1263 return 1
1315 1264
1316 1265 return orig(op, part)
1317 1266
1318 1267 def bundle2handlephases(orig, op, part):
1319 1268 '''Wrapper of bundle2.handlephases()
1320 1269
1321 1270 The only goal is to skip calling the original function if flag is set.
1322 1271 It's set if infinitepush push is happening.
1323 1272 '''
1324 1273
1325 1274 if op.records[scratchbranchparttype + '_skipphaseheads']:
1326 1275 return
1327 1276
1328 1277 return orig(op, part)
1329 1278
1330 1279 def _asyncsavemetadata(root, nodes):
1331 1280 '''starts a separate process that fills metadata for the nodes
1332 1281
1333 1282 This function creates a separate process and doesn't wait for it's
1334 1283 completion. This was done to avoid slowing down pushes
1335 1284 '''
1336 1285
1337 1286 maxnodes = 50
1338 1287 if len(nodes) > maxnodes:
1339 1288 return
1340 1289 nodesargs = []
1341 1290 for node in nodes:
1342 1291 nodesargs.append('--node')
1343 1292 nodesargs.append(node)
1344 1293 with open(os.devnull, 'w+b') as devnull:
1345 1294 cmdline = [util.hgexecutable(), 'debugfillinfinitepushmetadata',
1346 1295 '-R', root] + nodesargs
1347 1296 # Process will run in background. We don't care about the return code
1348 1297 subprocess.Popen(cmdline, close_fds=True, shell=False,
1349 1298 stdin=devnull, stdout=devnull, stderr=devnull)
@@ -1,143 +1,132 b''
1 1 # Copyright 2017 Facebook, Inc.
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 8 from mercurial.i18n import _
9 9
10 10 from mercurial import (
11 11 bundle2,
12 12 changegroup,
13 13 error,
14 14 extensions,
15 15 revsetlang,
16 16 util,
17 17 )
18 18
19 19 from . import common
20 20
21 encodebookmarks = common.encodebookmarks
22 21 isremotebooksenabled = common.isremotebooksenabled
23 22
24 23 scratchbranchparttype = 'b2x:infinitepush'
25 scratchbookmarksparttype = 'b2x:infinitepushscratchbookmarks'
26 24
27 25 def getscratchbranchparts(repo, peer, outgoing, confignonforwardmove,
28 26 ui, bookmark, create):
29 27 if not outgoing.missing:
30 28 raise error.Abort(_('no commits to push'))
31 29
32 30 if scratchbranchparttype not in bundle2.bundle2caps(peer):
33 31 raise error.Abort(_('no server support for %r') % scratchbranchparttype)
34 32
35 33 _validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing),
36 34 bookmark)
37 35
38 36 supportedversions = changegroup.supportedoutgoingversions(repo)
39 37 # Explicitly avoid using '01' changegroup version in infinitepush to
40 38 # support general delta
41 39 supportedversions.discard('01')
42 40 cgversion = min(supportedversions)
43 41 _handlelfs(repo, outgoing.missing)
44 42 cg = changegroup.makestream(repo, outgoing, cgversion, 'push')
45 43
46 44 params = {}
47 45 params['cgversion'] = cgversion
48 46 if bookmark:
49 47 params['bookmark'] = bookmark
50 48 # 'prevbooknode' is necessary for pushkey reply part
51 49 params['bookprevnode'] = ''
52 50 if bookmark in repo:
53 51 params['bookprevnode'] = repo[bookmark].hex()
54 52 if create:
55 53 params['create'] = '1'
56 54 if confignonforwardmove:
57 55 params['force'] = '1'
58 56
59 57 # Do not send pushback bundle2 part with bookmarks if remotenames extension
60 58 # is enabled. It will be handled manually in `_push()`
61 59 if not isremotebooksenabled(ui):
62 60 params['pushbackbookmarks'] = '1'
63 61
64 62 parts = []
65 63
66 64 # .upper() marks this as a mandatory part: server will abort if there's no
67 65 # handler
68 66 parts.append(bundle2.bundlepart(
69 67 scratchbranchparttype.upper(),
70 68 advisoryparams=params.iteritems(),
71 69 data=cg))
72 70
73 71 try:
74 72 treemod = extensions.find('treemanifest')
75 73 mfnodes = []
76 74 for node in outgoing.missing:
77 75 mfnodes.append(('', repo[node].manifestnode()))
78 76
79 77 # Only include the tree parts if they all exist
80 78 if not repo.manifestlog.datastore.getmissing(mfnodes):
81 79 parts.append(treemod.createtreepackpart(
82 80 repo, outgoing, treemod.TREEGROUP_PARTTYPE2))
83 81 except KeyError:
84 82 pass
85 83
86 84 return parts
87 85
88 def getscratchbookmarkspart(peer, bookmarks):
89 if scratchbookmarksparttype not in bundle2.bundle2caps(peer):
90 raise error.Abort(
91 _('no server support for %r') % scratchbookmarksparttype)
92
93 return bundle2.bundlepart(
94 scratchbookmarksparttype.upper(),
95 data=encodebookmarks(bookmarks))
96
97 86 def _validaterevset(repo, revset, bookmark):
98 87 """Abort if the revs to be pushed aren't valid for a scratch branch."""
99 88 if not repo.revs(revset):
100 89 raise error.Abort(_('nothing to push'))
101 90 if bookmark:
102 91 # Allow bundle with many heads only if no bookmark is specified
103 92 heads = repo.revs('heads(%r)', revset)
104 93 if len(heads) > 1:
105 94 raise error.Abort(
106 95 _('cannot push more than one head to a scratch branch'))
107 96
108 97 def _handlelfs(repo, missing):
109 98 '''Special case if lfs is enabled
110 99
111 100 If lfs is enabled then we need to call prepush hook
112 101 to make sure large files are uploaded to lfs
113 102 '''
114 103 try:
115 104 lfsmod = extensions.find('lfs')
116 105 lfsmod.wrapper.uploadblobsfromrevs(repo, missing)
117 106 except KeyError:
118 107 # Ignore if lfs extension is not enabled
119 108 return
120 109
121 110 class copiedpart(object):
122 111 """a copy of unbundlepart content that can be consumed later"""
123 112
124 113 def __init__(self, part):
125 114 # copy "public properties"
126 115 self.type = part.type
127 116 self.id = part.id
128 117 self.mandatory = part.mandatory
129 118 self.mandatoryparams = part.mandatoryparams
130 119 self.advisoryparams = part.advisoryparams
131 120 self.params = part.params
132 121 self.mandatorykeys = part.mandatorykeys
133 122 # copy the buffer
134 123 self._io = util.stringio(part.read())
135 124
136 125 def consume(self):
137 126 return
138 127
139 128 def read(self, size=None):
140 129 if size is None:
141 130 return self._io.read()
142 131 else:
143 132 return self._io.read(size)
@@ -1,58 +1,48 b''
1 1 # Copyright 2017 Facebook, Inc.
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 from __future__ import absolute_import
7 7
8 import json
9 8 import os
10 import struct
11 9 import tempfile
12 10
13 11 from mercurial.node import hex
14 12
15 13 from mercurial import (
16 14 error,
17 15 extensions,
18 16 )
19 17
20 18 def isremotebooksenabled(ui):
21 19 return ('remotenames' in extensions._extensions and
22 20 ui.configbool('remotenames', 'bookmarks'))
23 21
24 def encodebookmarks(bookmarks):
25 encoded = {}
26 for bookmark, node in bookmarks.iteritems():
27 encoded[bookmark] = node
28 dumped = json.dumps(encoded)
29 result = struct.pack('>i', len(dumped)) + dumped
30 return result
31
32 22 def downloadbundle(repo, unknownbinhead):
33 23 index = repo.bundlestore.index
34 24 store = repo.bundlestore.store
35 25 bundleid = index.getbundle(hex(unknownbinhead))
36 26 if bundleid is None:
37 27 raise error.Abort('%s head is not known' % hex(unknownbinhead))
38 28 bundleraw = store.read(bundleid)
39 29 return _makebundlefromraw(bundleraw)
40 30
41 31 def _makebundlefromraw(data):
42 32 fp = None
43 33 fd, bundlefile = tempfile.mkstemp()
44 34 try: # guards bundlefile
45 35 try: # guards fp
46 36 fp = os.fdopen(fd, 'wb')
47 37 fp.write(data)
48 38 finally:
49 39 fp.close()
50 40 except Exception:
51 41 try:
52 42 os.unlink(bundlefile)
53 43 except Exception:
54 44 # we would rather see the original exception
55 45 pass
56 46 raise
57 47
58 48 return bundlefile
General Comments 0
You need to be logged in to leave comments. Login now