hg: move share._getsrcrepo into core...
Gregory Szorc
r36177:0fe7e39d default
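This changeset promotes the share extension's private _getsrcrepo() helper into core as hg.sharedreposource() and updates every in-tree caller (journal, narrow, share). A minimal sketch of the call-site migration; the journalsource() helper below is hypothetical and only illustrates the new entry point:

# Before this change, extensions had to import the share extension:
#     from hgext import share
#     srcrepo = share._getsrcrepo(repo)
# After it, the helper lives in core:
from mercurial import hg

def journalsource(repo):
    # Hypothetical helper: return the vfs base of the repository that
    # provides this share's data, or None when repo is not a share.
    srcrepo = hg.sharedreposource(repo)
    return srcrepo.vfs.base if srcrepo is not None else None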
@@ -1,518 +1,516 @@
1 1 # journal.py
2 2 #
3 3 # Copyright 2014-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """track previous positions of bookmarks (EXPERIMENTAL)
8 8
9 9 This extension adds a new command: `hg journal`, which shows you where
10 10 bookmarks were previously located.
11 11
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import errno
18 18 import os
19 19 import weakref
20 20
21 21 from mercurial.i18n import _
22 22
23 23 from mercurial import (
24 24 bookmarks,
25 25 cmdutil,
26 26 dispatch,
27 27 error,
28 28 extensions,
29 29 hg,
30 30 localrepo,
31 31 lock,
32 32 logcmdutil,
33 33 node,
34 34 pycompat,
35 35 registrar,
36 36 util,
37 37 )
38 38
39 from . import share
40
41 39 cmdtable = {}
42 40 command = registrar.command(cmdtable)
43 41
44 42 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
45 43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
46 44 # be specifying the version(s) of Mercurial they are tested with, or
47 45 # leave the attribute unspecified.
48 46 testedwith = 'ships-with-hg-core'
49 47
50 48 # storage format version; increment when the format changes
51 49 storageversion = 0
52 50
53 51 # namespaces
54 52 bookmarktype = 'bookmark'
55 53 wdirparenttype = 'wdirparent'
56 54 # In a shared repository, what shared feature name is used
57 55 # to indicate this namespace is shared with the source?
58 56 sharednamespaces = {
59 57 bookmarktype: hg.sharedbookmarks,
60 58 }
61 59
62 60 # Journal recording, register hooks and storage object
63 61 def extsetup(ui):
64 62 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
65 63 extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
66 64 extensions.wrapfilecache(
67 65 localrepo.localrepository, 'dirstate', wrapdirstate)
68 66 extensions.wrapfunction(hg, 'postshare', wrappostshare)
69 67 extensions.wrapfunction(hg, 'copystore', unsharejournal)
70 68
71 69 def reposetup(ui, repo):
72 70 if repo.local():
73 71 repo.journal = journalstorage(repo)
74 72 repo._wlockfreeprefix.add('namejournal')
75 73
76 74 dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
77 75 if cached:
78 76 # an already instantiated dirstate isn't yet marked as
79 77 # "journal"-ing, even though repo.dirstate() was already
80 78 # wrapped by our own wrapdirstate()
81 79 _setupdirstate(repo, dirstate)
82 80
83 81 def runcommand(orig, lui, repo, cmd, fullargs, *args):
84 82 """Track the command line options for recording in the journal"""
85 83 journalstorage.recordcommand(*fullargs)
86 84 return orig(lui, repo, cmd, fullargs, *args)
87 85
88 86 def _setupdirstate(repo, dirstate):
89 87 dirstate.journalstorage = repo.journal
90 88 dirstate.addparentchangecallback('journal', recorddirstateparents)
91 89
92 90 # hooks to record dirstate changes
93 91 def wrapdirstate(orig, repo):
94 92 """Make journal storage available to the dirstate object"""
95 93 dirstate = orig(repo)
96 94 if util.safehasattr(repo, 'journal'):
97 95 _setupdirstate(repo, dirstate)
98 96 return dirstate
99 97
100 98 def recorddirstateparents(dirstate, old, new):
101 99 """Records all dirstate parent changes in the journal."""
102 100 old = list(old)
103 101 new = list(new)
104 102 if util.safehasattr(dirstate, 'journalstorage'):
105 103 # only record two hashes if there was a merge
106 104 oldhashes = old[:1] if old[1] == node.nullid else old
107 105 newhashes = new[:1] if new[1] == node.nullid else new
108 106 dirstate.journalstorage.record(
109 107 wdirparenttype, '.', oldhashes, newhashes)
110 108
111 109 # hooks to record bookmark changes (both local and remote)
112 110 def recordbookmarks(orig, store, fp):
113 111 """Records all bookmark changes in the journal."""
114 112 repo = store._repo
115 113 if util.safehasattr(repo, 'journal'):
116 114 oldmarks = bookmarks.bmstore(repo)
117 115 for mark, value in store.iteritems():
118 116 oldvalue = oldmarks.get(mark, node.nullid)
119 117 if value != oldvalue:
120 118 repo.journal.record(bookmarktype, mark, oldvalue, value)
121 119 return orig(store, fp)
122 120
123 121 # shared repository support
124 122 def _readsharedfeatures(repo):
125 123 """A set of shared features for this repository"""
126 124 try:
127 125 return set(repo.vfs.read('shared').splitlines())
128 126 except IOError as inst:
129 127 if inst.errno != errno.ENOENT:
130 128 raise
131 129 return set()
132 130
133 131 def _mergeentriesiter(*iterables, **kwargs):
134 132 """Given a set of sorted iterables, yield the next entry in merged order
135 133
136 134 Note that by default entries go from most recent to oldest.
137 135 """
138 136 order = kwargs.pop(r'order', max)
139 137 iterables = [iter(it) for it in iterables]
140 138 # this tracks still active iterables; iterables are deleted as they are
141 139 # exhausted, which is why this is a dictionary and why each entry also
142 140 # stores the key. Entries are mutable so we can store the next value each
143 141 # time.
144 142 iterable_map = {}
145 143 for key, it in enumerate(iterables):
146 144 try:
147 145 iterable_map[key] = [next(it), key, it]
148 146 except StopIteration:
149 147 # empty entry, can be ignored
150 148 pass
151 149
152 150 while iterable_map:
153 151 value, key, it = order(iterable_map.itervalues())
154 152 yield value
155 153 try:
156 154 iterable_map[key][0] = next(it)
157 155 except StopIteration:
158 156 # this iterable is empty, remove it from consideration
159 157 del iterable_map[key]
160 158
161 159 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
162 160 """Mark this shared working copy as sharing journal information"""
163 161 with destrepo.wlock():
164 162 orig(sourcerepo, destrepo, **kwargs)
165 163 with destrepo.vfs('shared', 'a') as fp:
166 164 fp.write('journal\n')
167 165
168 166 def unsharejournal(orig, ui, repo, repopath):
169 167 """Copy shared journal entries into this repo when unsharing"""
170 168 if (repo.path == repopath and repo.shared() and
171 169 util.safehasattr(repo, 'journal')):
172 sharedrepo = share._getsrcrepo(repo)
170 sharedrepo = hg.sharedreposource(repo)
173 171 sharedfeatures = _readsharedfeatures(repo)
174 172 if sharedrepo and sharedfeatures > {'journal'}:
175 173 # there is a shared repository and there are shared journal entries
176 174 # to copy. Move shared data over from source to destination, but
177 175 # move the local file first
178 176 if repo.vfs.exists('namejournal'):
179 177 journalpath = repo.vfs.join('namejournal')
180 178 util.rename(journalpath, journalpath + '.bak')
181 179 storage = repo.journal
182 180 local = storage._open(
183 181 repo.vfs, filename='namejournal.bak', _newestfirst=False)
184 182 shared = (
185 183 e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
186 184 if sharednamespaces.get(e.namespace) in sharedfeatures)
187 185 for entry in _mergeentriesiter(local, shared, order=min):
188 186 storage._write(repo.vfs, entry)
189 187
190 188 return orig(ui, repo, repopath)
191 189
192 190 class journalentry(collections.namedtuple(
193 191 u'journalentry',
194 192 u'timestamp user command namespace name oldhashes newhashes')):
195 193 """Individual journal entry
196 194
197 195 * timestamp: a mercurial (time, timezone) tuple
198 196 * user: the username that ran the command
199 197 * namespace: the entry namespace, an opaque string
200 198 * name: the name of the changed item, opaque string with meaning in the
201 199 namespace
202 200 * command: the hg command that triggered this record
203 201 * oldhashes: a tuple of one or more binary hashes for the old location
204 202 * newhashes: a tuple of one or more binary hashes for the new location
205 203
206 204 Handles serialisation from and to the storage format. Fields are
207 205 separated by newlines, hashes are written out in hex separated by commas,
208 206 timestamp and timezone are separated by a space.
209 207
210 208 """
211 209 @classmethod
212 210 def fromstorage(cls, line):
213 211 (time, user, command, namespace, name,
214 212 oldhashes, newhashes) = line.split('\n')
215 213 timestamp, tz = time.split()
216 214 timestamp, tz = float(timestamp), int(tz)
217 215 oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
218 216 newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
219 217 return cls(
220 218 (timestamp, tz), user, command, namespace, name,
221 219 oldhashes, newhashes)
222 220
223 221 def __str__(self):
224 222 """String representation for storage"""
225 223 time = ' '.join(map(str, self.timestamp))
226 224 oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
227 225 newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
228 226 return '\n'.join((
229 227 time, self.user, self.command, self.namespace, self.name,
230 228 oldhashes, newhashes))
231 229
232 230 class journalstorage(object):
233 231 """Storage for journal entries
234 232
235 233 Entries are divided over two files; one with entries that pertain to the
236 234 local working copy *only*, and one with entries that are shared across
237 235 multiple working copies when shared using the share extension.
238 236
239 237 Entries are stored with NUL bytes as separators. See the journalentry
240 238 class for the per-entry structure.
241 239
242 240 The file format starts with an integer version, delimited by a NUL.
243 241
244 242 This storage uses a dedicated lock; this makes it easier to avoid issues
245 243 with adding entries that are added when the regular wlock is unlocked (e.g.
246 244 the dirstate).
247 245
248 246 """
249 247 _currentcommand = ()
250 248 _lockref = None
251 249
252 250 def __init__(self, repo):
253 251 self.user = util.getuser()
254 252 self.ui = repo.ui
255 253 self.vfs = repo.vfs
256 254
257 255 # is this working copy using a shared storage?
258 256 self.sharedfeatures = self.sharedvfs = None
259 257 if repo.shared():
260 258 features = _readsharedfeatures(repo)
261 sharedrepo = share._getsrcrepo(repo)
259 sharedrepo = hg.sharedreposource(repo)
262 260 if sharedrepo is not None and 'journal' in features:
263 261 self.sharedvfs = sharedrepo.vfs
264 262 self.sharedfeatures = features
265 263
266 264 # track the current command for recording in journal entries
267 265 @property
268 266 def command(self):
269 267 commandstr = ' '.join(
270 268 map(util.shellquote, journalstorage._currentcommand))
271 269 if '\n' in commandstr:
272 270 # truncate multi-line commands
273 271 commandstr = commandstr.partition('\n')[0] + ' ...'
274 272 return commandstr
275 273
276 274 @classmethod
277 275 def recordcommand(cls, *fullargs):
278 276 """Set the current hg arguments, stored with recorded entries"""
279 277 # Set the current command on the class because we may have started
280 278 # with a non-local repo (cloning for example).
281 279 cls._currentcommand = fullargs
282 280
283 281 def _currentlock(self, lockref):
284 282 """Returns the lock if it's held, or None if it's not.
285 283
286 284 (This is copied from the localrepo class)
287 285 """
288 286 if lockref is None:
289 287 return None
290 288 l = lockref()
291 289 if l is None or not l.held:
292 290 return None
293 291 return l
294 292
295 293 def jlock(self, vfs):
296 294 """Create a lock for the journal file"""
297 295 if self._currentlock(self._lockref) is not None:
298 296 raise error.Abort(_('journal lock does not support nesting'))
299 297 desc = _('journal of %s') % vfs.base
300 298 try:
301 299 l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
302 300 except error.LockHeld as inst:
303 301 self.ui.warn(
304 302 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
305 303 # default to 600 seconds timeout
306 304 l = lock.lock(
307 305 vfs, 'namejournal.lock',
308 306 self.ui.configint("ui", "timeout"), desc=desc)
309 307 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
310 308 self._lockref = weakref.ref(l)
311 309 return l
312 310
313 311 def record(self, namespace, name, oldhashes, newhashes):
314 312 """Record a new journal entry
315 313
316 314 * namespace: an opaque string; this can be used to filter on the type
317 315 of recorded entries.
318 316 * name: the name defining this entry; for bookmarks, this is the
319 317 bookmark name. Can be filtered on when retrieving entries.
320 318 * oldhashes and newhashes: each a single binary hash, or a list of
321 319 binary hashes. These represent the old and new position of the named
322 320 item.
323 321
324 322 """
325 323 if not isinstance(oldhashes, list):
326 324 oldhashes = [oldhashes]
327 325 if not isinstance(newhashes, list):
328 326 newhashes = [newhashes]
329 327
330 328 entry = journalentry(
331 329 util.makedate(), self.user, self.command, namespace, name,
332 330 oldhashes, newhashes)
333 331
334 332 vfs = self.vfs
335 333 if self.sharedvfs is not None:
336 334 # write to the shared repository if this feature is being
337 335 # shared between working copies.
338 336 if sharednamespaces.get(namespace) in self.sharedfeatures:
339 337 vfs = self.sharedvfs
340 338
341 339 self._write(vfs, entry)
342 340
343 341 def _write(self, vfs, entry):
344 342 with self.jlock(vfs):
345 343 version = None
346 344 # open file in append mode to ensure it is created if missing
347 345 with vfs('namejournal', mode='a+b') as f:
348 346 f.seek(0, os.SEEK_SET)
349 347 # Read just enough bytes to get a version number (up to 2
350 348 # digits plus separator)
351 349 version = f.read(3).partition('\0')[0]
352 350 if version and version != str(storageversion):
353 351 # different version of the storage. Exit early (and not
354 352 # write anything) if this is not a version we can handle or
355 353 # the file is corrupt. In future, perhaps rotate the file
356 354 # instead?
357 355 self.ui.warn(
358 356 _("unsupported journal file version '%s'\n") % version)
359 357 return
360 358 if not version:
361 359 # empty file, write version first
362 360 f.write(str(storageversion) + '\0')
363 361 f.seek(0, os.SEEK_END)
364 362 f.write(str(entry) + '\0')
365 363
366 364 def filtered(self, namespace=None, name=None):
367 365 """Yield all journal entries with the given namespace or name
368 366
369 367 Both the namespace and the name are optional; if neither is given all
370 368 entries in the journal are produced.
371 369
372 370 Matching supports regular expressions by using the `re:` prefix
373 371 (use `literal:` to match names or namespaces that start with `re:`)
374 372
375 373 """
376 374 if namespace is not None:
377 375 namespace = util.stringmatcher(namespace)[-1]
378 376 if name is not None:
379 377 name = util.stringmatcher(name)[-1]
380 378 for entry in self:
381 379 if namespace is not None and not namespace(entry.namespace):
382 380 continue
383 381 if name is not None and not name(entry.name):
384 382 continue
385 383 yield entry
386 384
387 385 def __iter__(self):
388 386 """Iterate over the storage
389 387
390 388 Yields journalentry instances for each contained journal record.
391 389
392 390 """
393 391 local = self._open(self.vfs)
394 392
395 393 if self.sharedvfs is None:
396 394 return local
397 395
398 396 # iterate over both local and shared entries, but only those
399 397 # shared entries that are among the currently shared features
400 398 shared = (
401 399 e for e in self._open(self.sharedvfs)
402 400 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
403 401 return _mergeentriesiter(local, shared)
404 402
405 403 def _open(self, vfs, filename='namejournal', _newestfirst=True):
406 404 if not vfs.exists(filename):
407 405 return
408 406
409 407 with vfs(filename) as f:
410 408 raw = f.read()
411 409
412 410 lines = raw.split('\0')
413 411 version = lines and lines[0]
414 412 if version != str(storageversion):
415 413 version = version or _('not available')
416 414 raise error.Abort(_("unknown journal file version '%s'") % version)
417 415
418 416 # Skip the first line, it's a version number. Normally we iterate over
419 417 # these in reverse order to list newest first; only when copying across
420 418 # a shared storage do we forgo reversing.
421 419 lines = lines[1:]
422 420 if _newestfirst:
423 421 lines = reversed(lines)
424 422 for line in lines:
425 423 if not line:
426 424 continue
427 425 yield journalentry.fromstorage(line)
428 426
429 427 # journal reading
430 428 # log options that don't make sense for journal
431 429 _ignoreopts = ('no-merges', 'graph')
432 430 @command(
433 431 'journal', [
434 432 ('', 'all', None, 'show history for all names'),
435 433 ('c', 'commits', None, 'show commit metadata'),
436 434 ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
437 435 '[OPTION]... [BOOKMARKNAME]')
438 436 def journal(ui, repo, *args, **opts):
439 437 """show the previous position of bookmarks and the working copy
440 438
441 439 The journal is used to see the previous commits that bookmarks and the
442 440 working copy pointed to. By default the previous locations of the working
443 441 copy are shown. Passing a bookmark name will show all the previous positions of
444 442 that bookmark. Use the --all switch to show previous locations for all
445 443 bookmarks and the working copy; each line will then include the bookmark
446 444 name, or '.' for the working copy, as well.
447 445
448 446 If `name` starts with `re:`, the remainder of the name is treated as
449 447 a regular expression. To match a name that actually starts with `re:`,
450 448 use the prefix `literal:`.
451 449
452 450 By default hg journal only shows the commit hash and the command that was
453 451 running at that time. -v/--verbose will show the prior hash, the user, and
454 452 the time at which it happened.
455 453
456 454 Use -c/--commits to output log information on each commit hash; at this
457 455 point you can use the usual `--patch`, `--git`, `--stat` and `--template`
458 456 switches to alter the log output for these.
459 457
460 458 `hg journal -T json` can be used to produce machine readable output.
461 459
462 460 """
463 461 opts = pycompat.byteskwargs(opts)
464 462 name = '.'
465 463 if opts.get('all'):
466 464 if args:
467 465 raise error.Abort(
468 466 _("You can't combine --all and filtering on a name"))
469 467 name = None
470 468 if args:
471 469 name = args[0]
472 470
473 471 fm = ui.formatter('journal', opts)
474 472
475 473 if opts.get("template") != "json":
476 474 if name is None:
477 475 displayname = _('the working copy and bookmarks')
478 476 else:
479 477 displayname = "'%s'" % name
480 478 ui.status(_("previous locations of %s:\n") % displayname)
481 479
482 480 limit = logcmdutil.getlimit(opts)
483 481 entry = None
484 482 ui.pager('journal')
485 483 for count, entry in enumerate(repo.journal.filtered(name=name)):
486 484 if count == limit:
487 485 break
488 486 newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
489 487 name='node', sep=',')
490 488 oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
491 489 name='node', sep=',')
492 490
493 491 fm.startitem()
494 492 fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
495 493 fm.write('newhashes', '%s', newhashesstr)
496 494 fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
497 495 fm.condwrite(
498 496 opts.get('all') or name.startswith('re:'),
499 497 'name', ' %-8s', entry.name)
500 498
501 499 timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
502 500 fm.condwrite(ui.verbose, 'date', ' %s', timestring)
503 501 fm.write('command', ' %s\n', entry.command)
504 502
505 503 if opts.get("commits"):
506 504 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
507 505 for hash in entry.newhashes:
508 506 try:
509 507 ctx = repo[hash]
510 508 displayer.show(ctx)
511 509 except error.RepoLookupError as e:
512 510 fm.write('repolookuperror', "%s\n\n", str(e))
513 511 displayer.close()
514 512
515 513 fm.end()
516 514
517 515 if entry is None:
518 516 ui.status(_("no recorded locations\n"))
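The _mergeentriesiter() helper above is a small k-way merge over already-sorted iterators. A standalone sketch of the same idea in Python 3 (the py2 itervalues() from the diff becomes values(); the names here are illustrative, not part of the change):

def mergeiter(*iterables, order=max):
    """Yield entries from sorted iterables in merged order.

    With order=max the inputs are assumed newest-first and the merged
    stream stays newest-first, as in the journal code above; order=min
    merges oldest-first streams, as unsharejournal() does.
    """
    # Each live input is tracked as [value, key, it] so that order()
    # compares values first and exhausted inputs can be dropped by key.
    live = {}
    for key, it in enumerate(map(iter, iterables)):
        try:
            live[key] = [next(it), key, it]
        except StopIteration:
            pass  # empty input, nothing to contribute
    while live:
        value, key, it = order(live.values())
        yield value
        try:
            live[key][0] = next(it)
        except StopIteration:
            del live[key]

# Two newest-first streams merge into one newest-first stream:
assert list(mergeiter([9, 5, 1], [8, 7, 2])) == [9, 8, 7, 5, 2, 1]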
@@ -1,116 +1,113 @@
1 1 # narrowrepo.py - repository which supports narrow revlogs, lazy loading
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial import (
11 11 bundlerepo,
12 hg,
12 13 localrepo,
13 14 match as matchmod,
14 15 scmutil,
15 16 )
16 17
17 from .. import (
18 share,
19 )
20
21 18 from . import (
22 19 narrowrevlog,
23 20 narrowspec,
24 21 )
25 22
26 23 # When narrowing is finalized and no longer subject to format changes,
27 24 # we should move this to just "narrow" or similar.
28 25 REQUIREMENT = 'narrowhg-experimental'
29 26
30 27 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
31 28 orig(sourcerepo, destrepo, **kwargs)
32 29 if REQUIREMENT in sourcerepo.requirements:
33 30 with destrepo.wlock():
34 31 with destrepo.vfs('shared', 'a') as fp:
35 32 fp.write(narrowspec.FILENAME + '\n')
36 33
37 34 def unsharenarrowspec(orig, ui, repo, repopath):
38 35 if (REQUIREMENT in repo.requirements
39 36 and repo.path == repopath and repo.shared()):
40 srcrepo = share._getsrcrepo(repo)
37 srcrepo = hg.sharedreposource(repo)
41 38 with srcrepo.vfs(narrowspec.FILENAME) as f:
42 39 spec = f.read()
43 40 with repo.vfs(narrowspec.FILENAME, 'w') as f:
44 41 f.write(spec)
45 42 return orig(ui, repo, repopath)
46 43
47 44 def wraprepo(repo, opts_narrow):
48 45 """Enables narrow clone functionality on a single local repository."""
49 46
50 47 cacheprop = localrepo.storecache
51 48 if isinstance(repo, bundlerepo.bundlerepository):
52 49 # We have to use a different caching property decorator for
53 50 # bundlerepo because storecache blows up in strange ways on a
54 51 # bundlerepo. Fortunately, there's no risk of data changing in
55 52 # a bundlerepo.
56 53 cacheprop = lambda name: localrepo.unfilteredpropertycache
57 54
58 55 class narrowrepository(repo.__class__):
59 56
60 57 def _constructmanifest(self):
61 58 manifest = super(narrowrepository, self)._constructmanifest()
62 59 narrowrevlog.makenarrowmanifestrevlog(manifest, repo)
63 60 return manifest
64 61
65 62 @cacheprop('00manifest.i')
66 63 def manifestlog(self):
67 64 mfl = super(narrowrepository, self).manifestlog
68 65 narrowrevlog.makenarrowmanifestlog(mfl, self)
69 66 return mfl
70 67
71 68 def file(self, f):
72 69 fl = super(narrowrepository, self).file(f)
73 70 narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
74 71 return fl
75 72
76 73 @localrepo.repofilecache(narrowspec.FILENAME)
77 74 def narrowpats(self):
78 75 """matcher patterns for this repository's narrowspec
79 76
80 77 A tuple of (includes, excludes).
81 78 """
82 79 return narrowspec.load(self)
83 80
84 81 @localrepo.repofilecache(narrowspec.FILENAME)
85 82 def _narrowmatch(self):
86 83 include, exclude = self.narrowpats
87 84 if not opts_narrow and not include and not exclude:
88 85 return matchmod.always(self.root, '')
89 86 return narrowspec.match(self.root, include=include, exclude=exclude)
90 87
91 88 # TODO(martinvonz): make this property-like instead?
92 89 def narrowmatch(self):
93 90 return self._narrowmatch
94 91
95 92 def setnarrowpats(self, newincludes, newexcludes):
96 93 narrowspec.save(self, newincludes, newexcludes)
97 94 self.invalidate(clearfilecache=True)
98 95
99 96 # I'm not sure this is the right place to do this filter.
100 97 # context._manifestmatches() would probably be better, or perhaps
101 98 # move it to a later place, in case some of the callers do want to know
102 99 # which directories changed. This seems to work for now, though.
103 100 def status(self, *args, **kwargs):
104 101 s = super(narrowrepository, self).status(*args, **kwargs)
105 102 narrowmatch = self.narrowmatch()
106 103 modified = list(filter(narrowmatch, s.modified))
107 104 added = list(filter(narrowmatch, s.added))
108 105 removed = list(filter(narrowmatch, s.removed))
109 106 deleted = list(filter(narrowmatch, s.deleted))
110 107 unknown = list(filter(narrowmatch, s.unknown))
111 108 ignored = list(filter(narrowmatch, s.ignored))
112 109 clean = list(filter(narrowmatch, s.clean))
113 110 return scmutil.status(modified, added, removed, deleted, unknown,
114 111 ignored, clean)
115 112
116 113 repo.__class__ = narrowrepository
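The status() override above filters every category of the status tuple through the narrow matcher. A self-contained sketch of that pattern, with a plain predicate standing in for the matcher and a three-field tuple standing in for scmutil.status:

from typing import Callable, List, NamedTuple

class Status(NamedTuple):
    modified: List[str]
    added: List[str]
    removed: List[str]

def narrowstatus(s: Status, match: Callable[[str], bool]) -> Status:
    # Keep only the files the matcher accepts, category by category,
    # mirroring narrowrepository.status() above.
    return Status(*(list(filter(match, files)) for files in s))

s = Status(modified=['a/x', 'b/y'], added=['a/z'], removed=['c/w'])
assert narrowstatus(s, lambda f: f.startswith('a/')) == \
    Status(['a/x'], ['a/z'], [])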
@@ -1,207 +1,204 @@
1 1 # narrowspec.py - methods for working with a narrow view of a repository
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial import (
14 14 error,
15 hg,
15 16 match as matchmod,
16 17 util,
17 18 )
18 19
19 from .. import (
20 share,
21 )
22
23 20 FILENAME = 'narrowspec'
24 21
25 22 def _parsestoredpatterns(text):
26 23 """Parses the narrowspec format that's stored on disk."""
27 24 patlist = None
28 25 includepats = []
29 26 excludepats = []
30 27 for l in text.splitlines():
31 28 if l == '[includes]':
32 29 if patlist is None:
33 30 patlist = includepats
34 31 else:
35 32 raise error.Abort(_('narrowspec includes section must appear '
36 33 'at most once, before excludes'))
37 34 elif l == '[excludes]':
38 35 if patlist is not excludepats:
39 36 patlist = excludepats
40 37 else:
41 38 raise error.Abort(_('narrowspec excludes section must appear '
42 39 'at most once'))
43 40 else:
44 41 patlist.append(l)
45 42
46 43 return set(includepats), set(excludepats)
47 44
48 45 def parseserverpatterns(text):
49 46 """Parses the narrowspec format that's returned by the server."""
50 47 includepats = set()
51 48 excludepats = set()
52 49
53 50 # We get one entry per line, in the format "<key> <value>".
54 51 # It's OK for value to contain other spaces.
55 52 for kp in (l.split(' ', 1) for l in text.splitlines()):
56 53 if len(kp) != 2:
57 54 raise error.Abort(_('Invalid narrowspec pattern line: "%s"') % kp)
58 55 key = kp[0]
59 56 pat = kp[1]
60 57 if key == 'include':
61 58 includepats.add(pat)
62 59 elif key == 'exclude':
63 60 excludepats.add(pat)
64 61 else:
65 62 raise error.Abort(_('Invalid key "%s" in server response') % key)
66 63
67 64 return includepats, excludepats
68 65
69 66 def normalizesplitpattern(kind, pat):
70 67 """Returns the normalized version of a pattern and kind.
71 68
72 69 Returns a tuple with the normalized kind and normalized pattern.
73 70 """
74 71 pat = pat.rstrip('/')
75 72 _validatepattern(pat)
76 73 return kind, pat
77 74
78 75 def _numlines(s):
79 76 """Returns the number of lines in s, including ending empty lines."""
80 77 # We use splitlines because it is Unicode-friendly and thus Python 3
81 78 # compatible. However, it does not count empty lines at the end, so trick
82 79 # it by adding a character at the end.
83 80 return len((s + 'x').splitlines())
84 81
85 82 def _validatepattern(pat):
86 83 """Validates the pattern and aborts if it is invalid.
87 84
88 85 Patterns are stored in the narrowspec as newline-separated
89 86 POSIX-style bytestring paths. There's no escaping.
90 87 """
91 88
92 89 # We use newlines as separators in the narrowspec file, so don't allow them
93 90 # in patterns.
94 91 if _numlines(pat) > 1:
95 92 raise error.Abort('newlines are not allowed in narrowspec paths')
96 93
97 94 components = pat.split('/')
98 95 if '.' in components or '..' in components:
99 96 raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
100 97
101 98 def normalizepattern(pattern, defaultkind='path'):
102 99 """Returns the normalized version of a text-format pattern.
103 100
104 101 If the pattern has no kind, the default will be added.
105 102 """
106 103 kind, pat = matchmod._patsplit(pattern, defaultkind)
107 104 return '%s:%s' % normalizesplitpattern(kind, pat)
108 105
109 106 def parsepatterns(pats):
110 107 """Parses a list of patterns into a typed pattern set."""
111 108 return set(normalizepattern(p) for p in pats)
112 109
113 110 def format(includes, excludes):
114 111 output = '[includes]\n'
115 112 for i in sorted(includes - excludes):
116 113 output += i + '\n'
117 114 output += '[excludes]\n'
118 115 for e in sorted(excludes):
119 116 output += e + '\n'
120 117 return output
121 118
122 119 def match(root, include=None, exclude=None):
123 120 if not include:
124 121 # Passing empty include and empty exclude to matchmod.match()
125 122 # gives a matcher that matches everything, so explicitly use
126 123 # the nevermatcher.
127 124 return matchmod.never(root, '')
128 125 return matchmod.match(root, '', [], include=include or [],
129 126 exclude=exclude or [])
130 127
131 128 def needsexpansion(includes):
132 129 return [i for i in includes if i.startswith('include:')]
133 130
134 131 def load(repo):
135 132 if repo.shared():
136 repo = share._getsrcrepo(repo)
133 repo = hg.sharedreposource(repo)
137 134 try:
138 135 spec = repo.vfs.read(FILENAME)
139 136 except IOError as e:
140 137 # Treat "narrowspec does not exist" the same as "narrowspec file exists
141 138 # and is empty".
142 139 if e.errno == errno.ENOENT:
143 140 # Without this the next call to load will use the cached
144 141 # non-existence of the file, which can cause some odd issues.
145 142 repo.invalidate(clearfilecache=True)
146 143 return set(), set()
147 144 raise
148 145 return _parsestoredpatterns(spec)
149 146
150 147 def save(repo, includepats, excludepats):
151 148 spec = format(includepats, excludepats)
152 149 if repo.shared():
153 repo = share._getsrcrepo(repo)
150 repo = hg.sharedreposource(repo)
154 151 repo.vfs.write(FILENAME, spec)
155 152
156 153 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
157 154 r""" Restricts the patterns according to repo settings,
158 155 resulting in a logical AND operation.
159 156
160 157 :param req_includes: requested includes
161 158 :param req_excludes: requested excludes
162 159 :param repo_includes: repo includes
163 160 :param repo_excludes: repo excludes
164 161 :return: include patterns, exclude patterns, and invalid include patterns.
165 162
166 163 >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
167 164 (set(['f1']), {}, [])
168 165 >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
169 166 (set(['f1']), {}, [])
170 167 >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
171 168 (set(['f1/fc1']), {}, [])
172 169 >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
173 170 ([], set(['path:.']), [])
174 171 >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
175 172 (set(['f2/fc2']), {}, [])
176 173 >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
177 174 ([], set(['path:.']), [])
178 175 >>> restrictpatterns({'f1/$non_existent_var'}, {}, ['f1','f2'], [])
179 176 (set(['f1/$non_existent_var']), {}, [])
180 177 """
181 178 res_excludes = set(req_excludes)
182 179 res_excludes.update(repo_excludes)
183 180 invalid_includes = []
184 181 if not req_includes:
185 182 res_includes = set(repo_includes)
186 183 elif 'path:.' not in repo_includes:
187 184 res_includes = []
188 185 for req_include in req_includes:
189 186 req_include = util.expandpath(util.normpath(req_include))
190 187 if req_include in repo_includes:
191 188 res_includes.append(req_include)
192 189 continue
193 190 valid = False
194 191 for repo_include in repo_includes:
195 192 if req_include.startswith(repo_include + '/'):
196 193 valid = True
197 194 res_includes.append(req_include)
198 195 break
199 196 if not valid:
200 197 invalid_includes.append(req_include)
201 198 if len(res_includes) == 0:
202 199 res_excludes = {'path:.'}
203 200 else:
204 201 res_includes = set(res_includes)
205 202 else:
206 203 res_includes = set(req_includes)
207 204 return res_includes, res_excludes, invalid_includes
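_parsestoredpatterns() and format() above round-trip a simple sectioned text format. A condensed sketch of that round-trip (section names are taken from the diff; the abort-on-malformed-input checks are elided):

def formatspec(includes, excludes):
    # Mirrors narrowspec.format(): excludes win, entries are sorted.
    lines = ['[includes]']
    lines += sorted(set(includes) - set(excludes))
    lines.append('[excludes]')
    lines += sorted(excludes)
    return '\n'.join(lines) + '\n'

def parsespec(text):
    # Simplified counterpart of _parsestoredpatterns(): two sections,
    # every other non-empty line is a pattern in the current section.
    includes, excludes, current = set(), set(), None
    for line in text.splitlines():
        if line == '[includes]':
            current = includes
        elif line == '[excludes]':
            current = excludes
        elif line:
            current.add(line)
    return includes, excludes

text = formatspec({'path:a', 'path:b'}, {'path:b'})
assert parsespec(text) == ({'path:a'}, {'path:b'})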
@@ -1,201 +1,180 @@
1 1 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 '''share a common history between several working directories
7 7
8 8 Automatic Pooled Storage for Clones
9 9 -----------------------------------
10 10
11 11 When this extension is active, :hg:`clone` can be configured to
12 12 automatically share/pool storage across multiple clones. This
13 13 mode effectively converts :hg:`clone` to :hg:`clone` + :hg:`share`.
14 14 The benefit of using this mode is the automatic management of
15 15 store paths and intelligent pooling of related repositories.
16 16
17 17 The following ``share.`` config options influence this feature:
18 18
19 19 ``share.pool``
20 20 Filesystem path where shared repository data will be stored. When
21 21 defined, :hg:`clone` will automatically use shared repository
22 22 storage instead of creating a store inside each clone.
23 23
24 24 ``share.poolnaming``
25 25 How directory names in ``share.pool`` are constructed.
26 26
27 27 "identity" means the name is derived from the first changeset in the
28 28 repository. In this mode, different remotes share storage if their
29 29 root/initial changeset is identical. In this mode, the local shared
30 30 repository is an aggregate of all encountered remote repositories.
31 31
32 32 "remote" means the name is derived from the source repository's
33 33 path or URL. In this mode, storage is only shared if the path or URL
34 34 requested in the :hg:`clone` command matches exactly to a repository
35 35 that was cloned before.
36 36
37 37 The default naming mode is "identity".
38 38 '''
39 39
40 40 from __future__ import absolute_import
41 41
42 42 import errno
43 43 from mercurial.i18n import _
44 44 from mercurial import (
45 45 bookmarks,
46 46 commands,
47 47 error,
48 48 extensions,
49 49 hg,
50 50 registrar,
51 51 txnutil,
52 52 util,
53 53 )
54 54
55 repository = hg.repository
56 parseurl = hg.parseurl
57
58 55 cmdtable = {}
59 56 command = registrar.command(cmdtable)
60 57 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 58 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 59 # be specifying the version(s) of Mercurial they are tested with, or
63 60 # leave the attribute unspecified.
64 61 testedwith = 'ships-with-hg-core'
65 62
66 63 @command('share',
67 64 [('U', 'noupdate', None, _('do not create a working directory')),
68 65 ('B', 'bookmarks', None, _('also share bookmarks')),
69 66 ('', 'relative', None, _('point to source using a relative path '
70 67 '(EXPERIMENTAL)')),
71 68 ],
72 69 _('[-U] [-B] SOURCE [DEST]'),
73 70 norepo=True)
74 71 def share(ui, source, dest=None, noupdate=False, bookmarks=False,
75 72 relative=False):
76 73 """create a new shared repository
77 74
78 75 Initialize a new repository and working directory that shares its
79 76 history (and optionally bookmarks) with another repository.
80 77
81 78 .. note::
82 79
83 80 using rollback or extensions that destroy/modify history (mq,
84 81 rebase, etc.) can cause considerable confusion with shared
85 82 clones. In particular, if two shared clones are both updated to
86 83 the same changeset, and one of them destroys that changeset
87 84 with rollback, the other clone will suddenly stop working: all
88 85 operations will fail with "abort: working directory has unknown
89 86 parent". The only known workaround is to use debugsetparents on
90 87 the broken clone to reset it to a changeset that still exists.
91 88 """
92 89
93 90 hg.share(ui, source, dest=dest, update=not noupdate,
94 91 bookmarks=bookmarks, relative=relative)
95 92 return 0
96 93
97 94 @command('unshare', [], '')
98 95 def unshare(ui, repo):
99 96 """convert a shared repository to a normal one
100 97
101 98 Copy the store data to the repo and remove the sharedpath data.
102 99 """
103 100
104 101 if not repo.shared():
105 102 raise error.Abort(_("this is not a shared repo"))
106 103
107 104 hg.unshare(ui, repo)
108 105
109 106 # Wrap clone command to pass auto share options.
110 107 def clone(orig, ui, source, *args, **opts):
111 108 pool = ui.config('share', 'pool')
112 109 if pool:
113 110 pool = util.expandpath(pool)
114 111
115 112 opts[r'shareopts'] = {
116 113 'pool': pool,
117 114 'mode': ui.config('share', 'poolnaming'),
118 115 }
119 116
120 117 return orig(ui, source, *args, **opts)
121 118
122 119 def extsetup(ui):
123 120 extensions.wrapfunction(bookmarks, '_getbkfile', getbkfile)
124 121 extensions.wrapfunction(bookmarks.bmstore, '_recordchange', recordchange)
125 122 extensions.wrapfunction(bookmarks.bmstore, '_writerepo', writerepo)
126 123 extensions.wrapcommand(commands.table, 'clone', clone)
127 124
128 125 def _hassharedbookmarks(repo):
129 126 """Returns whether this repo has shared bookmarks"""
130 127 try:
131 128 shared = repo.vfs.read('shared').splitlines()
132 129 except IOError as inst:
133 130 if inst.errno != errno.ENOENT:
134 131 raise
135 132 return False
136 133 return hg.sharedbookmarks in shared
137 134
138 def _getsrcrepo(repo):
139 """
140 Returns the source repository object for a given shared repository.
141 If repo is not a shared repository, return None.
142 """
143 if repo.sharedpath == repo.path:
144 return None
145
146 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
147 return repo.srcrepo
148
149 # the sharedpath always ends in the .hg; we want the path to the repo
150 source = repo.vfs.split(repo.sharedpath)[0]
151 srcurl, branches = parseurl(source)
152 srcrepo = repository(repo.ui, srcurl)
153 repo.srcrepo = srcrepo
154 return srcrepo
155
156 135 def getbkfile(orig, repo):
157 136 if _hassharedbookmarks(repo):
158 srcrepo = _getsrcrepo(repo)
137 srcrepo = hg.sharedreposource(repo)
159 138 if srcrepo is not None:
160 139 # just orig(srcrepo) doesn't work as expected, because
161 140 # HG_PENDING refers to repo.root.
162 141 try:
163 142 fp, pending = txnutil.trypending(repo.root, repo.vfs,
164 143 'bookmarks')
165 144 if pending:
166 145 # only in this case, bookmark information in repo
167 146 # is up-to-date.
168 147 return fp
169 148 fp.close()
170 149 except IOError as inst:
171 150 if inst.errno != errno.ENOENT:
172 151 raise
173 152
174 153 # otherwise, we should read bookmarks from srcrepo,
175 154 # because .hg/bookmarks in srcrepo might be already
176 155 # changed via another sharing repo
177 156 repo = srcrepo
178 157
179 158 # TODO: Pending changes in repo are still invisible in
180 159 # srcrepo, because bookmarks.pending is written only into repo.
181 160 # See also https://www.mercurial-scm.org/wiki/SharedRepository
182 161 return orig(repo)
183 162
184 163 def recordchange(orig, self, tr):
185 164 # Continue with write to local bookmarks file as usual
186 165 orig(self, tr)
187 166
188 167 if _hassharedbookmarks(self._repo):
189 srcrepo = _getsrcrepo(self._repo)
168 srcrepo = hg.sharedreposource(self._repo)
190 169 if srcrepo is not None:
191 170 category = 'share-bookmarks'
192 171 tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
193 172
194 173 def writerepo(orig, self, repo):
195 174 # First write local bookmarks file in case we ever unshare
196 175 orig(self, repo)
197 176
198 177 if _hassharedbookmarks(self._repo):
199 srcrepo = _getsrcrepo(self._repo)
178 srcrepo = hg.sharedreposource(self._repo)
200 179 if srcrepo is not None:
201 180 orig(self, srcrepo)
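Both _hassharedbookmarks() here and _readsharedfeatures() in journal.py treat .hg/shared as a newline-separated list of feature names. A minimal sketch of that check against an ordinary file path (the path argument is hypothetical):

import errno

def readsharedfeatures(path):
    """Return the set of feature names in a '.hg/shared' style file.

    A missing file means no shared features, matching the ENOENT
    handling in the extension code above.
    """
    try:
        with open(path) as fp:
            return set(fp.read().splitlines())
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return set()

# 'bookmarks' in readsharedfeatures('/repo/.hg/shared') mirrors
# _hassharedbookmarks(); the journal extension appends 'journal'.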
@@ -1,1117 +1,1135 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 nullid,
19 19 )
20 20
21 21 from . import (
22 22 bookmarks,
23 23 bundlerepo,
24 24 cacheutil,
25 25 cmdutil,
26 26 destutil,
27 27 discovery,
28 28 error,
29 29 exchange,
30 30 extensions,
31 31 httppeer,
32 32 localrepo,
33 33 lock,
34 34 logcmdutil,
35 35 logexchange,
36 36 merge as mergemod,
37 37 node,
38 38 phases,
39 39 scmutil,
40 40 sshpeer,
41 41 statichttprepo,
42 42 ui as uimod,
43 43 unionrepo,
44 44 url,
45 45 util,
46 46 verify as verifymod,
47 47 vfs as vfsmod,
48 48 )
49 49
50 50 release = lock.release
51 51
52 52 # shared features
53 53 sharedbookmarks = 'bookmarks'
54 54
55 55 def _local(path):
56 56 path = util.expandpath(util.urllocalpath(path))
57 57 return (os.path.isfile(path) and bundlerepo or localrepo)
58 58
59 59 def addbranchrevs(lrepo, other, branches, revs):
60 60 peer = other.peer() # a courtesy to callers using a localrepo for other
61 61 hashbranch, branches = branches
62 62 if not hashbranch and not branches:
63 63 x = revs or None
64 64 if util.safehasattr(revs, 'first'):
65 65 y = revs.first()
66 66 elif revs:
67 67 y = revs[0]
68 68 else:
69 69 y = None
70 70 return x, y
71 71 if revs:
72 72 revs = list(revs)
73 73 else:
74 74 revs = []
75 75
76 76 if not peer.capable('branchmap'):
77 77 if branches:
78 78 raise error.Abort(_("remote branch lookup not supported"))
79 79 revs.append(hashbranch)
80 80 return revs, revs[0]
81 81 branchmap = peer.branchmap()
82 82
83 83 def primary(branch):
84 84 if branch == '.':
85 85 if not lrepo:
86 86 raise error.Abort(_("dirstate branch not accessible"))
87 87 branch = lrepo.dirstate.branch()
88 88 if branch in branchmap:
89 89 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
90 90 return True
91 91 else:
92 92 return False
93 93
94 94 for branch in branches:
95 95 if not primary(branch):
96 96 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
97 97 if hashbranch:
98 98 if not primary(hashbranch):
99 99 revs.append(hashbranch)
100 100 return revs, revs[0]
101 101
102 102 def parseurl(path, branches=None):
103 103 '''parse url#branch, returning (url, (branch, branches))'''
104 104
105 105 u = util.url(path)
106 106 branch = None
107 107 if u.fragment:
108 108 branch = u.fragment
109 109 u.fragment = None
110 110 return bytes(u), (branch, branches or [])
111 111
112 112 schemes = {
113 113 'bundle': bundlerepo,
114 114 'union': unionrepo,
115 115 'file': _local,
116 116 'http': httppeer,
117 117 'https': httppeer,
118 118 'ssh': sshpeer,
119 119 'static-http': statichttprepo,
120 120 }
121 121
122 122 def _peerlookup(path):
123 123 u = util.url(path)
124 124 scheme = u.scheme or 'file'
125 125 thing = schemes.get(scheme) or schemes['file']
126 126 try:
127 127 return thing(path)
128 128 except TypeError:
129 129 # we can't test callable(thing) because 'thing' can be an unloaded
130 130 # module that implements __call__
131 131 if not util.safehasattr(thing, 'instance'):
132 132 raise
133 133 return thing
134 134
135 135 def islocal(repo):
136 136 '''return true if repo (or path pointing to repo) is local'''
137 137 if isinstance(repo, bytes):
138 138 try:
139 139 return _peerlookup(repo).islocal(repo)
140 140 except AttributeError:
141 141 return False
142 142 return repo.local()
143 143
144 144 def openpath(ui, path):
145 145 '''open path with open if local, url.open if remote'''
146 146 pathurl = util.url(path, parsequery=False, parsefragment=False)
147 147 if pathurl.islocal():
148 148 return util.posixfile(pathurl.localpath(), 'rb')
149 149 else:
150 150 return url.open(ui, path)
151 151
152 152 # a list of (ui, repo) functions called for wire peer initialization
153 153 wirepeersetupfuncs = []
154 154
155 155 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
156 156 """return a repository object for the specified path"""
157 157 obj = _peerlookup(path).instance(ui, path, create)
158 158 ui = getattr(obj, "ui", ui)
159 159 for f in presetupfuncs or []:
160 160 f(ui, obj)
161 161 for name, module in extensions.extensions(ui):
162 162 hook = getattr(module, 'reposetup', None)
163 163 if hook:
164 164 hook(ui, obj)
165 165 if not obj.local():
166 166 for f in wirepeersetupfuncs:
167 167 f(ui, obj)
168 168 return obj
169 169
170 170 def repository(ui, path='', create=False, presetupfuncs=None):
171 171 """return a repository object for the specified path"""
172 172 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
173 173 repo = peer.local()
174 174 if not repo:
175 175 raise error.Abort(_("repository '%s' is not local") %
176 176 (path or peer.url()))
177 177 return repo.filtered('visible')
178 178
179 179 def peer(uiorrepo, opts, path, create=False):
180 180 '''return a repository peer for the specified path'''
181 181 rui = remoteui(uiorrepo, opts)
182 182 return _peerorrepo(rui, path, create).peer()
183 183
184 184 def defaultdest(source):
185 185 '''return default destination of clone if none is given
186 186
187 187 >>> defaultdest(b'foo')
188 188 'foo'
189 189 >>> defaultdest(b'/foo/bar')
190 190 'bar'
191 191 >>> defaultdest(b'/')
192 192 ''
193 193 >>> defaultdest(b'')
194 194 ''
195 195 >>> defaultdest(b'http://example.org/')
196 196 ''
197 197 >>> defaultdest(b'http://example.org/foo/')
198 198 'foo'
199 199 '''
200 200 path = util.url(source).path
201 201 if not path:
202 202 return ''
203 203 return os.path.basename(os.path.normpath(path))
204 204
205 def sharedreposource(repo):
206 """Returns repository object for source repository of a shared repo.
207
208 If repo is not a shared repository, returns None.
209 """
210 if repo.sharedpath == repo.path:
211 return None
212
213 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
214 return repo.srcrepo
215
216 # the sharedpath always ends in the .hg; we want the path to the repo
217 source = repo.vfs.split(repo.sharedpath)[0]
218 srcurl, branches = parseurl(source)
219 srcrepo = repository(repo.ui, srcurl)
220 repo.srcrepo = srcrepo
221 return srcrepo
222
205 223 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
206 224 relative=False):
207 225 '''create a shared repository'''
208 226
209 227 if not islocal(source):
210 228 raise error.Abort(_('can only share local repositories'))
211 229
212 230 if not dest:
213 231 dest = defaultdest(source)
214 232 else:
215 233 dest = ui.expandpath(dest)
216 234
217 235 if isinstance(source, bytes):
218 236 origsource = ui.expandpath(source)
219 237 source, branches = parseurl(origsource)
220 238 srcrepo = repository(ui, source)
221 239 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
222 240 else:
223 241 srcrepo = source.local()
224 242 origsource = source = srcrepo.url()
225 243 checkout = None
226 244
227 245 sharedpath = srcrepo.sharedpath # if our source is already sharing
228 246
229 247 destwvfs = vfsmod.vfs(dest, realpath=True)
230 248 destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)
231 249
232 250 if destvfs.lexists():
233 251 raise error.Abort(_('destination already exists'))
234 252
235 253 if not destwvfs.isdir():
236 254 destwvfs.mkdir()
237 255 destvfs.makedir()
238 256
239 257 requirements = ''
240 258 try:
241 259 requirements = srcrepo.vfs.read('requires')
242 260 except IOError as inst:
243 261 if inst.errno != errno.ENOENT:
244 262 raise
245 263
246 264 if relative:
247 265 try:
248 266 sharedpath = os.path.relpath(sharedpath, destvfs.base)
249 267 requirements += 'relshared\n'
250 268 except (IOError, ValueError) as e:
251 269 # ValueError is raised on Windows if the drive letters differ on
252 270 # each path
253 271 raise error.Abort(_('cannot calculate relative path'),
254 272 hint=str(e))
255 273 else:
256 274 requirements += 'shared\n'
257 275
258 276 destvfs.write('requires', requirements)
259 277 destvfs.write('sharedpath', sharedpath)
260 278
261 279 r = repository(ui, destwvfs.base)
262 280 postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
263 281 _postshareupdate(r, update, checkout=checkout)
264 282 return r
265 283
266 284 def unshare(ui, repo):
267 285 """convert a shared repository to a normal one
268 286
269 287 Copy the store data to the repo and remove the sharedpath data.
270 288 """
271 289
272 290 destlock = lock = None
273 291 lock = repo.lock()
274 292 try:
275 293 # we use locks here because if we race with commit, we
276 294 # can end up with extra data in the cloned revlogs that's
277 295 # not pointed to by changesets, thus causing verify to
278 296 # fail
279 297
280 298 destlock = copystore(ui, repo, repo.path)
281 299
282 300 sharefile = repo.vfs.join('sharedpath')
283 301 util.rename(sharefile, sharefile + '.old')
284 302
285 303 repo.requirements.discard('shared')
286 304 repo.requirements.discard('relshared')
287 305 repo._writerequirements()
288 306 finally:
289 307 destlock and destlock.release()
290 308 lock and lock.release()
291 309
292 310 # update store, spath, svfs and sjoin of repo
293 311 repo.unfiltered().__init__(repo.baseui, repo.root)
294 312
295 313 # TODO: figure out how to access subrepos that exist, but were previously
296 314 # removed from .hgsub
297 315 c = repo['.']
298 316 subs = c.substate
299 317 for s in sorted(subs):
300 318 c.sub(s).unshare()
301 319
302 320 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
303 321 """Called after a new shared repo is created.
304 322
305 323 The new repo only has a requirements file and pointer to the source.
306 324 This function configures additional shared data.
307 325
308 326 Extensions can wrap this function and write additional entries to
309 327 destrepo/.hg/shared to indicate additional pieces of data to be shared.
310 328 """
311 329 default = defaultpath or sourcerepo.ui.config('paths', 'default')
312 330 if default:
313 331 template = ('[paths]\n'
314 332 'default = %s\n')
315 333 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
316 334
317 335 with destrepo.wlock():
318 336 if bookmarks:
319 337 destrepo.vfs.write('shared', sharedbookmarks + '\n')
320 338
321 339 def _postshareupdate(repo, update, checkout=None):
322 340 """Maybe perform a working directory update after a shared repo is created.
323 341
324 342 ``update`` can be a boolean or a revision to update to.
325 343 """
326 344 if not update:
327 345 return
328 346
329 347 repo.ui.status(_("updating working directory\n"))
330 348 if update is not True:
331 349 checkout = update
332 350 for test in (checkout, 'default', 'tip'):
333 351 if test is None:
334 352 continue
335 353 try:
336 354 uprev = repo.lookup(test)
337 355 break
338 356 except error.RepoLookupError:
339 357 continue
340 358 _update(repo, uprev)
341 359
342 360 def copystore(ui, srcrepo, destpath):
343 361 '''copy files from store of srcrepo in destpath
344 362
345 363 returns destlock
346 364 '''
347 365 destlock = None
348 366 try:
349 367 hardlink = None
350 368 num = 0
351 369 closetopic = [None]
352 370 def prog(topic, pos):
353 371 if pos is None:
354 372 closetopic[0] = topic
355 373 else:
356 374 ui.progress(topic, pos + num)
357 375 srcpublishing = srcrepo.publishing()
358 376 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
359 377 dstvfs = vfsmod.vfs(destpath)
360 378 for f in srcrepo.store.copylist():
361 379 if srcpublishing and f.endswith('phaseroots'):
362 380 continue
363 381 dstbase = os.path.dirname(f)
364 382 if dstbase and not dstvfs.exists(dstbase):
365 383 dstvfs.mkdir(dstbase)
366 384 if srcvfs.exists(f):
367 385 if f.endswith('data'):
368 386 # 'dstbase' may be empty (e.g. revlog format 0)
369 387 lockfile = os.path.join(dstbase, "lock")
370 388 # lock to avoid premature writing to the target
371 389 destlock = lock.lock(dstvfs, lockfile)
372 390 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
373 391 hardlink, progress=prog)
374 392 num += n
375 393 if hardlink:
376 394 ui.debug("linked %d files\n" % num)
377 395 if closetopic[0]:
378 396 ui.progress(closetopic[0], None)
379 397 else:
380 398 ui.debug("copied %d files\n" % num)
381 399 if closetopic[0]:
382 400 ui.progress(closetopic[0], None)
383 401 return destlock
384 402 except: # re-raises
385 403 release(destlock)
386 404 raise
387 405
388 406 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
389 407 rev=None, update=True, stream=False):
390 408 """Perform a clone using a shared repo.
391 409
392 410 The store for the repository will be located at <sharepath>/.hg. The
393 411 specified revisions will be cloned or pulled from "source". A shared repo
394 412 will be created at "dest" and a working copy will be created if "update" is
395 413 True.
396 414 """
397 415 revs = None
398 416 if rev:
399 417 if not srcpeer.capable('lookup'):
400 418 raise error.Abort(_("src repository does not support "
401 419 "revision lookup and so doesn't "
402 420 "support clone by revision"))
403 421 revs = [srcpeer.lookup(r) for r in rev]
404 422
405 423 # Obtain a lock before checking for or cloning the pooled repo otherwise
406 424 # 2 clients may race creating or populating it.
407 425 pooldir = os.path.dirname(sharepath)
408 426 # lock class requires the directory to exist.
409 427 try:
410 428 util.makedir(pooldir, False)
411 429 except OSError as e:
412 430 if e.errno != errno.EEXIST:
413 431 raise
414 432
415 433 poolvfs = vfsmod.vfs(pooldir)
416 434 basename = os.path.basename(sharepath)
417 435
418 436 with lock.lock(poolvfs, '%s.lock' % basename):
419 437 if os.path.exists(sharepath):
420 438 ui.status(_('(sharing from existing pooled repository %s)\n') %
421 439 basename)
422 440 else:
423 441 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
424 442 # Always use pull mode because hardlinks in share mode don't work
425 443 # well. Never update because working copies aren't necessary in
426 444 # share mode.
427 445 clone(ui, peeropts, source, dest=sharepath, pull=True,
428 446 rev=rev, update=False, stream=stream)
429 447
430 448 # Resolve the value to put in [paths] section for the source.
431 449 if islocal(source):
432 450 defaultpath = os.path.abspath(util.urllocalpath(source))
433 451 else:
434 452 defaultpath = source
435 453
436 454 sharerepo = repository(ui, path=sharepath)
437 455 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
438 456 defaultpath=defaultpath)
439 457
440 458 # We need to perform a pull against the dest repo to fetch bookmarks
441 459 # and other non-store data that isn't shared by default. In the case of
443 461 # a non-existing shared repo, this means we pull from the remote twice. This
443 461 # is a bit weird. But at the time it was implemented, there wasn't an easy
444 462 # way to pull just non-changegroup data.
445 463 destrepo = repository(ui, path=dest)
446 464 exchange.pull(destrepo, srcpeer, heads=revs)
447 465
448 466 _postshareupdate(destrepo, update)
449 467
450 468 return srcpeer, peer(ui, peeropts, dest)
451 469
452 470 # Recomputing branch cache might be slow on big repos,
453 471 # so just copy it
454 472 def _copycache(srcrepo, dstcachedir, fname):
455 473 """copy a cache from srcrepo to destcachedir (if it exists)"""
456 474 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
457 475 dstbranchcache = os.path.join(dstcachedir, fname)
458 476 if os.path.exists(srcbranchcache):
459 477 if not os.path.exists(dstcachedir):
460 478 os.mkdir(dstcachedir)
461 479 util.copyfile(srcbranchcache, dstbranchcache)
462 480
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=rev, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                # only pass ui when no srcrepo
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            local = destpeer.local()
            if local:
                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(
                    _("clone from remote to remote not supported"))

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

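# Illustrative sketch, not part of the original module: driving clone()
# with pooled share storage enabled. All paths here are hypothetical.
def _exampleclone(ui):
    # "pool" activates auto-share mode; "identity" keys the pooled store
    # by the root changeset (see the shareopts documentation above)
    shareopts = {'pool': '/srv/hgpool', 'mode': 'identity'}
    return clone(ui, {}, '/srv/src', dest='/srv/dst', shareopts=shareopts)
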
def _showstats(repo, stats, quietempty=False):
    if quietempty and not any(stats):
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)

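# stats, as produced by mergemod.update() via updaterepo() below, is a
# 4-tuple (updated, merged, removed, unresolved); callers test stats[3]
# to decide whether any conflicts remain.
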
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are
    merged into the working directory.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats[3] > 0

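# Illustrative sketch (hypothetical helper): update() returns True when
# unresolved conflicts remain, so callers can branch on the result.
def _exampleupdate(repo, rev):
    if update(repo, rev):
        repo.ui.warn(_("update left unresolved conflicts\n"))
        return 1
    return 0
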
# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats[3] > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of the following non-file components:

    :bookmark: might be advanced or (in)activated

    This takes the arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

     * abort: abort if the working directory is dirty
     * none: don't check (merge working directory changes into destination)
     * linear: check that update is linear before merging working directory
               changes into destination
     * noconflict: check that the update does not result in file merges

    This returns whether a conflict was detected while updating.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

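# Illustrative sketch (hypothetical call site): an update that aborts on a
# dirty working directory instead of merging local changes into the target.
def _examplecheckout(ui, repo, rev):
    return updatetotally(ui, repo, checkout=rev, brev=None,
                         updatecheck='abort')
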
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if there are
    any unresolved conflicts."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats[3] > 0

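# Illustrative sketch: abandoning an in-progress merge programmatically,
# the equivalent of 'hg merge --abort'. With abort=True the node argument
# is ignored; merge() recovers the target from the saved merge state.
def _exampleabortmerge(repo):
    return merge(repo, None, abort=True)
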
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

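# Illustrative sketch (hypothetical option values): calling incoming()
# programmatically. 'bundle' and 'force' must be present because _incoming
# forwards them to bundlerepo.getremotechanges().
def _exampleincoming(ui, repo):
    opts = {'bundle': '', 'force': False}
    return incoming(ui, repo, 'default', opts)
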
def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

# Files of interest
# Used to check if the repository has changed, looking at the mtime and size
# of these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
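
# Illustrative sketch (hypothetical caller): reusing a cachedlocalrepo
# between requests, as a long-running service might.
def _examplefetch(cached):
    # fetch() transparently reloads the repo when a file of interest
    # changed on disk (see foi above)
    repo, fresh = cached.fetch()
    if fresh:
        repo.ui.debug('reloaded repository %s\n' % repo.root)
    return repo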