hg: move share._getsrcrepo into core...
Gregory Szorc
r36177:0fe7e39d default
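
The hunks below all make the same substitution: callers that previously imported the `share` extension for its private `_getsrcrepo` helper now call `hg.sharedreposource`, which this changeset moves into core. A minimal sketch of what the migration looks like for third-party extension code (the `journalsource` wrapper is hypothetical):

# Before this changeset, extensions had to reach into hgext/share.py:
#
#     from hgext import share
#     srcrepo = share._getsrcrepo(repo)
#
# After it, the same lookup goes through core:
from mercurial import hg

def journalsource(repo):
    """Hypothetical caller: resolve the source repo of a share, else None."""
    return hg.sharedreposource(repo)
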
hgext/journal.py
@@ -1,518 +1,516 @@
# journal.py
#
# Copyright 2014-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""track previous positions of bookmarks (EXPERIMENTAL)

This extension adds a new command: `hg journal`, which shows you where
bookmarks were previously located.

"""

from __future__ import absolute_import

import collections
import errno
import os
import weakref

from mercurial.i18n import _

from mercurial import (
    bookmarks,
    cmdutil,
    dispatch,
    error,
    extensions,
    hg,
    localrepo,
    lock,
    logcmdutil,
    node,
    pycompat,
    registrar,
    util,
)

-from . import share
-
cmdtable = {}
command = registrar.command(cmdtable)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# storage format version; increment when the format changes
storageversion = 0

# namespaces
bookmarktype = 'bookmark'
wdirparenttype = 'wdirparent'
# In a shared repository, what shared feature name is used
# to indicate this namespace is shared with the source?
sharednamespaces = {
    bookmarktype: hg.sharedbookmarks,
}

# Journal recording, register hooks and storage object
def extsetup(ui):
    extensions.wrapfunction(dispatch, 'runcommand', runcommand)
    extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
    extensions.wrapfilecache(
        localrepo.localrepository, 'dirstate', wrapdirstate)
    extensions.wrapfunction(hg, 'postshare', wrappostshare)
    extensions.wrapfunction(hg, 'copystore', unsharejournal)

def reposetup(ui, repo):
    if repo.local():
        repo.journal = journalstorage(repo)
        repo._wlockfreeprefix.add('namejournal')

        dirstate, cached = localrepo.isfilecached(repo, 'dirstate')
        if cached:
            # already instantiated dirstate isn't yet marked as
            # "journal"-ing, even though repo.dirstate() was already
            # wrapped by own wrapdirstate()
            _setupdirstate(repo, dirstate)

def runcommand(orig, lui, repo, cmd, fullargs, *args):
    """Track the command line options for recording in the journal"""
    journalstorage.recordcommand(*fullargs)
    return orig(lui, repo, cmd, fullargs, *args)

def _setupdirstate(repo, dirstate):
    dirstate.journalstorage = repo.journal
    dirstate.addparentchangecallback('journal', recorddirstateparents)

# hooks to record dirstate changes
def wrapdirstate(orig, repo):
    """Make journal storage available to the dirstate object"""
    dirstate = orig(repo)
    if util.safehasattr(repo, 'journal'):
        _setupdirstate(repo, dirstate)
    return dirstate

def recorddirstateparents(dirstate, old, new):
    """Records all dirstate parent changes in the journal."""
    old = list(old)
    new = list(new)
    if util.safehasattr(dirstate, 'journalstorage'):
        # only record two hashes if there was a merge
        oldhashes = old[:1] if old[1] == node.nullid else old
        newhashes = new[:1] if new[1] == node.nullid else new
        dirstate.journalstorage.record(
            wdirparenttype, '.', oldhashes, newhashes)

# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in store.iteritems():
            oldvalue = oldmarks.get(mark, node.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)

# shared repository support
def _readsharedfeatures(repo):
    """A set of shared features for this repository"""
    try:
        return set(repo.vfs.read('shared').splitlines())
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return set()

def _mergeentriesiter(*iterables, **kwargs):
    """Given a set of sorted iterables, yield the next entry in merged order

    Note that by default entries go from most recent to oldest.
    """
    order = kwargs.pop(r'order', max)
    iterables = [iter(it) for it in iterables]
    # this tracks still active iterables; iterables are deleted as they are
    # exhausted, which is why this is a dictionary and why each entry also
    # stores the key. Entries are mutable so we can store the next value each
    # time.
    iterable_map = {}
    for key, it in enumerate(iterables):
        try:
            iterable_map[key] = [next(it), key, it]
        except StopIteration:
            # empty entry, can be ignored
            pass

    while iterable_map:
        value, key, it = order(iterable_map.itervalues())
        yield value
        try:
            iterable_map[key][0] = next(it)
        except StopIteration:
            # this iterable is empty, remove it from consideration
            del iterable_map[key]

def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """Mark this shared working copy as sharing journal information"""
    with destrepo.wlock():
        orig(sourcerepo, destrepo, **kwargs)
        with destrepo.vfs('shared', 'a') as fp:
            fp.write('journal\n')

def unsharejournal(orig, ui, repo, repopath):
    """Copy shared journal entries into this repo when unsharing"""
    if (repo.path == repopath and repo.shared() and
            util.safehasattr(repo, 'journal')):
-        sharedrepo = share._getsrcrepo(repo)
+        sharedrepo = hg.sharedreposource(repo)
        sharedfeatures = _readsharedfeatures(repo)
        if sharedrepo and sharedfeatures > {'journal'}:
            # there is a shared repository and there are shared journal entries
            # to copy. move shared date over from source to destination but
            # move the local file first
            if repo.vfs.exists('namejournal'):
                journalpath = repo.vfs.join('namejournal')
                util.rename(journalpath, journalpath + '.bak')
            storage = repo.journal
            local = storage._open(
                repo.vfs, filename='namejournal.bak', _newestfirst=False)
            shared = (
                e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
                if sharednamespaces.get(e.namespace) in sharedfeatures)
            for entry in _mergeentriesiter(local, shared, order=min):
                storage._write(repo.vfs, entry)

    return orig(ui, repo, repopath)

class journalentry(collections.namedtuple(
        u'journalentry',
        u'timestamp user command namespace name oldhashes newhashes')):
    """Individual journal entry

    * timestamp: a mercurial (time, timezone) tuple
    * user: the username that ran the command
    * namespace: the entry namespace, an opaque string
    * name: the name of the changed item, opaque string with meaning in the
      namespace
    * command: the hg command that triggered this record
    * oldhashes: a tuple of one or more binary hashes for the old location
    * newhashes: a tuple of one or more binary hashes for the new location

    Handles serialisation from and to the storage format. Fields are
    separated by newlines, hashes are written out in hex separated by commas,
    timestamp and timezone are separated by a space.

    """
    @classmethod
    def fromstorage(cls, line):
        (time, user, command, namespace, name,
         oldhashes, newhashes) = line.split('\n')
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
        newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
        return cls(
            (timestamp, tz), user, command, namespace, name,
            oldhashes, newhashes)

    def __str__(self):
        """String representation for storage"""
        time = ' '.join(map(str, self.timestamp))
        oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
        newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
        return '\n'.join((
            time, self.user, self.command, self.namespace, self.name,
            oldhashes, newhashes))

class journalstorage(object):
    """Storage for journal entries

    Entries are divided over two files; one with entries that pertain to the
    local working copy *only*, and one with entries that are shared across
    multiple working copies when shared using the share extension.

    Entries are stored with NUL bytes as separators. See the journalentry
    class for the per-entry structure.

    The file format starts with an integer version, delimited by a NUL.

    This storage uses a dedicated lock; this makes it easier to avoid issues
    with adding entries that added when the regular wlock is unlocked (e.g.
    the dirstate).

    """
    _currentcommand = ()
    _lockref = None

    def __init__(self, repo):
        self.user = util.getuser()
        self.ui = repo.ui
        self.vfs = repo.vfs

        # is this working copy using a shared storage?
        self.sharedfeatures = self.sharedvfs = None
        if repo.shared():
            features = _readsharedfeatures(repo)
-            sharedrepo = share._getsrcrepo(repo)
+            sharedrepo = hg.sharedreposource(repo)
            if sharedrepo is not None and 'journal' in features:
                self.sharedvfs = sharedrepo.vfs
                self.sharedfeatures = features

    # track the current command for recording in journal entries
    @property
    def command(self):
        commandstr = ' '.join(
            map(util.shellquote, journalstorage._currentcommand))
        if '\n' in commandstr:
            # truncate multi-line commands
            commandstr = commandstr.partition('\n')[0] + ' ...'
        return commandstr

    @classmethod
    def recordcommand(cls, *fullargs):
        """Set the current hg arguments, stored with recorded entries"""
        # Set the current command on the class because we may have started
        # with a non-local repo (cloning for example).
        cls._currentcommand = fullargs

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not.

        (This is copied from the localrepo class)
        """
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def jlock(self, vfs):
        """Create a lock for the journal file"""
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_('journal lock does not support nesting'))
        desc = _('journal of %s') % vfs.base
        try:
            l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, 'namejournal.lock',
                self.ui.configint("ui", "timeout"), desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l

    def record(self, namespace, name, oldhashes, newhashes):
        """Record a new journal entry

        * namespace: an opaque string; this can be used to filter on the type
          of recorded entries.
        * name: the name defining this entry; for bookmarks, this is the
          bookmark name. Can be filtered on when retrieving entries.
        * oldhashes and newhashes: each a single binary hash, or a list of
          binary hashes. These represent the old and new position of the named
          item.

        """
        if not isinstance(oldhashes, list):
            oldhashes = [oldhashes]
        if not isinstance(newhashes, list):
            newhashes = [newhashes]

        entry = journalentry(
            util.makedate(), self.user, self.command, namespace, name,
            oldhashes, newhashes)

        vfs = self.vfs
        if self.sharedvfs is not None:
            # write to the shared repository if this feature is being
            # shared between working copies.
            if sharednamespaces.get(namespace) in self.sharedfeatures:
                vfs = self.sharedvfs

        self._write(vfs, entry)

    def _write(self, vfs, entry):
        with self.jlock(vfs):
            version = None
            # open file in amend mode to ensure it is created if missing
            with vfs('namejournal', mode='a+b') as f:
                f.seek(0, os.SEEK_SET)
                # Read just enough bytes to get a version number (up to 2
                # digits plus separator)
                version = f.read(3).partition('\0')[0]
                if version and version != str(storageversion):
                    # different version of the storage. Exit early (and not
                    # write anything) if this is not a version we can handle or
                    # the file is corrupt. In future, perhaps rotate the file
                    # instead?
                    self.ui.warn(
                        _("unsupported journal file version '%s'\n") % version)
                    return
                if not version:
                    # empty file, write version first
                    f.write(str(storageversion) + '\0')
                f.seek(0, os.SEEK_END)
                f.write(str(entry) + '\0')

    def filtered(self, namespace=None, name=None):
        """Yield all journal entries with the given namespace or name

        Both the namespace and the name are optional; if neither is given all
        entries in the journal are produced.

        Matching supports regular expressions by using the `re:` prefix
        (use `literal:` to match names or namespaces that start with `re:`)

        """
        if namespace is not None:
            namespace = util.stringmatcher(namespace)[-1]
        if name is not None:
            name = util.stringmatcher(name)[-1]
        for entry in self:
            if namespace is not None and not namespace(entry.namespace):
                continue
            if name is not None and not name(entry.name):
                continue
            yield entry

    def __iter__(self):
        """Iterate over the storage

        Yields journalentry instances for each contained journal record.

        """
        local = self._open(self.vfs)

        if self.sharedvfs is None:
            return local

        # iterate over both local and shared entries, but only those
        # shared entries that are among the currently shared features
        shared = (
            e for e in self._open(self.sharedvfs)
            if sharednamespaces.get(e.namespace) in self.sharedfeatures)
        return _mergeentriesiter(local, shared)

    def _open(self, vfs, filename='namejournal', _newestfirst=True):
        if not vfs.exists(filename):
            return

        with vfs(filename) as f:
            raw = f.read()

        lines = raw.split('\0')
        version = lines and lines[0]
        if version != str(storageversion):
            version = version or _('not available')
            raise error.Abort(_("unknown journal file version '%s'") % version)

        # Skip the first line, it's a version number. Normally we iterate over
        # these in reverse order to list newest first; only when copying across
        # a shared storage do we forgo reversing.
        lines = lines[1:]
        if _newestfirst:
            lines = reversed(lines)
        for line in lines:
            if not line:
                continue
            yield journalentry.fromstorage(line)

# journal reading
# log options that don't make sense for journal
_ignoreopts = ('no-merges', 'graph')
@command(
    'journal', [
        ('', 'all', None, 'show history for all names'),
        ('c', 'commits', None, 'show commit metadata'),
    ] + [opt for opt in cmdutil.logopts if opt[1] not in _ignoreopts],
    '[OPTION]... [BOOKMARKNAME]')
def journal(ui, repo, *args, **opts):
    """show the previous position of bookmarks and the working copy

    The journal is used to see the previous commits that bookmarks and the
    working copy pointed to. By default the previous locations for the working
    copy. Passing a bookmark name will show all the previous positions of
    that bookmark. Use the --all switch to show previous locations for all
    bookmarks and the working copy; each line will then include the bookmark
    name, or '.' for the working copy, as well.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a name that actually starts with `re:`,
    use the prefix `literal:`.

    By default hg journal only shows the commit hash and the command that was
    running at that time. -v/--verbose will show the prior hash, the user, and
    the time at which it happened.

    Use -c/--commits to output log information on each commit hash; at this
    point you can use the usual `--patch`, `--git`, `--stat` and `--template`
    switches to alter the log output for these.

    `hg journal -T json` can be used to produce machine readable output.

    """
    opts = pycompat.byteskwargs(opts)
    name = '.'
    if opts.get('all'):
        if args:
            raise error.Abort(
                _("You can't combine --all and filtering on a name"))
        name = None
    if args:
        name = args[0]

    fm = ui.formatter('journal', opts)

    if opts.get("template") != "json":
        if name is None:
            displayname = _('the working copy and bookmarks')
        else:
            displayname = "'%s'" % name
        ui.status(_("previous locations of %s:\n") % displayname)

    limit = logcmdutil.getlimit(opts)
    entry = None
    ui.pager('journal')
    for count, entry in enumerate(repo.journal.filtered(name=name)):
        if count == limit:
            break
        newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
                                     name='node', sep=',')
        oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
                                     name='node', sep=',')

        fm.startitem()
        fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
        fm.write('newhashes', '%s', newhashesstr)
        fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
        fm.condwrite(
            opts.get('all') or name.startswith('re:'),
            'name', ' %-8s', entry.name)

        timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
        fm.condwrite(ui.verbose, 'date', ' %s', timestring)
        fm.write('command', ' %s\n', entry.command)

        if opts.get("commits"):
            displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
            for hash in entry.newhashes:
                try:
                    ctx = repo[hash]
                    displayer.show(ctx)
                except error.RepoLookupError as e:
                    fm.write('repolookuperror', "%s\n\n", str(e))
            displayer.close()

    fm.end()

    if entry is None:
        ui.status(_("no recorded locations\n"))
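
Aside from the import and call-site changes, journal.py appears in full above, including the storage format documented in the `journalentry` and `journalstorage` docstrings. As a reading aid, here is a standalone sketch of a version-0 `namejournal` parser under those documented assumptions (a version field then NUL-separated records, newline-separated fields, comma-separated hex hashes); unlike `journalentry.fromstorage`, it keeps hashes hex rather than converting them with `node.bin`:

def readjournal(data):
    """Parse raw namejournal bytes into simple tuples, newest first.

    A minimal sketch of journalstorage._open plus journalentry.fromstorage,
    keeping hashes as hex strings instead of binary nodes.
    """
    records = data.split(b'\0')
    version, entries = records[0], records[1:]
    if version != b'0':
        raise ValueError('unknown journal file version %r' % version)
    out = []
    for entry in reversed(entries):  # stored oldest-first; yield newest-first
        if not entry:
            continue
        (time, user, command, namespace,
         name, oldhashes, newhashes) = entry.split(b'\n')
        timestamp, tz = time.split()
        out.append((float(timestamp), int(tz), user, command, namespace,
                    name, oldhashes.split(b','), newhashes.split(b',')))
    return out

Running it over a file this extension wrote, e.g. readjournal(open('.hg/namejournal', 'rb').read()), would list working-directory and bookmark movements newest first.
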
hgext/narrow/narrowrepo.py
@@ -1,116 +1,113 @@
# narrowrepo.py - repository which supports narrow revlogs, lazy loading
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    bundlerepo,
+    hg,
    localrepo,
    match as matchmod,
    scmutil,
)

-from .. import (
-    share,
-)
-
from . import (
    narrowrevlog,
    narrowspec,
)

# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
REQUIREMENT = 'narrowhg-experimental'

def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    orig(sourcerepo, destrepo, **kwargs)
    if REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            with destrepo.vfs('shared', 'a') as fp:
                fp.write(narrowspec.FILENAME + '\n')

def unsharenarrowspec(orig, ui, repo, repopath):
    if (REQUIREMENT in repo.requirements
        and repo.path == repopath and repo.shared()):
-        srcrepo = share._getsrcrepo(repo)
+        srcrepo = hg.sharedreposource(repo)
        with srcrepo.vfs(narrowspec.FILENAME) as f:
            spec = f.read()
        with repo.vfs(narrowspec.FILENAME, 'w') as f:
            f.write(spec)
    return orig(ui, repo, repopath)

def wraprepo(repo, opts_narrow):
    """Enables narrow clone functionality on a single local repository."""

    cacheprop = localrepo.storecache
    if isinstance(repo, bundlerepo.bundlerepository):
        # We have to use a different caching property decorator for
        # bundlerepo because storecache blows up in strange ways on a
        # bundlerepo. Fortunately, there's no risk of data changing in
        # a bundlerepo.
        cacheprop = lambda name: localrepo.unfilteredpropertycache

    class narrowrepository(repo.__class__):

        def _constructmanifest(self):
            manifest = super(narrowrepository, self)._constructmanifest()
            narrowrevlog.makenarrowmanifestrevlog(manifest, repo)
            return manifest

        @cacheprop('00manifest.i')
        def manifestlog(self):
            mfl = super(narrowrepository, self).manifestlog
            narrowrevlog.makenarrowmanifestlog(mfl, self)
            return mfl

        def file(self, f):
            fl = super(narrowrepository, self).file(f)
            narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
            return fl

        @localrepo.repofilecache(narrowspec.FILENAME)
        def narrowpats(self):
            """matcher patterns for this repository's narrowspec

            A tuple of (includes, excludes).
            """
            return narrowspec.load(self)

        @localrepo.repofilecache(narrowspec.FILENAME)
        def _narrowmatch(self):
            include, exclude = self.narrowpats
            if not opts_narrow and not include and not exclude:
                return matchmod.always(self.root, '')
            return narrowspec.match(self.root, include=include, exclude=exclude)

        # TODO(martinvonz): make this property-like instead?
        def narrowmatch(self):
            return self._narrowmatch

        def setnarrowpats(self, newincludes, newexcludes):
            narrowspec.save(self, newincludes, newexcludes)
            self.invalidate(clearfilecache=True)

        # I'm not sure this is the right place to do this filter.
        # context._manifestmatches() would probably be better, or perhaps
        # move it to a later place, in case some of the callers do want to know
        # which directories changed. This seems to work for now, though.
        def status(self, *args, **kwargs):
            s = super(narrowrepository, self).status(*args, **kwargs)
            narrowmatch = self.narrowmatch()
            modified = list(filter(narrowmatch, s.modified))
            added = list(filter(narrowmatch, s.added))
            removed = list(filter(narrowmatch, s.removed))
            deleted = list(filter(narrowmatch, s.deleted))
            unknown = list(filter(narrowmatch, s.unknown))
            ignored = list(filter(narrowmatch, s.ignored))
            clean = list(filter(narrowmatch, s.clean))
            return scmutil.status(modified, added, removed, deleted, unknown,
                                  ignored, clean)

    repo.__class__ = narrowrepository
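
narrow and journal coordinate through the same `.hg/shared` file: `wrappostshare` here appends `narrowspec.FILENAME`, journal.py's version appends `journal`, and journal.py's `_readsharedfeatures` turns the file back into a set. A standalone sketch of that round trip, using plain file I/O instead of the repo vfs (the path argument is illustrative):

import errno

def addsharedfeature(sharedfile, name):
    # what the wrappostshare wrappers do: append one feature name per line
    with open(sharedfile, 'a') as fp:
        fp.write(name + '\n')

def readsharedfeatures(sharedfile):
    # mirror of journal.py's _readsharedfeatures: a missing file simply
    # means no features are shared
    try:
        with open(sharedfile) as fp:
            return set(fp.read().splitlines())
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return set()
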
@@ -1,207 +1,204 b''
1 # narrowspec.py - methods for working with a narrow view of a repository
1 # narrowspec.py - methods for working with a narrow view of a repository
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11
11
12 from mercurial.i18n import _
12 from mercurial.i18n import _
13 from mercurial import (
13 from mercurial import (
14 error,
14 error,
15 hg,
15 match as matchmod,
16 match as matchmod,
16 util,
17 util,
17 )
18 )
18
19
19 from .. import (
20 share,
21 )
22
23 FILENAME = 'narrowspec'
20 FILENAME = 'narrowspec'
24
21
25 def _parsestoredpatterns(text):
22 def _parsestoredpatterns(text):
26 """Parses the narrowspec format that's stored on disk."""
23 """Parses the narrowspec format that's stored on disk."""
27 patlist = None
24 patlist = None
28 includepats = []
25 includepats = []
29 excludepats = []
26 excludepats = []
30 for l in text.splitlines():
27 for l in text.splitlines():
31 if l == '[includes]':
28 if l == '[includes]':
32 if patlist is None:
29 if patlist is None:
33 patlist = includepats
30 patlist = includepats
34 else:
31 else:
35 raise error.Abort(_('narrowspec includes section must appear '
32 raise error.Abort(_('narrowspec includes section must appear '
36 'at most once, before excludes'))
33 'at most once, before excludes'))
37 elif l == '[excludes]':
34 elif l == '[excludes]':
38 if patlist is not excludepats:
35 if patlist is not excludepats:
39 patlist = excludepats
36 patlist = excludepats
40 else:
37 else:
41 raise error.Abort(_('narrowspec excludes section must appear '
38 raise error.Abort(_('narrowspec excludes section must appear '
42 'at most once'))
39 'at most once'))
43 else:
40 else:
44 patlist.append(l)
41 patlist.append(l)
45
42
46 return set(includepats), set(excludepats)
43 return set(includepats), set(excludepats)
47
44
48 def parseserverpatterns(text):
45 def parseserverpatterns(text):
49 """Parses the narrowspec format that's returned by the server."""
46 """Parses the narrowspec format that's returned by the server."""
50 includepats = set()
47 includepats = set()
51 excludepats = set()
48 excludepats = set()
52
49
53 # We get one entry per line, in the format "<key> <value>".
50 # We get one entry per line, in the format "<key> <value>".
54 # It's OK for value to contain other spaces.
51 # It's OK for value to contain other spaces.
55 for kp in (l.split(' ', 1) for l in text.splitlines()):
52 for kp in (l.split(' ', 1) for l in text.splitlines()):
56 if len(kp) != 2:
53 if len(kp) != 2:
57 raise error.Abort(_('Invalid narrowspec pattern line: "%s"') % kp)
54 raise error.Abort(_('Invalid narrowspec pattern line: "%s"') % kp)
58 key = kp[0]
55 key = kp[0]
59 pat = kp[1]
56 pat = kp[1]
60 if key == 'include':
57 if key == 'include':
61 includepats.add(pat)
58 includepats.add(pat)
62 elif key == 'exclude':
59 elif key == 'exclude':
63 excludepats.add(pat)
60 excludepats.add(pat)
64 else:
61 else:
65 raise error.Abort(_('Invalid key "%s" in server response') % key)
62 raise error.Abort(_('Invalid key "%s" in server response') % key)
66
63
67 return includepats, excludepats
64 return includepats, excludepats
68
65
69 def normalizesplitpattern(kind, pat):
66 def normalizesplitpattern(kind, pat):
70 """Returns the normalized version of a pattern and kind.
67 """Returns the normalized version of a pattern and kind.
71
68
72 Returns a tuple with the normalized kind and normalized pattern.
69 Returns a tuple with the normalized kind and normalized pattern.
73 """
70 """
74 pat = pat.rstrip('/')
71 pat = pat.rstrip('/')
75 _validatepattern(pat)
72 _validatepattern(pat)
76 return kind, pat
73 return kind, pat
77
74
78 def _numlines(s):
75 def _numlines(s):
79 """Returns the number of lines in s, including ending empty lines."""
76 """Returns the number of lines in s, including ending empty lines."""
80 # We use splitlines because it is Unicode-friendly and thus Python 3
77 # We use splitlines because it is Unicode-friendly and thus Python 3
81 # compatible. However, it does not count empty lines at the end, so trick
78 # compatible. However, it does not count empty lines at the end, so trick
82 # it by adding a character at the end.
79 # it by adding a character at the end.
83 return len((s + 'x').splitlines())
80 return len((s + 'x').splitlines())
84
81
85 def _validatepattern(pat):
82 def _validatepattern(pat):
86 """Validates the pattern and aborts if it is invalid.
83 """Validates the pattern and aborts if it is invalid.
87
84
88 Patterns are stored in the narrowspec as newline-separated
85 Patterns are stored in the narrowspec as newline-separated
89 POSIX-style bytestring paths. There's no escaping.
86 POSIX-style bytestring paths. There's no escaping.
90 """
87 """
91
88
92 # We use newlines as separators in the narrowspec file, so don't allow them
89 # We use newlines as separators in the narrowspec file, so don't allow them
93 # in patterns.
90 # in patterns.
94 if _numlines(pat) > 1:
91 if _numlines(pat) > 1:
95 raise error.Abort('newlines are not allowed in narrowspec paths')
92 raise error.Abort('newlines are not allowed in narrowspec paths')
96
93
97 components = pat.split('/')
94 components = pat.split('/')
98 if '.' in components or '..' in components:
95 if '.' in components or '..' in components:
99 raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
96 raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
100
97
101 def normalizepattern(pattern, defaultkind='path'):
98 def normalizepattern(pattern, defaultkind='path'):
102 """Returns the normalized version of a text-format pattern.
99 """Returns the normalized version of a text-format pattern.
103
100
104 If the pattern has no kind, the default will be added.
101 If the pattern has no kind, the default will be added.
105 """
102 """
106 kind, pat = matchmod._patsplit(pattern, defaultkind)
103 kind, pat = matchmod._patsplit(pattern, defaultkind)
107 return '%s:%s' % normalizesplitpattern(kind, pat)
104 return '%s:%s' % normalizesplitpattern(kind, pat)
108
105
109 def parsepatterns(pats):
106 def parsepatterns(pats):
110 """Parses a list of patterns into a typed pattern set."""
107 """Parses a list of patterns into a typed pattern set."""
111 return set(normalizepattern(p) for p in pats)
108 return set(normalizepattern(p) for p in pats)
112
109
113 def format(includes, excludes):
110 def format(includes, excludes):
114 output = '[includes]\n'
111 output = '[includes]\n'
115 for i in sorted(includes - excludes):
112 for i in sorted(includes - excludes):
116 output += i + '\n'
113 output += i + '\n'
117 output += '[excludes]\n'
114 output += '[excludes]\n'
118 for e in sorted(excludes):
115 for e in sorted(excludes):
119 output += e + '\n'
116 output += e + '\n'
120 return output
117 return output
121
118
122 def match(root, include=None, exclude=None):
119 def match(root, include=None, exclude=None):
123 if not include:
120 if not include:
124 # Passing empty include and empty exclude to matchmod.match()
121 # Passing empty include and empty exclude to matchmod.match()
125 # gives a matcher that matches everything, so explicitly use
122 # gives a matcher that matches everything, so explicitly use
126 # the nevermatcher.
123 # the nevermatcher.
127 return matchmod.never(root, '')
124 return matchmod.never(root, '')
128 return matchmod.match(root, '', [], include=include or [],
125 return matchmod.match(root, '', [], include=include or [],
129 exclude=exclude or [])
126 exclude=exclude or [])
130
127
131 def needsexpansion(includes):
128 def needsexpansion(includes):
132 return [i for i in includes if i.startswith('include:')]
129 return [i for i in includes if i.startswith('include:')]
133
130
134 def load(repo):
131 def load(repo):
135 if repo.shared():
132 if repo.shared():
136 repo = share._getsrcrepo(repo)
133 repo = hg.sharedreposource(repo)
137 try:
134 try:
138 spec = repo.vfs.read(FILENAME)
135 spec = repo.vfs.read(FILENAME)
139 except IOError as e:
136 except IOError as e:
140 # Treat "narrowspec does not exist" the same as "narrowspec file exists
137 # Treat "narrowspec does not exist" the same as "narrowspec file exists
141 # and is empty".
138 # and is empty".
142 if e.errno == errno.ENOENT:
139 if e.errno == errno.ENOENT:
143 # Without this the next call to load will use the cached
140 # Without this the next call to load will use the cached
144 # non-existence of the file, which can cause some odd issues.
141 # non-existence of the file, which can cause some odd issues.
145 repo.invalidate(clearfilecache=True)
142 repo.invalidate(clearfilecache=True)
146 return set(), set()
143 return set(), set()
147 raise
144 raise
148 return _parsestoredpatterns(spec)
145 return _parsestoredpatterns(spec)
149
146
150 def save(repo, includepats, excludepats):
147 def save(repo, includepats, excludepats):
151 spec = format(includepats, excludepats)
     spec = format(includepats, excludepats)
     if repo.shared():
-        repo = share._getsrcrepo(repo)
+        repo = hg.sharedreposource(repo)
     repo.vfs.write(FILENAME, spec)

 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
     r""" Restricts the patterns according to repo settings,
     results in a logical AND operation

     :param req_includes: requested includes
     :param req_excludes: requested excludes
     :param repo_includes: repo includes
     :param repo_excludes: repo excludes
     :return: include patterns, exclude patterns, and invalid include patterns.

     >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
     (set(['f1']), set([]), ['f2'])
     >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
     (set(['f1']), set([]), [])
     >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
     (set(['f1/fc1']), set([]), ['f3/fc3'])
     >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
     ([], set(['path:.']), ['f1_fc1'])
     >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
     (set(['f2/fc2']), set([]), [])
     >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
     ([], set(['path:.']), ['f3/fc3'])
     >>> restrictpatterns({'f1/$non_existent_var'}, {}, ['f1','f2'], [])
     (set(['f1/$non_existent_var']), set([]), [])
     """
     res_excludes = set(req_excludes)
     res_excludes.update(repo_excludes)
     invalid_includes = []
     if not req_includes:
         res_includes = set(repo_includes)
     elif 'path:.' not in repo_includes:
         res_includes = []
         for req_include in req_includes:
             req_include = util.expandpath(util.normpath(req_include))
             if req_include in repo_includes:
                 res_includes.append(req_include)
                 continue
             valid = False
             for repo_include in repo_includes:
                 if req_include.startswith(repo_include + '/'):
                     valid = True
                     res_includes.append(req_include)
                     break
             if not valid:
                 invalid_includes.append(req_include)
         if len(res_includes) == 0:
             res_excludes = {'path:.'}
         else:
             res_includes = set(res_includes)
     else:
         res_includes = set(req_includes)
     return res_includes, res_excludes, invalid_includes
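The AND semantics above are doctested inline; for readers skimming the diff, here is the same narrowing rule in a self-contained sketch. It uses posixpath in place of Mercurial's util.normpath/util.expandpath, which is an assumption of the example, not part of this change:

import posixpath

def intersect_includes(requested, allowed):
    # A requested path survives only if it equals an allowed root or
    # nests strictly under one; everything else is reported as invalid.
    kept, invalid = [], []
    for path in requested:
        path = posixpath.normpath(path)
        if path in allowed or any(path.startswith(root + '/')
                                  for root in allowed):
            kept.append(path)
        else:
            invalid.append(path)
    return kept, invalid

print(intersect_includes(['f1/fc1', 'f3/fc3'], ['f1', 'f2']))
# -> (['f1/fc1'], ['f3/fc3'])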
@@ -1,201 +1,180 @@ hgext/share.py
 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''share a common history between several working directories

 Automatic Pooled Storage for Clones
 -----------------------------------

 When this extension is active, :hg:`clone` can be configured to
 automatically share/pool storage across multiple clones. This
 mode effectively converts :hg:`clone` to :hg:`clone` + :hg:`share`.
 The benefit of using this mode is the automatic management of
 store paths and intelligent pooling of related repositories.

 The following ``share.`` config options influence this feature:

 ``share.pool``
     Filesystem path where shared repository data will be stored. When
     defined, :hg:`clone` will automatically use shared repository
     storage instead of creating a store inside each clone.

 ``share.poolnaming``
     How directory names in ``share.pool`` are constructed.

     "identity" means the name is derived from the first changeset in the
     repository. In this mode, different remotes share storage if their
     root/initial changeset is identical. In this mode, the local shared
     repository is an aggregate of all encountered remote repositories.

     "remote" means the name is derived from the source repository's
     path or URL. In this mode, storage is only shared if the path or URL
     requested in the :hg:`clone` command matches exactly to a repository
     that was cloned before.

     The default naming mode is "identity".
 '''
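For reference, wiring up the pooled-clone behaviour documented above takes only the two options named in the docstring. The paths below are illustrative, not part of this change:

[extensions]
share =

[share]
pool = /var/cache/hg-pool
poolnaming = identity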

 from __future__ import absolute_import

 import errno
 from mercurial.i18n import _
 from mercurial import (
     bookmarks,
     commands,
     error,
     extensions,
     hg,
     registrar,
     txnutil,
     util,
 )

-repository = hg.repository
-parseurl = hg.parseurl
-
 cmdtable = {}
 command = registrar.command(cmdtable)
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'

 @command('share',
     [('U', 'noupdate', None, _('do not create a working directory')),
      ('B', 'bookmarks', None, _('also share bookmarks')),
      ('', 'relative', None, _('point to source using a relative path '
                               '(EXPERIMENTAL)')),
     ],
     _('[-U] [-B] SOURCE [DEST]'),
     norepo=True)
 def share(ui, source, dest=None, noupdate=False, bookmarks=False,
           relative=False):
     """create a new shared repository

     Initialize a new repository and working directory that shares its
     history (and optionally bookmarks) with another repository.

     .. note::

        using rollback or extensions that destroy/modify history (mq,
        rebase, etc.) can cause considerable confusion with shared
        clones. In particular, if two shared clones are both updated to
        the same changeset, and one of them destroys that changeset
        with rollback, the other clone will suddenly stop working: all
        operations will fail with "abort: working directory has unknown
        parent". The only known workaround is to use debugsetparents on
        the broken clone to reset it to a changeset that still exists.
     """

     hg.share(ui, source, dest=dest, update=not noupdate,
              bookmarks=bookmarks, relative=relative)
     return 0

 @command('unshare', [], '')
 def unshare(ui, repo):
     """convert a shared repository to a normal one

     Copy the store data to the repo and remove the sharedpath data.
     """

     if not repo.shared():
         raise error.Abort(_("this is not a shared repo"))

     hg.unshare(ui, repo)

 # Wrap clone command to pass auto share options.
 def clone(orig, ui, source, *args, **opts):
     pool = ui.config('share', 'pool')
     if pool:
         pool = util.expandpath(pool)

     opts[r'shareopts'] = {
         'pool': pool,
         'mode': ui.config('share', 'poolnaming'),
     }

     return orig(ui, source, *args, **opts)

 def extsetup(ui):
     extensions.wrapfunction(bookmarks, '_getbkfile', getbkfile)
     extensions.wrapfunction(bookmarks.bmstore, '_recordchange', recordchange)
     extensions.wrapfunction(bookmarks.bmstore, '_writerepo', writerepo)
     extensions.wrapcommand(commands.table, 'clone', clone)

 def _hassharedbookmarks(repo):
     """Returns whether this repo has shared bookmarks"""
     try:
         shared = repo.vfs.read('shared').splitlines()
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise
         return False
     return hg.sharedbookmarks in shared

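_hassharedbookmarks is a plain membership test against the .hg/shared feature file that postshare() writes, one feature name per line. A standalone sketch of the same check, with a hypothetical path argument:

import errno

def has_shared_feature(shared_file_path, feature='bookmarks'):
    # .hg/shared lists one shared feature name per line; a missing
    # file simply means nothing extra is shared.
    try:
        with open(shared_file_path, 'rb') as fp:
            return feature.encode() in fp.read().splitlines()
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return False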
-def _getsrcrepo(repo):
-    """
-    Returns the source repository object for a given shared repository.
-    If repo is not a shared repository, return None.
-    """
-    if repo.sharedpath == repo.path:
-        return None
-
-    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
-        return repo.srcrepo
-
-    # the sharedpath always ends in the .hg; we want the path to the repo
-    source = repo.vfs.split(repo.sharedpath)[0]
-    srcurl, branches = parseurl(source)
-    srcrepo = repository(repo.ui, srcurl)
-    repo.srcrepo = srcrepo
-    return srcrepo
-
 def getbkfile(orig, repo):
     if _hassharedbookmarks(repo):
-        srcrepo = _getsrcrepo(repo)
+        srcrepo = hg.sharedreposource(repo)
         if srcrepo is not None:
             # just orig(srcrepo) doesn't work as expected, because
             # HG_PENDING refers repo.root.
             try:
                 fp, pending = txnutil.trypending(repo.root, repo.vfs,
                                                  'bookmarks')
                 if pending:
                     # only in this case, bookmark information in repo
                     # is up-to-date.
                     return fp
                 fp.close()
             except IOError as inst:
                 if inst.errno != errno.ENOENT:
                     raise

             # otherwise, we should read bookmarks from srcrepo,
             # because .hg/bookmarks in srcrepo might be already
             # changed via another sharing repo
             repo = srcrepo

             # TODO: Pending changes in repo are still invisible in
             # srcrepo, because bookmarks.pending is written only into repo.
             # See also https://www.mercurial-scm.org/wiki/SharedRepository
     return orig(repo)

 def recordchange(orig, self, tr):
     # Continue with write to local bookmarks file as usual
     orig(self, tr)

     if _hassharedbookmarks(self._repo):
-        srcrepo = _getsrcrepo(self._repo)
+        srcrepo = hg.sharedreposource(self._repo)
         if srcrepo is not None:
             category = 'share-bookmarks'
             tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))

 def writerepo(orig, self, repo):
     # First write local bookmarks file in case we ever unshare
     orig(self, repo)

     if _hassharedbookmarks(self._repo):
-        srcrepo = _getsrcrepo(self._repo)
+        srcrepo = hg.sharedreposource(self._repo)
         if srcrepo is not None:
             orig(self, srcrepo)
@@ -1,1117 +1,1135 @@ mercurial/hg.py
 # hg.py - repository classes for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import os
 import shutil

 from .i18n import _
 from .node import (
     nullid,
 )

 from . import (
     bookmarks,
     bundlerepo,
     cacheutil,
     cmdutil,
     destutil,
     discovery,
     error,
     exchange,
     extensions,
     httppeer,
     localrepo,
     lock,
     logcmdutil,
     logexchange,
     merge as mergemod,
     node,
     phases,
     scmutil,
     sshpeer,
     statichttprepo,
     ui as uimod,
     unionrepo,
     url,
     util,
     verify as verifymod,
     vfs as vfsmod,
 )

 release = lock.release

 # shared features
 sharedbookmarks = 'bookmarks'

 def _local(path):
     path = util.expandpath(util.urllocalpath(path))
     return (os.path.isfile(path) and bundlerepo or localrepo)

 def addbranchrevs(lrepo, other, branches, revs):
     peer = other.peer() # a courtesy to callers using a localrepo for other
     hashbranch, branches = branches
     if not hashbranch and not branches:
         x = revs or None
         if util.safehasattr(revs, 'first'):
             y = revs.first()
         elif revs:
             y = revs[0]
         else:
             y = None
         return x, y
     if revs:
         revs = list(revs)
     else:
         revs = []

     if not peer.capable('branchmap'):
         if branches:
             raise error.Abort(_("remote branch lookup not supported"))
         revs.append(hashbranch)
         return revs, revs[0]
     branchmap = peer.branchmap()

     def primary(branch):
         if branch == '.':
             if not lrepo:
                 raise error.Abort(_("dirstate branch not accessible"))
             branch = lrepo.dirstate.branch()
         if branch in branchmap:
             revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
             return True
         else:
             return False

     for branch in branches:
         if not primary(branch):
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
     if hashbranch:
         if not primary(hashbranch):
             revs.append(hashbranch)
     return revs, revs[0]

 def parseurl(path, branches=None):
     '''parse url#branch, returning (url, (branch, branches))'''

     u = util.url(path)
     branch = None
     if u.fragment:
         branch = u.fragment
         u.fragment = None
     return bytes(u), (branch, branches or [])

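The tuple shape returned above is easiest to read from one concrete call. The URL is illustrative, and the repr is shown in Python 2 form to match the doctests elsewhere in this file:

>>> parseurl(b'https://example.com/repo#stable', [b'default'])
('https://example.com/repo', ('stable', ['default']))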
 schemes = {
     'bundle': bundlerepo,
     'union': unionrepo,
     'file': _local,
     'http': httppeer,
     'https': httppeer,
     'ssh': sshpeer,
     'static-http': statichttprepo,
 }

 def _peerlookup(path):
     u = util.url(path)
     scheme = u.scheme or 'file'
     thing = schemes.get(scheme) or schemes['file']
     try:
         return thing(path)
     except TypeError:
         # we can't test callable(thing) because 'thing' can be an unloaded
         # module that implements __call__
         if not util.safehasattr(thing, 'instance'):
             raise
         return thing

 def islocal(repo):
     '''return true if repo (or path pointing to repo) is local'''
     if isinstance(repo, bytes):
         try:
             return _peerlookup(repo).islocal(repo)
         except AttributeError:
             return False
     return repo.local()

 def openpath(ui, path):
     '''open path with open if local, url.open if remote'''
     pathurl = util.url(path, parsequery=False, parsefragment=False)
     if pathurl.islocal():
         return util.posixfile(pathurl.localpath(), 'rb')
     else:
         return url.open(ui, path)

 # a list of (ui, repo) functions called for wire peer initialization
 wirepeersetupfuncs = []

 def _peerorrepo(ui, path, create=False, presetupfuncs=None):
     """return a repository object for the specified path"""
     obj = _peerlookup(path).instance(ui, path, create)
     ui = getattr(obj, "ui", ui)
     for f in presetupfuncs or []:
         f(ui, obj)
     for name, module in extensions.extensions(ui):
         hook = getattr(module, 'reposetup', None)
         if hook:
             hook(ui, obj)
     if not obj.local():
         for f in wirepeersetupfuncs:
             f(ui, obj)
     return obj

 def repository(ui, path='', create=False, presetupfuncs=None):
     """return a repository object for the specified path"""
     peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
     repo = peer.local()
     if not repo:
         raise error.Abort(_("repository '%s' is not local") %
                           (path or peer.url()))
     return repo.filtered('visible')

 def peer(uiorrepo, opts, path, create=False):
     '''return a repository peer for the specified path'''
     rui = remoteui(uiorrepo, opts)
     return _peerorrepo(rui, path, create).peer()

 def defaultdest(source):
     '''return default destination of clone if none is given

     >>> defaultdest(b'foo')
     'foo'
     >>> defaultdest(b'/foo/bar')
     'bar'
     >>> defaultdest(b'/')
     ''
     >>> defaultdest(b'')
     ''
     >>> defaultdest(b'http://example.org/')
     ''
     >>> defaultdest(b'http://example.org/foo/')
     'foo'
     '''
     path = util.url(source).path
     if not path:
         return ''
     return os.path.basename(os.path.normpath(path))

+def sharedreposource(repo):
+    """Returns repository object for source repository of a shared repo.
+
+    If repo is not a shared repository, returns None.
+    """
+    if repo.sharedpath == repo.path:
+        return None
+
+    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
+        return repo.srcrepo
+
+    # the sharedpath always ends in the .hg; we want the path to the repo
+    source = repo.vfs.split(repo.sharedpath)[0]
+    srcurl, branches = parseurl(source)
+    srcrepo = repository(repo.ui, srcurl)
+    repo.srcrepo = srcrepo
+    return srcrepo
+
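This new helper is the public replacement that the rest of this change migrates share._getsrcrepo callers to. A hedged usage sketch, assuming a Python 2 Mercurial environment of this era; the repository path is illustrative:

from mercurial import hg, ui as uimod

ui = uimod.ui.load()
repo = hg.repository(ui, b'/path/to/shared-checkout')
src = hg.sharedreposource(repo)
if src is None:
    # the repo owns its own store
    ui.write(b'not a shared repository\n')
else:
    # the backing repository whose store this checkout borrows
    ui.write(b'store lives in %s\n' % src.root)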
 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
           relative=False):
     '''create a shared repository'''

     if not islocal(source):
         raise error.Abort(_('can only share local repositories'))

     if not dest:
         dest = defaultdest(source)
     else:
         dest = ui.expandpath(dest)

     if isinstance(source, bytes):
         origsource = ui.expandpath(source)
         source, branches = parseurl(origsource)
         srcrepo = repository(ui, source)
         rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
     else:
         srcrepo = source.local()
         origsource = source = srcrepo.url()
         checkout = None

     sharedpath = srcrepo.sharedpath # if our source is already sharing

     destwvfs = vfsmod.vfs(dest, realpath=True)
     destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

     if destvfs.lexists():
         raise error.Abort(_('destination already exists'))

     if not destwvfs.isdir():
         destwvfs.mkdir()
     destvfs.makedir()

     requirements = ''
     try:
         requirements = srcrepo.vfs.read('requires')
     except IOError as inst:
         if inst.errno != errno.ENOENT:
             raise

     if relative:
         try:
             sharedpath = os.path.relpath(sharedpath, destvfs.base)
             requirements += 'relshared\n'
         except (IOError, ValueError) as e:
             # ValueError is raised on Windows if the drive letters differ on
             # each path
             raise error.Abort(_('cannot calculate relative path'),
                               hint=str(e))
     else:
         requirements += 'shared\n'

     destvfs.write('requires', requirements)
     destvfs.write('sharedpath', sharedpath)

     r = repository(ui, destwvfs.base)
     postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
     _postshareupdate(r, update, checkout=checkout)
     return r

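Concretely, after the two destvfs.write calls above a share differs from a full clone only in these two metadata files. The contents below are illustrative for a non-relative share of a typical repository of this era:

$ cat dest/.hg/requires
dotencode
fncache
generaldelta
revlogv1
store
shared
$ cat dest/.hg/sharedpath
/home/user/src/project/.hg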
 def unshare(ui, repo):
     """convert a shared repository to a normal one

     Copy the store data to the repo and remove the sharedpath data.
     """

     destlock = lock = None
     lock = repo.lock()
     try:
         # we use locks here because if we race with commit, we
         # can end up with extra data in the cloned revlogs that's
         # not pointed to by changesets, thus causing verify to
         # fail

         destlock = copystore(ui, repo, repo.path)

         sharefile = repo.vfs.join('sharedpath')
         util.rename(sharefile, sharefile + '.old')

         repo.requirements.discard('shared')
         repo.requirements.discard('relshared')
         repo._writerequirements()
     finally:
         destlock and destlock.release()
         lock and lock.release()

     # update store, spath, svfs and sjoin of repo
     repo.unfiltered().__init__(repo.baseui, repo.root)

     # TODO: figure out how to access subrepos that exist, but were previously
     # removed from .hgsub
     c = repo['.']
     subs = c.substate
     for s in sorted(subs):
         c.sub(s).unshare()

 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
     """Called after a new shared repo is created.

     The new repo only has a requirements file and pointer to the source.
     This function configures additional shared data.

     Extensions can wrap this function and write additional entries to
     destrepo/.hg/shared to indicate additional pieces of data to be shared.
     """
     default = defaultpath or sourcerepo.ui.config('paths', 'default')
     if default:
         template = ('[paths]\n'
                     'default = %s\n')
         destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

     with destrepo.wlock():
         if bookmarks:
             destrepo.vfs.write('shared', sharedbookmarks + '\n')

 def _postshareupdate(repo, update, checkout=None):
     """Maybe perform a working directory update after a shared repo is created.

     ``update`` can be a boolean or a revision to update to.
     """
     if not update:
         return

     repo.ui.status(_("updating working directory\n"))
     if update is not True:
         checkout = update
     for test in (checkout, 'default', 'tip'):
         if test is None:
             continue
         try:
             uprev = repo.lookup(test)
             break
         except error.RepoLookupError:
             continue
     _update(repo, uprev)

 def copystore(ui, srcrepo, destpath):
     '''copy files from store of srcrepo in destpath

     returns destlock
     '''
     destlock = None
     try:
         hardlink = None
         num = 0
         closetopic = [None]
         def prog(topic, pos):
             if pos is None:
                 closetopic[0] = topic
             else:
                 ui.progress(topic, pos + num)
         srcpublishing = srcrepo.publishing()
         srcvfs = vfsmod.vfs(srcrepo.sharedpath)
         dstvfs = vfsmod.vfs(destpath)
         for f in srcrepo.store.copylist():
             if srcpublishing and f.endswith('phaseroots'):
                 continue
             dstbase = os.path.dirname(f)
             if dstbase and not dstvfs.exists(dstbase):
                 dstvfs.mkdir(dstbase)
             if srcvfs.exists(f):
                 if f.endswith('data'):
                     # 'dstbase' may be empty (e.g. revlog format 0)
                     lockfile = os.path.join(dstbase, "lock")
                     # lock to avoid premature writing to the target
                     destlock = lock.lock(dstvfs, lockfile)
                 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                              hardlink, progress=prog)
                 num += n
         if hardlink:
             ui.debug("linked %d files\n" % num)
             if closetopic[0]:
                 ui.progress(closetopic[0], None)
         else:
             ui.debug("copied %d files\n" % num)
             if closetopic[0]:
                 ui.progress(closetopic[0], None)
         return destlock
     except: # re-raises
         release(destlock)
         raise

 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                    rev=None, update=True, stream=False):
     """Perform a clone using a shared repo.

     The store for the repository will be located at <sharepath>/.hg. The
     specified revisions will be cloned or pulled from "source". A shared repo
     will be created at "dest" and a working copy will be created if "update" is
     True.
     """
     revs = None
     if rev:
         if not srcpeer.capable('lookup'):
             raise error.Abort(_("src repository does not support "
                                 "revision lookup and so doesn't "
                                 "support clone by revision"))
         revs = [srcpeer.lookup(r) for r in rev]

     # Obtain a lock before checking for or cloning the pooled repo otherwise
     # 2 clients may race creating or populating it.
     pooldir = os.path.dirname(sharepath)
     # lock class requires the directory to exist.
     try:
         util.makedir(pooldir, False)
     except OSError as e:
         if e.errno != errno.EEXIST:
             raise

     poolvfs = vfsmod.vfs(pooldir)
     basename = os.path.basename(sharepath)

     with lock.lock(poolvfs, '%s.lock' % basename):
         if os.path.exists(sharepath):
             ui.status(_('(sharing from existing pooled repository %s)\n') %
                       basename)
         else:
             ui.status(_('(sharing from new pooled repository %s)\n') % basename)
             # Always use pull mode because hardlinks in share mode don't work
             # well. Never update because working copies aren't necessary in
             # share mode.
             clone(ui, peeropts, source, dest=sharepath, pull=True,
                   rev=rev, update=False, stream=stream)

     # Resolve the value to put in [paths] section for the source.
     if islocal(source):
         defaultpath = os.path.abspath(util.urllocalpath(source))
     else:
         defaultpath = source

     sharerepo = repository(ui, path=sharepath)
     share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
           defaultpath=defaultpath)

     # We need to perform a pull against the dest repo to fetch bookmarks
     # and other non-store data that isn't shared by default. In the case of
     # non-existing shared repo, this means we pull from the remote twice. This
     # is a bit weird. But at the time it was implemented, there wasn't an easy
     # way to pull just non-changegroup data.
     destrepo = repository(ui, path=dest)
     exchange.pull(destrepo, srcpeer, heads=revs)

     _postshareupdate(destrepo, update)

     return srcpeer, peer(ui, peeropts, dest)

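The sharepath handed to clonewithshare() is derived in clone() below from the share.poolnaming mode. A small sketch of both derivations; the pool directory, source URL, and root-node hex are assumptions of the example:

import hashlib
import os

pool = '/var/cache/hg-pool'
source = b'https://example.com/repo'
roothex = '0' * 40  # hex of the remote's root changeset ("identity" mode)

# "identity": pool directory named after the first changeset's node
identity_path = os.path.join(pool, roothex)
# "remote": pool directory named after a hash of the source path/URL
remote_path = os.path.join(pool, hashlib.sha1(source).hexdigest())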
452 # Recomputing branch cache might be slow on big repos,
470 # Recomputing branch cache might be slow on big repos,
453 # so just copy it
471 # so just copy it
454 def _copycache(srcrepo, dstcachedir, fname):
472 def _copycache(srcrepo, dstcachedir, fname):
455 """copy a cache from srcrepo to destcachedir (if it exists)"""
473 """copy a cache from srcrepo to destcachedir (if it exists)"""
456 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
474 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
457 dstbranchcache = os.path.join(dstcachedir, fname)
475 dstbranchcache = os.path.join(dstcachedir, fname)
458 if os.path.exists(srcbranchcache):
476 if os.path.exists(srcbranchcache):
459 if not os.path.exists(dstcachedir):
477 if not os.path.exists(dstcachedir):
460 os.mkdir(dstcachedir)
478 os.mkdir(dstcachedir)
461 util.copyfile(srcbranchcache, dstbranchcache)
479 util.copyfile(srcbranchcache, dstbranchcache)
462
480
463 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
481 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
464 update=True, stream=False, branch=None, shareopts=None):
482 update=True, stream=False, branch=None, shareopts=None):
465 """Make a copy of an existing repository.
483 """Make a copy of an existing repository.
466
484
467 Create a copy of an existing repository in a new directory. The
485 Create a copy of an existing repository in a new directory. The
468 source and destination are URLs, as passed to the repository
486 source and destination are URLs, as passed to the repository
469 function. Returns a pair of repository peers, the source and
487 function. Returns a pair of repository peers, the source and
470 newly created destination.
488 newly created destination.
471
489
472 The location of the source is added to the new repository's
490 The location of the source is added to the new repository's
473 .hg/hgrc file, as the default to be used for future pulls and
491 .hg/hgrc file, as the default to be used for future pulls and
474 pushes.
492 pushes.
475
493
476 If an exception is raised, the partly cloned/updated destination
494 If an exception is raised, the partly cloned/updated destination
477 repository will be deleted.
495 repository will be deleted.
478
496
479 Arguments:
497 Arguments:
480
498
481 source: repository object or URL
499 source: repository object or URL
482
500
483 dest: URL of destination repository to create (defaults to base
501 dest: URL of destination repository to create (defaults to base
484 name of source repository)
502 name of source repository)
485
503
486 pull: always pull from source repository, even in local case or if the
504 pull: always pull from source repository, even in local case or if the
487 server prefers streaming
505 server prefers streaming
488
506
489 stream: stream raw data uncompressed from repository (fast over
507 stream: stream raw data uncompressed from repository (fast over
490 LAN, slow over WAN)
508 LAN, slow over WAN)
491
509
492 rev: revision to clone up to (implies pull=True)
510 rev: revision to clone up to (implies pull=True)
493
511
494 update: update working directory after clone completes, if
512 update: update working directory after clone completes, if
495 destination is local repository (True means update to default rev,
513 destination is local repository (True means update to default rev,
496 anything else is treated as a revision)
514 anything else is treated as a revision)
497
515
498 branch: branches to clone
516 branch: branches to clone
499
517
500 shareopts: dict of options to control auto sharing behavior. The "pool" key
518 shareopts: dict of options to control auto sharing behavior. The "pool" key
501 activates auto sharing mode and defines the directory for stores. The
519 activates auto sharing mode and defines the directory for stores. The
502 "mode" key determines how to construct the directory name of the shared
520 "mode" key determines how to construct the directory name of the shared
503 repository. "identity" means the name is derived from the node of the first
521 repository. "identity" means the name is derived from the node of the first
504 changeset in the repository. "remote" means the name is derived from the
522 changeset in the repository. "remote" means the name is derived from the
505 remote's path/URL. Defaults to "identity."
523 remote's path/URL. Defaults to "identity."
506 """
524 """
507
525
508 if isinstance(source, bytes):
526 if isinstance(source, bytes):
509 origsource = ui.expandpath(source)
527 origsource = ui.expandpath(source)
510 source, branch = parseurl(origsource, branch)
528 source, branch = parseurl(origsource, branch)
511 srcpeer = peer(ui, peeropts, source)
529 srcpeer = peer(ui, peeropts, source)
512 else:
530 else:
513 srcpeer = source.peer() # in case we were called with a localrepo
531 srcpeer = source.peer() # in case we were called with a localrepo
514 branch = (None, branch or [])
532 branch = (None, branch or [])
515 origsource = source = srcpeer.url()
533 origsource = source = srcpeer.url()
516 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
534 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
517
535
518 if dest is None:
536 if dest is None:
519 dest = defaultdest(source)
537 dest = defaultdest(source)
520 if dest:
538 if dest:
521 ui.status(_("destination directory: %s\n") % dest)
539 ui.status(_("destination directory: %s\n") % dest)
522 else:
540 else:
523 dest = ui.expandpath(dest)
541 dest = ui.expandpath(dest)
524
542
525 dest = util.urllocalpath(dest)
543 dest = util.urllocalpath(dest)
526 source = util.urllocalpath(source)
544 source = util.urllocalpath(source)
527
545
528 if not dest:
546 if not dest:
529 raise error.Abort(_("empty destination path is not valid"))
547 raise error.Abort(_("empty destination path is not valid"))
530
548
531 destvfs = vfsmod.vfs(dest, expandpath=True)
549 destvfs = vfsmod.vfs(dest, expandpath=True)
532 if destvfs.lexists():
550 if destvfs.lexists():
533 if not destvfs.isdir():
551 if not destvfs.isdir():
534 raise error.Abort(_("destination '%s' already exists") % dest)
552 raise error.Abort(_("destination '%s' already exists") % dest)
535 elif destvfs.listdir():
553 elif destvfs.listdir():
536 raise error.Abort(_("destination '%s' is not empty") % dest)
554 raise error.Abort(_("destination '%s' is not empty") % dest)
537
555
538 shareopts = shareopts or {}
556 shareopts = shareopts or {}
539 sharepool = shareopts.get('pool')
557 sharepool = shareopts.get('pool')
540 sharenamemode = shareopts.get('mode')
558 sharenamemode = shareopts.get('mode')
541 if sharepool and islocal(dest):
559 if sharepool and islocal(dest):
542 sharepath = None
560 sharepath = None
543 if sharenamemode == 'identity':
561 if sharenamemode == 'identity':
544 # Resolve the name from the initial changeset in the remote
562 # Resolve the name from the initial changeset in the remote
545 # repository. This returns nullid when the remote is empty. It
563 # repository. This returns nullid when the remote is empty. It
546 # raises RepoLookupError if revision 0 is filtered or otherwise
564 # raises RepoLookupError if revision 0 is filtered or otherwise
547 # not available. If we fail to resolve, sharing is not enabled.
565 # not available. If we fail to resolve, sharing is not enabled.
548 try:
566 try:
549 rootnode = srcpeer.lookup('0')
567 rootnode = srcpeer.lookup('0')
550 if rootnode != node.nullid:
568 if rootnode != node.nullid:
551 sharepath = os.path.join(sharepool, node.hex(rootnode))
569 sharepath = os.path.join(sharepool, node.hex(rootnode))
552 else:
570 else:
553 ui.status(_('(not using pooled storage: '
571 ui.status(_('(not using pooled storage: '
554 'remote appears to be empty)\n'))
572 'remote appears to be empty)\n'))
555 except error.RepoLookupError:
573 except error.RepoLookupError:
556 ui.status(_('(not using pooled storage: '
574 ui.status(_('(not using pooled storage: '
557 'unable to resolve identity of remote)\n'))
575 'unable to resolve identity of remote)\n'))
558 elif sharenamemode == 'remote':
576 elif sharenamemode == 'remote':
559 sharepath = os.path.join(
577 sharepath = os.path.join(
560 sharepool, node.hex(hashlib.sha1(source).digest()))
578 sharepool, node.hex(hashlib.sha1(source).digest()))
561 else:
579 else:
562 raise error.Abort(_('unknown share naming mode: %s') %
580 raise error.Abort(_('unknown share naming mode: %s') %
563 sharenamemode)
581 sharenamemode)
564
582
565 if sharepath:
583 if sharepath:
566 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
584 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
567 dest, pull=pull, rev=rev, update=update,
585 dest, pull=pull, rev=rev, update=update,
568 stream=stream)
586 stream=stream)
569
587
570 srclock = destlock = cleandir = None
588 srclock = destlock = cleandir = None
571 srcrepo = srcpeer.local()
589 srcrepo = srcpeer.local()
572 try:
590 try:
573 abspath = origsource
591 abspath = origsource
574 if islocal(origsource):
592 if islocal(origsource):
575 abspath = os.path.abspath(util.urllocalpath(origsource))
593 abspath = os.path.abspath(util.urllocalpath(origsource))
576
594
577 if islocal(dest):
595 if islocal(dest):
578 cleandir = dest
596 cleandir = dest
579
597
580 copy = False
598 copy = False
581 if (srcrepo and srcrepo.cancopy() and islocal(dest)
599 if (srcrepo and srcrepo.cancopy() and islocal(dest)
582 and not phases.hassecret(srcrepo)):
600 and not phases.hassecret(srcrepo)):
583 copy = not pull and not rev
601 copy = not pull and not rev
584
602
585 if copy:
603 if copy:
586 try:
604 try:
587 # we use a lock here because if we race with commit, we
605 # we use a lock here because if we race with commit, we
588 # can end up with extra data in the cloned revlogs that's
606 # can end up with extra data in the cloned revlogs that's
589 # not pointed to by changesets, thus causing verify to
607 # not pointed to by changesets, thus causing verify to
590 # fail
608 # fail
591 srclock = srcrepo.lock(wait=False)
609 srclock = srcrepo.lock(wait=False)
592 except error.LockError:
610 except error.LockError:
593 copy = False
611 copy = False
594
612
595 if copy:
613 if copy:
596 srcrepo.hook('preoutgoing', throw=True, source='clone')
614 srcrepo.hook('preoutgoing', throw=True, source='clone')
597 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
615 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
598 if not os.path.exists(dest):
616 if not os.path.exists(dest):
599 os.mkdir(dest)
617 os.mkdir(dest)
600 else:
618 else:
601 # only clean up directories we create ourselves
619 # only clean up directories we create ourselves
602 cleandir = hgdir
620 cleandir = hgdir
603 try:
621 try:
604 destpath = hgdir
622 destpath = hgdir
605 util.makedir(destpath, notindexed=True)
623 util.makedir(destpath, notindexed=True)
606 except OSError as inst:
624 except OSError as inst:
607 if inst.errno == errno.EEXIST:
625 if inst.errno == errno.EEXIST:
608 cleandir = None
626 cleandir = None
609 raise error.Abort(_("destination '%s' already exists")
627 raise error.Abort(_("destination '%s' already exists")
610 % dest)
628 % dest)
611 raise
629 raise
612
630
613 destlock = copystore(ui, srcrepo, destpath)
631 destlock = copystore(ui, srcrepo, destpath)
614 # copy bookmarks over
632 # copy bookmarks over
615 srcbookmarks = srcrepo.vfs.join('bookmarks')
633 srcbookmarks = srcrepo.vfs.join('bookmarks')
616 dstbookmarks = os.path.join(destpath, 'bookmarks')
634 dstbookmarks = os.path.join(destpath, 'bookmarks')
617 if os.path.exists(srcbookmarks):
635 if os.path.exists(srcbookmarks):
618 util.copyfile(srcbookmarks, dstbookmarks)
636 util.copyfile(srcbookmarks, dstbookmarks)
619
637
620 dstcachedir = os.path.join(destpath, 'cache')
638 dstcachedir = os.path.join(destpath, 'cache')
621 for cache in cacheutil.cachetocopy(srcrepo):
639 for cache in cacheutil.cachetocopy(srcrepo):
622 _copycache(srcrepo, dstcachedir, cache)
640 _copycache(srcrepo, dstcachedir, cache)
623
641
624 # we need to re-init the repo after manually copying the data
642 # we need to re-init the repo after manually copying the data
625 # into it
643 # into it
626 destpeer = peer(srcrepo, peeropts, dest)
644 destpeer = peer(srcrepo, peeropts, dest)
627 srcrepo.hook('outgoing', source='clone',
645 srcrepo.hook('outgoing', source='clone',
628 node=node.hex(node.nullid))
646 node=node.hex(node.nullid))
629 else:
647 else:
630 try:
648 try:
631 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
649 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
632 # only pass ui when no srcrepo
650 # only pass ui when no srcrepo
633 except OSError as inst:
651 except OSError as inst:
634 if inst.errno == errno.EEXIST:
652 if inst.errno == errno.EEXIST:
635 cleandir = None
653 cleandir = None
636 raise error.Abort(_("destination '%s' already exists")
654 raise error.Abort(_("destination '%s' already exists")
637 % dest)
655 % dest)
638 raise
656 raise
639
657
640 revs = None
658 revs = None
641 if rev:
659 if rev:
642 if not srcpeer.capable('lookup'):
660 if not srcpeer.capable('lookup'):
643 raise error.Abort(_("src repository does not support "
661 raise error.Abort(_("src repository does not support "
644 "revision lookup and so doesn't "
662 "revision lookup and so doesn't "
645 "support clone by revision"))
663 "support clone by revision"))
646 revs = [srcpeer.lookup(r) for r in rev]
664 revs = [srcpeer.lookup(r) for r in rev]
647 checkout = revs[0]
665 checkout = revs[0]
648 local = destpeer.local()
666 local = destpeer.local()
649 if local:
667 if local:
650 u = util.url(abspath)
668 u = util.url(abspath)
651 defaulturl = bytes(u)
669 defaulturl = bytes(u)
652 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
670 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
653 if not stream:
671 if not stream:
654 if pull:
672 if pull:
655 stream = False
673 stream = False
656 else:
674 else:
657 stream = None
675 stream = None
658 # internal config: ui.quietbookmarkmove
676 # internal config: ui.quietbookmarkmove
659 overrides = {('ui', 'quietbookmarkmove'): True}
677 overrides = {('ui', 'quietbookmarkmove'): True}
660 with local.ui.configoverride(overrides, 'clone'):
678 with local.ui.configoverride(overrides, 'clone'):
661 exchange.pull(local, srcpeer, revs,
679 exchange.pull(local, srcpeer, revs,
662 streamclonerequested=stream)
680 streamclonerequested=stream)
663 elif srcrepo:
681 elif srcrepo:
664 exchange.push(srcrepo, destpeer, revs=revs,
682 exchange.push(srcrepo, destpeer, revs=revs,
665 bookmarks=srcrepo._bookmarks.keys())
683 bookmarks=srcrepo._bookmarks.keys())
666 else:
684 else:
667 raise error.Abort(_("clone from remote to remote not supported")
685 raise error.Abort(_("clone from remote to remote not supported")
668 )
686 )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

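# Illustrative sketch (not part of the original module): one way an
# embedder might drive a programmatic clone. It assumes the upstream
# signature clone(ui, peeropts, source, dest=None, ...) declared
# earlier in this file; the URL and destination are placeholders.
def _example_clone(ui):
    # clone() returns the source and destination peers; the source
    # peer has already been closed in the finally block above.
    srcpeer, destpeer = clone(ui, {}, 'https://example.com/repo',
                              dest='repo-copy', update=True)
    return destpeer
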
def _showstats(repo, stats, quietempty=False):
    if quietempty and not any(stats):
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % stats)

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, changes are clobbered; otherwise they are
    merged into the working directory.

    Returns stats (see pydoc mercurial.merge.applyupdates)."""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats[3] > 0

# naming conflict in clone()
_update = update
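
# Illustrative sketch (not part of the original module): update()
# returns True when unresolved conflicts remain, mirroring the
# 'return stats[3] > 0' above; 'repo' is assumed to be an existing
# localrepository and 'tip' a valid changeid.
def _example_update_to_tip(repo):
    hadconflicts = update(repo, 'tip')
    return 1 if hadconflicts else 0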

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats[3] > 0

# naming conflict in updatetotally()
_clean = clean

def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of the non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes the arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
              changes into destination
    * noconflict: check that the update does not result in file merges

    Returns whether a conflict was detected while updating.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

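# Illustrative sketch (not part of the original module): an extension
# could bypass the 'commands.update.check' config read above and force
# the stricter 'noconflict' behaviour explicitly.
def _example_strict_update(ui, repo, checkout):
    return updatetotally(ui, repo, checkout, None,
                         updatecheck='noconflict')
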
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return True if any
    unresolved conflicts remain."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, so the mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats[3]:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats[3] > 0

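# Illustrative sketch (not part of the original module): aborting a
# half-done merge via the abort=True path above; node may be None
# because the abort branch recomputes it from the merge state.
def _example_abort_merge(repo):
    return merge(repo, None, abort=True)
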
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
    (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

def incoming(ui, repo, source, opts):
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        count = 0
        for n in chlist:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
    return _incoming(display, subreporecurse, ui, repo, source, opts)

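# Illustrative sketch (not part of the original module): listing
# incoming changes from the 'default' path; the opts keys mirror the
# ones the helpers above read, and the values here are placeholders.
def _example_incoming_from_default(ui, repo):
    opts = {'bundle': None, 'force': False, 'branch': None, 'rev': [],
            'newest_first': False, 'no_merges': False, 'subrepos': False}
    return incoming(ui, repo, 'default', opts)
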
def _outgoing(ui, repo, dest, opts):
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    o = outgoing.missing
    if not o:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return o, other

def outgoing(ui, repo, dest, opts):
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    count = 0
    for n in o:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes

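# Illustrative sketch (not part of the original module): incoming()
# and outgoing() above share the same limit/no-merges filtering loop;
# a hypothetical generator capturing it would look like this.
def _example_filternodes(repo, nodes, limit, nomerges):
    count = 0
    for n in nodes:
        if limit is not None and count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if nomerges and len(parents) == 2:
            continue
        count += 1
        yield n
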
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying about,
    # since they can't be pushed/pulled, and --hidden can be used if they are a
    # concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret

def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # copy ssh-specific options
    for o in 'ssh', 'remotecmd':
        v = opts.get(o) or src.config('ui', o)
        if v:
            dst.setconfig("ui", o, v, 'copied')

    # copy bundle-specific options
    r = src.config('bundle', 'mainreporoot')
    if r:
        dst.setconfig('bundle', 'mainreporoot', r, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    v = src.config('web', 'cacerts')
    if v:
        dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')

    return dst

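# Illustrative sketch (not part of the original module): building a ui
# for a remote peer from a local repo object, with command-line style
# --ssh/--remotecmd overrides passed through opts.
def _example_remoteui(repo):
    return remoteui(repo, {'ssh': 'ssh -C', 'remotecmd': ''})
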
# Files of interest
# Used to check whether the repository has changed, by looking at the
# mtime and size of these files.
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
      ]

class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            return self._repo, False

        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        c._state = self._state
        c.mtime = self.mtime
        return c
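
# Illustrative sketch (not part of the original module): the
# fetch()/copy() protocol of cachedlocalrepo, as a long-lived process
# (e.g. a web server) might use it between requests.
def _example_cached_fetch(cache):
    repo, fresh = cache.fetch()
    if fresh:
        repo.ui.debug('repository changed on disk; instance recreated\n')
    return repo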