##// END OF EJS Templates
share: move the implementation of 'unshare' to the 'hg' module...
Matt Harbison -
r34879:9f7ecc5b default
parent child Browse files
Show More
@@ -1,232 +1,211 b''
1 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
1 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 '''share a common history between several working directories
6 '''share a common history between several working directories
7
7
8 Automatic Pooled Storage for Clones
8 Automatic Pooled Storage for Clones
9 -----------------------------------
9 -----------------------------------
10
10
11 When this extension is active, :hg:`clone` can be configured to
11 When this extension is active, :hg:`clone` can be configured to
12 automatically share/pool storage across multiple clones. This
12 automatically share/pool storage across multiple clones. This
13 mode effectively converts :hg:`clone` to :hg:`clone` + :hg:`share`.
13 mode effectively converts :hg:`clone` to :hg:`clone` + :hg:`share`.
14 The benefit of using this mode is the automatic management of
14 The benefit of using this mode is the automatic management of
15 store paths and intelligent pooling of related repositories.
15 store paths and intelligent pooling of related repositories.
16
16
17 The following ``share.`` config options influence this feature:
17 The following ``share.`` config options influence this feature:
18
18
19 ``share.pool``
19 ``share.pool``
20 Filesystem path where shared repository data will be stored. When
20 Filesystem path where shared repository data will be stored. When
21 defined, :hg:`clone` will automatically use shared repository
21 defined, :hg:`clone` will automatically use shared repository
22 storage instead of creating a store inside each clone.
22 storage instead of creating a store inside each clone.
23
23
24 ``share.poolnaming``
24 ``share.poolnaming``
25 How directory names in ``share.pool`` are constructed.
25 How directory names in ``share.pool`` are constructed.
26
26
27 "identity" means the name is derived from the first changeset in the
27 "identity" means the name is derived from the first changeset in the
28 repository. In this mode, different remotes share storage if their
28 repository. In this mode, different remotes share storage if their
29 root/initial changeset is identical. In this mode, the local shared
29 root/initial changeset is identical. In this mode, the local shared
30 repository is an aggregate of all encountered remote repositories.
30 repository is an aggregate of all encountered remote repositories.
31
31
32 "remote" means the name is derived from the source repository's
32 "remote" means the name is derived from the source repository's
33 path or URL. In this mode, storage is only shared if the path or URL
33 path or URL. In this mode, storage is only shared if the path or URL
34 requested in the :hg:`clone` command matches exactly to a repository
34 requested in the :hg:`clone` command matches exactly to a repository
35 that was cloned before.
35 that was cloned before.
36
36
37 The default naming mode is "identity."
37 The default naming mode is "identity."
38 '''
38 '''
39
39
40 from __future__ import absolute_import
40 from __future__ import absolute_import
41
41
42 import errno
42 import errno
43 from mercurial.i18n import _
43 from mercurial.i18n import _
44 from mercurial import (
44 from mercurial import (
45 bookmarks,
45 bookmarks,
46 commands,
46 commands,
47 error,
47 error,
48 extensions,
48 extensions,
49 hg,
49 hg,
50 registrar,
50 registrar,
51 txnutil,
51 txnutil,
52 util,
52 util,
53 )
53 )
54
54
# Re-export the hg-module helpers this extension uses.
repository = hg.repository
parseurl = hg.parseurl

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# Pooled-storage knobs; see the module docstring for their semantics.
configitem('share', 'pool',
    default=None,
)
configitem('share', 'poolnaming',
    default='identity',
)
75
75
@command('share',
    [('U', 'noupdate', None, _('do not create a working directory')),
     ('B', 'bookmarks', None, _('also share bookmarks')),
     ('', 'relative', None, _('point to source using a relative path '
                              '(EXPERIMENTAL)')),
    ],
    _('[-U] [-B] SOURCE [DEST]'),
    norepo=True)
def share(ui, source, dest=None, noupdate=False, bookmarks=False,
          relative=False):
    """create a new shared repository

    Initialize a new repository and working directory that shares its
    history (and optionally bookmarks) with another repository.

    .. note::

       using rollback or extensions that destroy/modify history (mq,
       rebase, etc.) can cause considerable confusion with shared
       clones. In particular, if two shared clones are both updated to
       the same changeset, and one of them destroys that changeset
       with rollback, the other clone will suddenly stop working: all
       operations will fail with "abort: working directory has unknown
       parent". The only known workaround is to use debugsetparents on
       the broken clone to reset it to a changeset that still exists.
    """
    # All of the heavy lifting lives in the hg module; this command is a
    # thin CLI wrapper that translates the flag names.
    hg.share(ui, source, dest=dest, update=not noupdate,
             bookmarks=bookmarks, relative=relative)
    return 0
106
106
@command('unshare', [], '')
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """
    if not repo.shared():
        raise error.Abort(_("this is not a shared repo"))

    # The implementation was moved to the hg module so other code can
    # unshare without going through the command layer.
    hg.unshare(ui, repo)
139
118
# Wrap clone command to pass auto share options.
def clone(orig, ui, source, *args, **opts):
    # Read the pooled-storage configuration and hand it to the real
    # clone command via the 'shareopts' option.
    pool = ui.config('share', 'pool')
    if pool:
        pool = util.expandpath(pool)

    opts[r'shareopts'] = {
        'pool': pool,
        'mode': ui.config('share', 'poolnaming'),
    }

    return orig(ui, source, *args, **opts)
152
131
def extsetup(ui):
    # Intercept bookmark reads/writes so bookmarks can live in the source
    # repository, and wrap 'clone' to honour the share.pool options.
    extensions.wrapfunction(bookmarks, '_getbkfile', getbkfile)
    extensions.wrapfunction(bookmarks.bmstore, '_recordchange', recordchange)
    extensions.wrapfunction(bookmarks.bmstore, '_writerepo', writerepo)
    extensions.wrapcommand(commands.table, 'clone', clone)
158
137
def _hassharedbookmarks(repo):
    """Returns whether this repo has shared bookmarks"""
    try:
        shared = repo.vfs.read('shared').splitlines()
    except IOError as inst:
        # A missing .hg/shared file simply means nothing is shared.
        if inst.errno != errno.ENOENT:
            raise
        return False
    return hg.sharedbookmarks in shared
168
147
def _getsrcrepo(repo):
    """
    Returns the source repository object for a given shared repository.
    If repo is not a shared repository, return None.
    """
    if repo.sharedpath == repo.path:
        return None

    # Memoized on the repo object so repeated lookups are cheap.
    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
164 return srcrepo
186
165
def getbkfile(orig, repo):
    """Open the bookmarks file, redirecting to the share source when needed."""
    if _hassharedbookmarks(repo):
        srcrepo = _getsrcrepo(repo)
        if srcrepo is not None:
            # just orig(srcrepo) doesn't work as expected, because
            # HG_PENDING refers repo.root.
            try:
                fp, pending = txnutil.trypending(repo.root, repo.vfs,
                                                 'bookmarks')
                if pending:
                    # only in this case, bookmark information in repo
                    # is up-to-date.
                    return fp
                fp.close()
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

            # otherwise, we should read bookmarks from srcrepo,
            # because .hg/bookmarks in srcrepo might be already
            # changed via another sharing repo
            repo = srcrepo

            # TODO: Pending changes in repo are still invisible in
            # srcrepo, because bookmarks.pending is written only into repo.
            # See also https://www.mercurial-scm.org/wiki/SharedRepository
    return orig(repo)
214
193
def recordchange(orig, self, tr):
    # Continue with write to local bookmarks file as usual
    orig(self, tr)

    if _hassharedbookmarks(self._repo):
        srcrepo = _getsrcrepo(self._repo)
        if srcrepo is not None:
            # Propagate the change to the share source once the
            # transaction closes successfully.
            category = 'share-bookmarks'
            tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
224
203
def writerepo(orig, self, repo):
    # First write local bookmarks file in case we ever unshare
    orig(self, repo)

    if _hassharedbookmarks(self._repo):
        srcrepo = _getsrcrepo(self._repo)
        if srcrepo is not None:
            # Also persist the bookmarks into the share source.
            orig(self, srcrepo)
@@ -1,1065 +1,1094 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 bundlerepo,
21 bundlerepo,
22 cmdutil,
22 cmdutil,
23 destutil,
23 destutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 httppeer,
28 httppeer,
29 localrepo,
29 localrepo,
30 lock,
30 lock,
31 merge as mergemod,
31 merge as mergemod,
32 node,
32 node,
33 phases,
33 phases,
34 repoview,
34 repoview,
35 scmutil,
35 scmutil,
36 sshpeer,
36 sshpeer,
37 statichttprepo,
37 statichttprepo,
38 ui as uimod,
38 ui as uimod,
39 unionrepo,
39 unionrepo,
40 url,
40 url,
41 util,
41 util,
42 verify as verifymod,
42 verify as verifymod,
43 vfs as vfsmod,
43 vfs as vfsmod,
44 )
44 )
45
45
release = lock.release

# shared features
sharedbookmarks = 'bookmarks'
50
50
def _local(path):
    # A local path that points at a file is treated as a bundle,
    # otherwise as a regular local repository.
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)
54
54
def addbranchrevs(lrepo, other, branches, revs):
    """Resolve #branch fragments against a peer's branchmap.

    Returns (revs, checkout) where checkout is the suggested revision
    to update to, or None.
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # Nothing branch-related requested: pass revs through untouched.
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        # Expand one branch name into its head revisions; returns whether
        # the name was actually a known branch.
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        # An unknown #fragment may be a raw hash rather than a branch name.
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
97
97
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    branch = None
    if u.fragment:
        # The fragment names a branch; strip it from the URL itself.
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])
107
107
# Map URL scheme -> module (or factory) providing repo/peer instances.
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
117
117
def _peerlookup(path):
    """Return the scheme handler for ``path`` (module or instance)."""
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing
130
130
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            # Handlers without an islocal() are considered non-local.
            return False
    return repo.local()
139
139
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)
147
147
# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []
150
150
def _peerorrepo(ui, path, create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    # Prefer the object's own ui (it may carry repo-local config).
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    # Give every loaded extension a chance to set up the repo/peer.
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
165
165
def repository(ui, path='', create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    # Callers get the 'visible' filtered view, hiding hidden changesets.
    return repo.filtered('visible')
174
174
def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create).peer()
179
179
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    # normpath strips any trailing slash so basename yields the last
    # real path component.
    return os.path.basename(os.path.normpath(path))
200
200
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        # Source given as a path/URL: resolve it to a repo object.
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # Source given as a repo/peer object.
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    # Mirror the source's requirements, then mark the new repo shared.
    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except IOError as e:
            raise error.Abort(_('cannot calculate relative path'),
                              hint=str(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r
259
259
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """
    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail
        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)
288
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        # Seed the new clone's default path from the source.
        fp = destrepo.vfs("hgrc", "w", text=True)
        fp.write("[paths]\n")
        fp.write("default = %s\n" % default)
        fp.close()

    with destrepo.wlock():
        if bookmarks:
            fp = destrepo.vfs('shared', 'w')
            fp.write(sharedbookmarks + '\n')
            fp.close()
281
310
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    # Fall back through checkout -> 'default' -> 'tip' until one resolves.
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
302
331
303 def copystore(ui, srcrepo, destpath):
332 def copystore(ui, srcrepo, destpath):
304 '''copy files from store of srcrepo in destpath
333 '''copy files from store of srcrepo in destpath
305
334
306 returns destlock
335 returns destlock
307 '''
336 '''
308 destlock = None
337 destlock = None
309 try:
338 try:
310 hardlink = None
339 hardlink = None
311 num = 0
340 num = 0
312 closetopic = [None]
341 closetopic = [None]
313 def prog(topic, pos):
342 def prog(topic, pos):
314 if pos is None:
343 if pos is None:
315 closetopic[0] = topic
344 closetopic[0] = topic
316 else:
345 else:
317 ui.progress(topic, pos + num)
346 ui.progress(topic, pos + num)
318 srcpublishing = srcrepo.publishing()
347 srcpublishing = srcrepo.publishing()
319 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
348 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
320 dstvfs = vfsmod.vfs(destpath)
349 dstvfs = vfsmod.vfs(destpath)
321 for f in srcrepo.store.copylist():
350 for f in srcrepo.store.copylist():
322 if srcpublishing and f.endswith('phaseroots'):
351 if srcpublishing and f.endswith('phaseroots'):
323 continue
352 continue
324 dstbase = os.path.dirname(f)
353 dstbase = os.path.dirname(f)
325 if dstbase and not dstvfs.exists(dstbase):
354 if dstbase and not dstvfs.exists(dstbase):
326 dstvfs.mkdir(dstbase)
355 dstvfs.mkdir(dstbase)
327 if srcvfs.exists(f):
356 if srcvfs.exists(f):
328 if f.endswith('data'):
357 if f.endswith('data'):
329 # 'dstbase' may be empty (e.g. revlog format 0)
358 # 'dstbase' may be empty (e.g. revlog format 0)
330 lockfile = os.path.join(dstbase, "lock")
359 lockfile = os.path.join(dstbase, "lock")
331 # lock to avoid premature writing to the target
360 # lock to avoid premature writing to the target
332 destlock = lock.lock(dstvfs, lockfile)
361 destlock = lock.lock(dstvfs, lockfile)
333 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
362 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
334 hardlink, progress=prog)
363 hardlink, progress=prog)
335 num += n
364 num += n
336 if hardlink:
365 if hardlink:
337 ui.debug("linked %d files\n" % num)
366 ui.debug("linked %d files\n" % num)
338 if closetopic[0]:
367 if closetopic[0]:
339 ui.progress(closetopic[0], None)
368 ui.progress(closetopic[0], None)
340 else:
369 else:
341 ui.debug("copied %d files\n" % num)
370 ui.debug("copied %d files\n" % num)
342 if closetopic[0]:
371 if closetopic[0]:
343 ui.progress(closetopic[0], None)
372 ui.progress(closetopic[0], None)
344 return destlock
373 return destlock
345 except: # re-raises
374 except: # re-raises
346 release(destlock)
375 release(destlock)
347 raise
376 raise
348
377
349 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
378 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
350 rev=None, update=True, stream=False):
379 rev=None, update=True, stream=False):
351 """Perform a clone using a shared repo.
380 """Perform a clone using a shared repo.
352
381
353 The store for the repository will be located at <sharepath>/.hg. The
382 The store for the repository will be located at <sharepath>/.hg. The
354 specified revisions will be cloned or pulled from "source". A shared repo
383 specified revisions will be cloned or pulled from "source". A shared repo
355 will be created at "dest" and a working copy will be created if "update" is
384 will be created at "dest" and a working copy will be created if "update" is
356 True.
385 True.
357 """
386 """
358 revs = None
387 revs = None
359 if rev:
388 if rev:
360 if not srcpeer.capable('lookup'):
389 if not srcpeer.capable('lookup'):
361 raise error.Abort(_("src repository does not support "
390 raise error.Abort(_("src repository does not support "
362 "revision lookup and so doesn't "
391 "revision lookup and so doesn't "
363 "support clone by revision"))
392 "support clone by revision"))
364 revs = [srcpeer.lookup(r) for r in rev]
393 revs = [srcpeer.lookup(r) for r in rev]
365
394
366 # Obtain a lock before checking for or cloning the pooled repo otherwise
395 # Obtain a lock before checking for or cloning the pooled repo otherwise
367 # 2 clients may race creating or populating it.
396 # 2 clients may race creating or populating it.
368 pooldir = os.path.dirname(sharepath)
397 pooldir = os.path.dirname(sharepath)
369 # lock class requires the directory to exist.
398 # lock class requires the directory to exist.
370 try:
399 try:
371 util.makedir(pooldir, False)
400 util.makedir(pooldir, False)
372 except OSError as e:
401 except OSError as e:
373 if e.errno != errno.EEXIST:
402 if e.errno != errno.EEXIST:
374 raise
403 raise
375
404
376 poolvfs = vfsmod.vfs(pooldir)
405 poolvfs = vfsmod.vfs(pooldir)
377 basename = os.path.basename(sharepath)
406 basename = os.path.basename(sharepath)
378
407
379 with lock.lock(poolvfs, '%s.lock' % basename):
408 with lock.lock(poolvfs, '%s.lock' % basename):
380 if os.path.exists(sharepath):
409 if os.path.exists(sharepath):
381 ui.status(_('(sharing from existing pooled repository %s)\n') %
410 ui.status(_('(sharing from existing pooled repository %s)\n') %
382 basename)
411 basename)
383 else:
412 else:
384 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
413 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
385 # Always use pull mode because hardlinks in share mode don't work
414 # Always use pull mode because hardlinks in share mode don't work
386 # well. Never update because working copies aren't necessary in
415 # well. Never update because working copies aren't necessary in
387 # share mode.
416 # share mode.
388 clone(ui, peeropts, source, dest=sharepath, pull=True,
417 clone(ui, peeropts, source, dest=sharepath, pull=True,
389 rev=rev, update=False, stream=stream)
418 rev=rev, update=False, stream=stream)
390
419
391 # Resolve the value to put in [paths] section for the source.
420 # Resolve the value to put in [paths] section for the source.
392 if islocal(source):
421 if islocal(source):
393 defaultpath = os.path.abspath(util.urllocalpath(source))
422 defaultpath = os.path.abspath(util.urllocalpath(source))
394 else:
423 else:
395 defaultpath = source
424 defaultpath = source
396
425
397 sharerepo = repository(ui, path=sharepath)
426 sharerepo = repository(ui, path=sharepath)
398 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
427 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
399 defaultpath=defaultpath)
428 defaultpath=defaultpath)
400
429
401 # We need to perform a pull against the dest repo to fetch bookmarks
430 # We need to perform a pull against the dest repo to fetch bookmarks
402 # and other non-store data that isn't shared by default. In the case of
431 # and other non-store data that isn't shared by default. In the case of
403 # non-existing shared repo, this means we pull from the remote twice. This
432 # non-existing shared repo, this means we pull from the remote twice. This
404 # is a bit weird. But at the time it was implemented, there wasn't an easy
433 # is a bit weird. But at the time it was implemented, there wasn't an easy
405 # way to pull just non-changegroup data.
434 # way to pull just non-changegroup data.
406 destrepo = repository(ui, path=dest)
435 destrepo = repository(ui, path=dest)
407 exchange.pull(destrepo, srcpeer, heads=revs)
436 exchange.pull(destrepo, srcpeer, heads=revs)
408
437
409 _postshareupdate(destrepo, update)
438 _postshareupdate(destrepo, update)
410
439
411 return srcpeer, peer(ui, peeropts, dest)
440 return srcpeer, peer(ui, peeropts, dest)
412
441
413 # Recomputing branch cache might be slow on big repos,
442 # Recomputing branch cache might be slow on big repos,
414 # so just copy it
443 # so just copy it
415 def _copycache(srcrepo, dstcachedir, fname):
444 def _copycache(srcrepo, dstcachedir, fname):
416 """copy a cache from srcrepo to destcachedir (if it exists)"""
445 """copy a cache from srcrepo to destcachedir (if it exists)"""
417 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
446 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
418 dstbranchcache = os.path.join(dstcachedir, fname)
447 dstbranchcache = os.path.join(dstcachedir, fname)
419 if os.path.exists(srcbranchcache):
448 if os.path.exists(srcbranchcache):
420 if not os.path.exists(dstcachedir):
449 if not os.path.exists(dstcachedir):
421 os.mkdir(dstcachedir)
450 os.mkdir(dstcachedir)
422 util.copyfile(srcbranchcache, dstbranchcache)
451 util.copyfile(srcbranchcache, dstbranchcache)
423
452
424 def _cachetocopy(srcrepo):
453 def _cachetocopy(srcrepo):
425 """return the list of cache file valuable to copy during a clone"""
454 """return the list of cache file valuable to copy during a clone"""
426 # In local clones we're copying all nodes, not just served
455 # In local clones we're copying all nodes, not just served
427 # ones. Therefore copy all branch caches over.
456 # ones. Therefore copy all branch caches over.
428 cachefiles = ['branch2']
457 cachefiles = ['branch2']
429 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
458 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
430 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
459 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
431 cachefiles += ['tags2']
460 cachefiles += ['tags2']
432 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
461 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
433 cachefiles += ['hgtagsfnodes1']
462 cachefiles += ['hgtagsfnodes1']
434 return cachefiles
463 return cachefiles
435
464
436 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
465 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
437 update=True, stream=False, branch=None, shareopts=None):
466 update=True, stream=False, branch=None, shareopts=None):
438 """Make a copy of an existing repository.
467 """Make a copy of an existing repository.
439
468
440 Create a copy of an existing repository in a new directory. The
469 Create a copy of an existing repository in a new directory. The
441 source and destination are URLs, as passed to the repository
470 source and destination are URLs, as passed to the repository
442 function. Returns a pair of repository peers, the source and
471 function. Returns a pair of repository peers, the source and
443 newly created destination.
472 newly created destination.
444
473
445 The location of the source is added to the new repository's
474 The location of the source is added to the new repository's
446 .hg/hgrc file, as the default to be used for future pulls and
475 .hg/hgrc file, as the default to be used for future pulls and
447 pushes.
476 pushes.
448
477
449 If an exception is raised, the partly cloned/updated destination
478 If an exception is raised, the partly cloned/updated destination
450 repository will be deleted.
479 repository will be deleted.
451
480
452 Arguments:
481 Arguments:
453
482
454 source: repository object or URL
483 source: repository object or URL
455
484
456 dest: URL of destination repository to create (defaults to base
485 dest: URL of destination repository to create (defaults to base
457 name of source repository)
486 name of source repository)
458
487
459 pull: always pull from source repository, even in local case or if the
488 pull: always pull from source repository, even in local case or if the
460 server prefers streaming
489 server prefers streaming
461
490
462 stream: stream raw data uncompressed from repository (fast over
491 stream: stream raw data uncompressed from repository (fast over
463 LAN, slow over WAN)
492 LAN, slow over WAN)
464
493
465 rev: revision to clone up to (implies pull=True)
494 rev: revision to clone up to (implies pull=True)
466
495
467 update: update working directory after clone completes, if
496 update: update working directory after clone completes, if
468 destination is local repository (True means update to default rev,
497 destination is local repository (True means update to default rev,
469 anything else is treated as a revision)
498 anything else is treated as a revision)
470
499
471 branch: branches to clone
500 branch: branches to clone
472
501
473 shareopts: dict of options to control auto sharing behavior. The "pool" key
502 shareopts: dict of options to control auto sharing behavior. The "pool" key
474 activates auto sharing mode and defines the directory for stores. The
503 activates auto sharing mode and defines the directory for stores. The
475 "mode" key determines how to construct the directory name of the shared
504 "mode" key determines how to construct the directory name of the shared
476 repository. "identity" means the name is derived from the node of the first
505 repository. "identity" means the name is derived from the node of the first
477 changeset in the repository. "remote" means the name is derived from the
506 changeset in the repository. "remote" means the name is derived from the
478 remote's path/URL. Defaults to "identity."
507 remote's path/URL. Defaults to "identity."
479 """
508 """
480
509
481 if isinstance(source, bytes):
510 if isinstance(source, bytes):
482 origsource = ui.expandpath(source)
511 origsource = ui.expandpath(source)
483 source, branch = parseurl(origsource, branch)
512 source, branch = parseurl(origsource, branch)
484 srcpeer = peer(ui, peeropts, source)
513 srcpeer = peer(ui, peeropts, source)
485 else:
514 else:
486 srcpeer = source.peer() # in case we were called with a localrepo
515 srcpeer = source.peer() # in case we were called with a localrepo
487 branch = (None, branch or [])
516 branch = (None, branch or [])
488 origsource = source = srcpeer.url()
517 origsource = source = srcpeer.url()
489 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
518 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
490
519
491 if dest is None:
520 if dest is None:
492 dest = defaultdest(source)
521 dest = defaultdest(source)
493 if dest:
522 if dest:
494 ui.status(_("destination directory: %s\n") % dest)
523 ui.status(_("destination directory: %s\n") % dest)
495 else:
524 else:
496 dest = ui.expandpath(dest)
525 dest = ui.expandpath(dest)
497
526
498 dest = util.urllocalpath(dest)
527 dest = util.urllocalpath(dest)
499 source = util.urllocalpath(source)
528 source = util.urllocalpath(source)
500
529
501 if not dest:
530 if not dest:
502 raise error.Abort(_("empty destination path is not valid"))
531 raise error.Abort(_("empty destination path is not valid"))
503
532
504 destvfs = vfsmod.vfs(dest, expandpath=True)
533 destvfs = vfsmod.vfs(dest, expandpath=True)
505 if destvfs.lexists():
534 if destvfs.lexists():
506 if not destvfs.isdir():
535 if not destvfs.isdir():
507 raise error.Abort(_("destination '%s' already exists") % dest)
536 raise error.Abort(_("destination '%s' already exists") % dest)
508 elif destvfs.listdir():
537 elif destvfs.listdir():
509 raise error.Abort(_("destination '%s' is not empty") % dest)
538 raise error.Abort(_("destination '%s' is not empty") % dest)
510
539
511 shareopts = shareopts or {}
540 shareopts = shareopts or {}
512 sharepool = shareopts.get('pool')
541 sharepool = shareopts.get('pool')
513 sharenamemode = shareopts.get('mode')
542 sharenamemode = shareopts.get('mode')
514 if sharepool and islocal(dest):
543 if sharepool and islocal(dest):
515 sharepath = None
544 sharepath = None
516 if sharenamemode == 'identity':
545 if sharenamemode == 'identity':
517 # Resolve the name from the initial changeset in the remote
546 # Resolve the name from the initial changeset in the remote
518 # repository. This returns nullid when the remote is empty. It
547 # repository. This returns nullid when the remote is empty. It
519 # raises RepoLookupError if revision 0 is filtered or otherwise
548 # raises RepoLookupError if revision 0 is filtered or otherwise
520 # not available. If we fail to resolve, sharing is not enabled.
549 # not available. If we fail to resolve, sharing is not enabled.
521 try:
550 try:
522 rootnode = srcpeer.lookup('0')
551 rootnode = srcpeer.lookup('0')
523 if rootnode != node.nullid:
552 if rootnode != node.nullid:
524 sharepath = os.path.join(sharepool, node.hex(rootnode))
553 sharepath = os.path.join(sharepool, node.hex(rootnode))
525 else:
554 else:
526 ui.status(_('(not using pooled storage: '
555 ui.status(_('(not using pooled storage: '
527 'remote appears to be empty)\n'))
556 'remote appears to be empty)\n'))
528 except error.RepoLookupError:
557 except error.RepoLookupError:
529 ui.status(_('(not using pooled storage: '
558 ui.status(_('(not using pooled storage: '
530 'unable to resolve identity of remote)\n'))
559 'unable to resolve identity of remote)\n'))
531 elif sharenamemode == 'remote':
560 elif sharenamemode == 'remote':
532 sharepath = os.path.join(
561 sharepath = os.path.join(
533 sharepool, hashlib.sha1(source).hexdigest())
562 sharepool, hashlib.sha1(source).hexdigest())
534 else:
563 else:
535 raise error.Abort(_('unknown share naming mode: %s') %
564 raise error.Abort(_('unknown share naming mode: %s') %
536 sharenamemode)
565 sharenamemode)
537
566
538 if sharepath:
567 if sharepath:
539 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
568 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
540 dest, pull=pull, rev=rev, update=update,
569 dest, pull=pull, rev=rev, update=update,
541 stream=stream)
570 stream=stream)
542
571
543 srclock = destlock = cleandir = None
572 srclock = destlock = cleandir = None
544 srcrepo = srcpeer.local()
573 srcrepo = srcpeer.local()
545 try:
574 try:
546 abspath = origsource
575 abspath = origsource
547 if islocal(origsource):
576 if islocal(origsource):
548 abspath = os.path.abspath(util.urllocalpath(origsource))
577 abspath = os.path.abspath(util.urllocalpath(origsource))
549
578
550 if islocal(dest):
579 if islocal(dest):
551 cleandir = dest
580 cleandir = dest
552
581
553 copy = False
582 copy = False
554 if (srcrepo and srcrepo.cancopy() and islocal(dest)
583 if (srcrepo and srcrepo.cancopy() and islocal(dest)
555 and not phases.hassecret(srcrepo)):
584 and not phases.hassecret(srcrepo)):
556 copy = not pull and not rev
585 copy = not pull and not rev
557
586
558 if copy:
587 if copy:
559 try:
588 try:
560 # we use a lock here because if we race with commit, we
589 # we use a lock here because if we race with commit, we
561 # can end up with extra data in the cloned revlogs that's
590 # can end up with extra data in the cloned revlogs that's
562 # not pointed to by changesets, thus causing verify to
591 # not pointed to by changesets, thus causing verify to
563 # fail
592 # fail
564 srclock = srcrepo.lock(wait=False)
593 srclock = srcrepo.lock(wait=False)
565 except error.LockError:
594 except error.LockError:
566 copy = False
595 copy = False
567
596
568 if copy:
597 if copy:
569 srcrepo.hook('preoutgoing', throw=True, source='clone')
598 srcrepo.hook('preoutgoing', throw=True, source='clone')
570 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
599 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
571 if not os.path.exists(dest):
600 if not os.path.exists(dest):
572 os.mkdir(dest)
601 os.mkdir(dest)
573 else:
602 else:
574 # only clean up directories we create ourselves
603 # only clean up directories we create ourselves
575 cleandir = hgdir
604 cleandir = hgdir
576 try:
605 try:
577 destpath = hgdir
606 destpath = hgdir
578 util.makedir(destpath, notindexed=True)
607 util.makedir(destpath, notindexed=True)
579 except OSError as inst:
608 except OSError as inst:
580 if inst.errno == errno.EEXIST:
609 if inst.errno == errno.EEXIST:
581 cleandir = None
610 cleandir = None
582 raise error.Abort(_("destination '%s' already exists")
611 raise error.Abort(_("destination '%s' already exists")
583 % dest)
612 % dest)
584 raise
613 raise
585
614
586 destlock = copystore(ui, srcrepo, destpath)
615 destlock = copystore(ui, srcrepo, destpath)
587 # copy bookmarks over
616 # copy bookmarks over
588 srcbookmarks = srcrepo.vfs.join('bookmarks')
617 srcbookmarks = srcrepo.vfs.join('bookmarks')
589 dstbookmarks = os.path.join(destpath, 'bookmarks')
618 dstbookmarks = os.path.join(destpath, 'bookmarks')
590 if os.path.exists(srcbookmarks):
619 if os.path.exists(srcbookmarks):
591 util.copyfile(srcbookmarks, dstbookmarks)
620 util.copyfile(srcbookmarks, dstbookmarks)
592
621
593 dstcachedir = os.path.join(destpath, 'cache')
622 dstcachedir = os.path.join(destpath, 'cache')
594 for cache in _cachetocopy(srcrepo):
623 for cache in _cachetocopy(srcrepo):
595 _copycache(srcrepo, dstcachedir, cache)
624 _copycache(srcrepo, dstcachedir, cache)
596
625
597 # we need to re-init the repo after manually copying the data
626 # we need to re-init the repo after manually copying the data
598 # into it
627 # into it
599 destpeer = peer(srcrepo, peeropts, dest)
628 destpeer = peer(srcrepo, peeropts, dest)
600 srcrepo.hook('outgoing', source='clone',
629 srcrepo.hook('outgoing', source='clone',
601 node=node.hex(node.nullid))
630 node=node.hex(node.nullid))
602 else:
631 else:
603 try:
632 try:
604 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
633 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
605 # only pass ui when no srcrepo
634 # only pass ui when no srcrepo
606 except OSError as inst:
635 except OSError as inst:
607 if inst.errno == errno.EEXIST:
636 if inst.errno == errno.EEXIST:
608 cleandir = None
637 cleandir = None
609 raise error.Abort(_("destination '%s' already exists")
638 raise error.Abort(_("destination '%s' already exists")
610 % dest)
639 % dest)
611 raise
640 raise
612
641
613 revs = None
642 revs = None
614 if rev:
643 if rev:
615 if not srcpeer.capable('lookup'):
644 if not srcpeer.capable('lookup'):
616 raise error.Abort(_("src repository does not support "
645 raise error.Abort(_("src repository does not support "
617 "revision lookup and so doesn't "
646 "revision lookup and so doesn't "
618 "support clone by revision"))
647 "support clone by revision"))
619 revs = [srcpeer.lookup(r) for r in rev]
648 revs = [srcpeer.lookup(r) for r in rev]
620 checkout = revs[0]
649 checkout = revs[0]
621 local = destpeer.local()
650 local = destpeer.local()
622 if local:
651 if local:
623 if not stream:
652 if not stream:
624 if pull:
653 if pull:
625 stream = False
654 stream = False
626 else:
655 else:
627 stream = None
656 stream = None
628 # internal config: ui.quietbookmarkmove
657 # internal config: ui.quietbookmarkmove
629 overrides = {('ui', 'quietbookmarkmove'): True}
658 overrides = {('ui', 'quietbookmarkmove'): True}
630 with local.ui.configoverride(overrides, 'clone'):
659 with local.ui.configoverride(overrides, 'clone'):
631 exchange.pull(local, srcpeer, revs,
660 exchange.pull(local, srcpeer, revs,
632 streamclonerequested=stream)
661 streamclonerequested=stream)
633 elif srcrepo:
662 elif srcrepo:
634 exchange.push(srcrepo, destpeer, revs=revs,
663 exchange.push(srcrepo, destpeer, revs=revs,
635 bookmarks=srcrepo._bookmarks.keys())
664 bookmarks=srcrepo._bookmarks.keys())
636 else:
665 else:
637 raise error.Abort(_("clone from remote to remote not supported")
666 raise error.Abort(_("clone from remote to remote not supported")
638 )
667 )
639
668
640 cleandir = None
669 cleandir = None
641
670
642 destrepo = destpeer.local()
671 destrepo = destpeer.local()
643 if destrepo:
672 if destrepo:
644 template = uimod.samplehgrcs['cloned']
673 template = uimod.samplehgrcs['cloned']
645 fp = destrepo.vfs("hgrc", "wb")
674 fp = destrepo.vfs("hgrc", "wb")
646 u = util.url(abspath)
675 u = util.url(abspath)
647 u.passwd = None
676 u.passwd = None
648 defaulturl = bytes(u)
677 defaulturl = bytes(u)
649 fp.write(util.tonativeeol(template % defaulturl))
678 fp.write(util.tonativeeol(template % defaulturl))
650 fp.close()
679 fp.close()
651
680
652 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
681 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
653
682
654 if update:
683 if update:
655 if update is not True:
684 if update is not True:
656 checkout = srcpeer.lookup(update)
685 checkout = srcpeer.lookup(update)
657 uprev = None
686 uprev = None
658 status = None
687 status = None
659 if checkout is not None:
688 if checkout is not None:
660 try:
689 try:
661 uprev = destrepo.lookup(checkout)
690 uprev = destrepo.lookup(checkout)
662 except error.RepoLookupError:
691 except error.RepoLookupError:
663 if update is not True:
692 if update is not True:
664 try:
693 try:
665 uprev = destrepo.lookup(update)
694 uprev = destrepo.lookup(update)
666 except error.RepoLookupError:
695 except error.RepoLookupError:
667 pass
696 pass
668 if uprev is None:
697 if uprev is None:
669 try:
698 try:
670 uprev = destrepo._bookmarks['@']
699 uprev = destrepo._bookmarks['@']
671 update = '@'
700 update = '@'
672 bn = destrepo[uprev].branch()
701 bn = destrepo[uprev].branch()
673 if bn == 'default':
702 if bn == 'default':
674 status = _("updating to bookmark @\n")
703 status = _("updating to bookmark @\n")
675 else:
704 else:
676 status = (_("updating to bookmark @ on branch %s\n")
705 status = (_("updating to bookmark @ on branch %s\n")
677 % bn)
706 % bn)
678 except KeyError:
707 except KeyError:
679 try:
708 try:
680 uprev = destrepo.branchtip('default')
709 uprev = destrepo.branchtip('default')
681 except error.RepoLookupError:
710 except error.RepoLookupError:
682 uprev = destrepo.lookup('tip')
711 uprev = destrepo.lookup('tip')
683 if not status:
712 if not status:
684 bn = destrepo[uprev].branch()
713 bn = destrepo[uprev].branch()
685 status = _("updating to branch %s\n") % bn
714 status = _("updating to branch %s\n") % bn
686 destrepo.ui.status(status)
715 destrepo.ui.status(status)
687 _update(destrepo, uprev)
716 _update(destrepo, uprev)
688 if update in destrepo._bookmarks:
717 if update in destrepo._bookmarks:
689 bookmarks.activate(destrepo, update)
718 bookmarks.activate(destrepo, update)
690 finally:
719 finally:
691 release(srclock, destlock)
720 release(srclock, destlock)
692 if cleandir is not None:
721 if cleandir is not None:
693 shutil.rmtree(cleandir, True)
722 shutil.rmtree(cleandir, True)
694 if srcpeer is not None:
723 if srcpeer is not None:
695 srcpeer.close()
724 srcpeer.close()
696 return srcpeer, destpeer
725 return srcpeer, destpeer
697
726
698 def _showstats(repo, stats, quietempty=False):
727 def _showstats(repo, stats, quietempty=False):
699 if quietempty and not any(stats):
728 if quietempty and not any(stats):
700 return
729 return
701 repo.ui.status(_("%d files updated, %d files merged, "
730 repo.ui.status(_("%d files updated, %d files merged, "
702 "%d files removed, %d files unresolved\n") % stats)
731 "%d files removed, %d files unresolved\n") % stats)
703
732
704 def updaterepo(repo, node, overwrite, updatecheck=None):
733 def updaterepo(repo, node, overwrite, updatecheck=None):
705 """Update the working directory to node.
734 """Update the working directory to node.
706
735
707 When overwrite is set, changes are clobbered, merged else
736 When overwrite is set, changes are clobbered, merged else
708
737
709 returns stats (see pydoc mercurial.merge.applyupdates)"""
738 returns stats (see pydoc mercurial.merge.applyupdates)"""
710 return mergemod.update(repo, node, False, overwrite,
739 return mergemod.update(repo, node, False, overwrite,
711 labels=['working copy', 'destination'],
740 labels=['working copy', 'destination'],
712 updatecheck=updatecheck)
741 updatecheck=updatecheck)
713
742
714 def update(repo, node, quietempty=False, updatecheck=None):
743 def update(repo, node, quietempty=False, updatecheck=None):
715 """update the working directory to node"""
744 """update the working directory to node"""
716 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
745 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
717 _showstats(repo, stats, quietempty)
746 _showstats(repo, stats, quietempty)
718 if stats[3]:
747 if stats[3]:
719 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
748 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
720 return stats[3] > 0
749 return stats[3] > 0
721
750
722 # naming conflict in clone()
751 # naming conflict in clone()
723 _update = update
752 _update = update
724
753
725 def clean(repo, node, show_stats=True, quietempty=False):
754 def clean(repo, node, show_stats=True, quietempty=False):
726 """forcibly switch the working directory to node, clobbering changes"""
755 """forcibly switch the working directory to node, clobbering changes"""
727 stats = updaterepo(repo, node, True)
756 stats = updaterepo(repo, node, True)
728 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
757 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
729 if show_stats:
758 if show_stats:
730 _showstats(repo, stats, quietempty)
759 _showstats(repo, stats, quietempty)
731 return stats[3] > 0
760 return stats[3] > 0
732
761
733 # naming conflict in updatetotally()
762 # naming conflict in updatetotally()
734 _clean = clean
763 _clean = clean
735
764
736 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
765 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
737 """Update the working directory with extra care for non-file components
766 """Update the working directory with extra care for non-file components
738
767
739 This takes care of non-file components below:
768 This takes care of non-file components below:
740
769
741 :bookmark: might be advanced or (in)activated
770 :bookmark: might be advanced or (in)activated
742
771
743 This takes arguments below:
772 This takes arguments below:
744
773
745 :checkout: to which revision the working directory is updated
774 :checkout: to which revision the working directory is updated
746 :brev: a name, which might be a bookmark to be activated after updating
775 :brev: a name, which might be a bookmark to be activated after updating
747 :clean: whether changes in the working directory can be discarded
776 :clean: whether changes in the working directory can be discarded
748 :updatecheck: how to deal with a dirty working directory
777 :updatecheck: how to deal with a dirty working directory
749
778
750 Valid values for updatecheck are (None => linear):
779 Valid values for updatecheck are (None => linear):
751
780
752 * abort: abort if the working directory is dirty
781 * abort: abort if the working directory is dirty
753 * none: don't check (merge working directory changes into destination)
782 * none: don't check (merge working directory changes into destination)
754 * linear: check that update is linear before merging working directory
783 * linear: check that update is linear before merging working directory
755 changes into destination
784 changes into destination
756 * noconflict: check that the update does not result in file merges
785 * noconflict: check that the update does not result in file merges
757
786
758 This returns whether conflict is detected at updating or not.
787 This returns whether conflict is detected at updating or not.
759 """
788 """
760 if updatecheck is None:
789 if updatecheck is None:
761 updatecheck = ui.config('commands', 'update.check')
790 updatecheck = ui.config('commands', 'update.check')
762 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
791 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
763 # If not configured, or invalid value configured
792 # If not configured, or invalid value configured
764 updatecheck = 'linear'
793 updatecheck = 'linear'
765 with repo.wlock():
794 with repo.wlock():
766 movemarkfrom = None
795 movemarkfrom = None
767 warndest = False
796 warndest = False
768 if checkout is None:
797 if checkout is None:
769 updata = destutil.destupdate(repo, clean=clean)
798 updata = destutil.destupdate(repo, clean=clean)
770 checkout, movemarkfrom, brev = updata
799 checkout, movemarkfrom, brev = updata
771 warndest = True
800 warndest = True
772
801
773 if clean:
802 if clean:
774 ret = _clean(repo, checkout)
803 ret = _clean(repo, checkout)
775 else:
804 else:
776 if updatecheck == 'abort':
805 if updatecheck == 'abort':
777 cmdutil.bailifchanged(repo, merge=False)
806 cmdutil.bailifchanged(repo, merge=False)
778 updatecheck = 'none'
807 updatecheck = 'none'
779 ret = _update(repo, checkout, updatecheck=updatecheck)
808 ret = _update(repo, checkout, updatecheck=updatecheck)
780
809
781 if not ret and movemarkfrom:
810 if not ret and movemarkfrom:
782 if movemarkfrom == repo['.'].node():
811 if movemarkfrom == repo['.'].node():
783 pass # no-op update
812 pass # no-op update
784 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
813 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
785 b = ui.label(repo._activebookmark, 'bookmarks.active')
814 b = ui.label(repo._activebookmark, 'bookmarks.active')
786 ui.status(_("updating bookmark %s\n") % b)
815 ui.status(_("updating bookmark %s\n") % b)
787 else:
816 else:
788 # this can happen with a non-linear update
817 # this can happen with a non-linear update
789 b = ui.label(repo._activebookmark, 'bookmarks')
818 b = ui.label(repo._activebookmark, 'bookmarks')
790 ui.status(_("(leaving bookmark %s)\n") % b)
819 ui.status(_("(leaving bookmark %s)\n") % b)
791 bookmarks.deactivate(repo)
820 bookmarks.deactivate(repo)
792 elif brev in repo._bookmarks:
821 elif brev in repo._bookmarks:
793 if brev != repo._activebookmark:
822 if brev != repo._activebookmark:
794 b = ui.label(brev, 'bookmarks.active')
823 b = ui.label(brev, 'bookmarks.active')
795 ui.status(_("(activating bookmark %s)\n") % b)
824 ui.status(_("(activating bookmark %s)\n") % b)
796 bookmarks.activate(repo, brev)
825 bookmarks.activate(repo, brev)
797 elif brev:
826 elif brev:
798 if repo._activebookmark:
827 if repo._activebookmark:
799 b = ui.label(repo._activebookmark, 'bookmarks')
828 b = ui.label(repo._activebookmark, 'bookmarks')
800 ui.status(_("(leaving bookmark %s)\n") % b)
829 ui.status(_("(leaving bookmark %s)\n") % b)
801 bookmarks.deactivate(repo)
830 bookmarks.deactivate(repo)
802
831
803 if warndest:
832 if warndest:
804 destutil.statusotherdests(ui, repo)
833 destutil.statusotherdests(ui, repo)
805
834
806 return ret
835 return ret
807
836
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                            labels=labels)
    _showstats(repo, stats)
    # stats[3] is the number of files left unresolved by the merge.
    unresolvedcount = stats[3]
    if unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg update -C .' to abandon\n"))
    elif remind:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return unresolvedcount > 0
820
849
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.

    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    url, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, url)
    ui.status(_('comparing with %s\n') % util.hidepassword(url))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(
        ui, repo, other, revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = cmdutil.show_changeset(ui, other, opts, buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        # Always clean up the temporary bundle repo created by
        # getremotechanges(), even if display raised.
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
850
879
def incoming(ui, repo, source, opts):
    """Show changesets that would be pulled from ``source``.

    Returns the exit code computed by _incoming (zero when incoming
    changes were found).
    """
    def recursesubrepos():
        # Recurse into subrepos when --subrepos is given; the best
        # (lowest) exit code across all subrepos wins.
        exitcode = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for path in sorted(wctx.substate):
                sub = wctx.sub(path)
                exitcode = min(exitcode, sub.incoming(ui, source, opts))
        return exitcode

    def showchangesets(other, chlist, displayer):
        limit = cmdutil.loglimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        shown = 0
        for rnode in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(rnode)
                       if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                # skip merge changesets when --no-merges is given
                continue
            shown += 1
            displayer.show(other[rnode])
    return _incoming(showchangesets, recursesubrepos, ui, repo, source, opts)
875
904
def _outgoing(ui, repo, dest, opts):
    """Compute the changesets missing from ``dest``.

    Returns a pair (missing, other) where ``missing`` is the list of
    outgoing changeset nodes and ``other`` is the peer for ``dest``.
    Prints "no changes found" feedback when nothing is outgoing.
    """
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = parseurl(dest, opts.get('branch'))
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
891
920
def outgoing(ui, repo, dest, opts):
    """Show changesets not found in the destination repository.

    Returns zero when outgoing changes were displayed, otherwise the
    exit code from the subrepo recursion helper.
    """
    def subrecurse():
        # Recurse into subrepos when --subrepos is given; the best
        # (lowest) exit code across all subrepos wins.
        exitcode = 1
        if opts.get('subrepos'):
            wctx = repo[None]
            for path in sorted(wctx.substate):
                sub = wctx.sub(path)
                exitcode = min(exitcode, sub.outgoing(ui, dest, opts))
        return exitcode

    limit = cmdutil.loglimit(opts)
    missing, other = _outgoing(ui, repo, dest, opts)
    if not missing:
        # Still run the hooks so extensions see the (empty) result.
        cmdutil.outgoinghooks(ui, repo, other, opts, missing)
        return subrecurse()

    if opts.get('newest_first'):
        missing.reverse()
    ui.pager('outgoing')
    displayer = cmdutil.show_changeset(ui, repo, opts)
    shown = 0
    for rnode in missing:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(rnode) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            # skip merge changesets when --no-merges is given
            continue
        shown += 1
        displayer.show(repo[rnode])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, missing)
    subrecurse()
    return 0 # exit code is zero since we found outgoing changes
925
954
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # Find every revision touching .hgsubstate; pathto() is needed for the
    # -R case.
    substatepath = util.pathto(repo.root, repo.getcwd(), '.hgsubstate')
    revs = repo.revs("filelog(%s)", substatepath)

    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        # A failing subrepo verify makes the whole run fail.
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%s: %s\n') % (rev, e))
            except Exception:
                # Iterating substate itself can blow up on corrupt data.
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
954
983
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    # A repository carries a baseui stripped of repo-local configuration;
    # start from that so repo-specific settings do not leak to the remote.
    if util.safehasattr(src, 'baseui'): # looks like a repository
        dst = src.baseui.copy() # drop repo-specific config
        src = src.ui # copy target options from repo
    else: # assume it's a global ui object
        dst = src.copy() # keep all global options

    # ssh-specific options: explicit command-line opts win over config
    for opt in ('ssh', 'remotecmd'):
        value = opts.get(opt) or src.config('ui', opt)
        if value:
            dst.setconfig("ui", opt, value, 'copied')

    # bundle-specific options
    mainreporoot = src.config('bundle', 'mainreporoot')
    if mainreporoot:
        dst.setconfig('bundle', 'mainreporoot', mainreporoot, 'copied')

    # copy selected local settings to the remote ui (auth and TLS related)
    for section in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(section):
            dst.setconfig(section, key, val, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
983
1012
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Each entry is (repo attribute naming a directory, filename
# inside that directory); consumed by cachedlocalrepo._repostate().
foi = [('spath', '00changelog.i'),
       ('spath', 'phaseroots'), # ! phase can change content at the same size
       ('spath', 'obsstore'),
       ('path', 'bookmarks'), # ! bookmark can change content at the same size
       ]
992
1021
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        # Snapshot of (mtime, size) pairs for the files of interest and
        # the newest mtime among them; fetch() compares against this to
        # detect staleness.
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.

        state, mtime = self._repostate()
        if state == self._state:
            # Nothing of interest changed; reuse the cached instance.
            return self._repo, False

        # Stale: build a fresh repository object and re-apply the same
        # view filter the cached instance had.
        repo = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = repo.filtered(self._filtername)
        else:
            self._repo = repo.unfiltered()
        self._state = state
        self.mtime = mtime

        return self._repo, True

    def _repostate(self):
        """Return a snapshot of the repository's change-detection state.

        Returns a tuple (state, maxmtime) where ``state`` is a tuple of
        (mtime, size) pairs, one per entry in ``foi``, and ``maxmtime``
        is the most recent mtime observed among them.
        """
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            p = os.path.join(prefix, fname)
            try:
                st = os.stat(p)
            except OSError:
                # The file may not exist yet (e.g. no bookmarks file);
                # stat the containing directory instead so we still
                # notice when the file appears later.
                st = os.stat(prefix)
            state.append((st.st_mtime, st.st_size))
            maxmtime = max(maxmtime, st.st_mtime)

        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        c = cachedlocalrepo(repo)
        # Carry over the cached snapshot so the copy does not appear
        # stale immediately after copying.
        c._state = self._state
        c.mtime = self.mtime
        return c
General Comments 0
You need to be logged in to leave comments. Login now