##// END OF EJS Templates
share: add --relative flag to store a relative path to the source...
Dan Villiom Podlaski Christiansen -
r31133:23080c03 default
parent child Browse files
Show More
@@ -1,216 +1,220 b''
1 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
1 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 '''share a common history between several working directories
6 '''share a common history between several working directories
7
7
8 Automatic Pooled Storage for Clones
8 Automatic Pooled Storage for Clones
9 -----------------------------------
9 -----------------------------------
10
10
11 When this extension is active, :hg:`clone` can be configured to
11 When this extension is active, :hg:`clone` can be configured to
12 automatically share/pool storage across multiple clones. This
12 automatically share/pool storage across multiple clones. This
13 mode effectively converts :hg:`clone` to :hg:`clone` + :hg:`share`.
13 mode effectively converts :hg:`clone` to :hg:`clone` + :hg:`share`.
14 The benefit of using this mode is the automatic management of
14 The benefit of using this mode is the automatic management of
15 store paths and intelligent pooling of related repositories.
15 store paths and intelligent pooling of related repositories.
16
16
17 The following ``share.`` config options influence this feature:
17 The following ``share.`` config options influence this feature:
18
18
19 ``share.pool``
19 ``share.pool``
20 Filesystem path where shared repository data will be stored. When
20 Filesystem path where shared repository data will be stored. When
21 defined, :hg:`clone` will automatically use shared repository
21 defined, :hg:`clone` will automatically use shared repository
22 storage instead of creating a store inside each clone.
22 storage instead of creating a store inside each clone.
23
23
24 ``share.poolnaming``
24 ``share.poolnaming``
25 How directory names in ``share.pool`` are constructed.
25 How directory names in ``share.pool`` are constructed.
26
26
27 "identity" means the name is derived from the first changeset in the
27 "identity" means the name is derived from the first changeset in the
28 repository. In this mode, different remotes share storage if their
28 repository. In this mode, different remotes share storage if their
29 root/initial changeset is identical. In this mode, the local shared
29 root/initial changeset is identical. In this mode, the local shared
30 repository is an aggregate of all encountered remote repositories.
30 repository is an aggregate of all encountered remote repositories.
31
31
32 "remote" means the name is derived from the source repository's
32 "remote" means the name is derived from the source repository's
33 path or URL. In this mode, storage is only shared if the path or URL
33 path or URL. In this mode, storage is only shared if the path or URL
34 requested in the :hg:`clone` command matches exactly to a repository
34 requested in the :hg:`clone` command matches exactly to a repository
35 that was cloned before.
35 that was cloned before.
36
36
37 The default naming mode is "identity."
37 The default naming mode is "identity."
38 '''
38 '''
39
39
40 from __future__ import absolute_import
40 from __future__ import absolute_import
41
41
42 import errno
42 import errno
43 from mercurial.i18n import _
43 from mercurial.i18n import _
44 from mercurial import (
44 from mercurial import (
45 bookmarks,
45 bookmarks,
46 cmdutil,
46 cmdutil,
47 commands,
47 commands,
48 error,
48 error,
49 extensions,
49 extensions,
50 hg,
50 hg,
51 txnutil,
51 txnutil,
52 util,
52 util,
53 )
53 )
54
54
55 repository = hg.repository
55 repository = hg.repository
56 parseurl = hg.parseurl
56 parseurl = hg.parseurl
57
57
58 cmdtable = {}
58 cmdtable = {}
59 command = cmdutil.command(cmdtable)
59 command = cmdutil.command(cmdtable)
60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
60 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
61 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
62 # be specifying the version(s) of Mercurial they are tested with, or
62 # be specifying the version(s) of Mercurial they are tested with, or
63 # leave the attribute unspecified.
63 # leave the attribute unspecified.
64 testedwith = 'ships-with-hg-core'
64 testedwith = 'ships-with-hg-core'
65
65
66 @command('share',
66 @command('share',
67 [('U', 'noupdate', None, _('do not create a working directory')),
67 [('U', 'noupdate', None, _('do not create a working directory')),
68 ('B', 'bookmarks', None, _('also share bookmarks'))],
68 ('B', 'bookmarks', None, _('also share bookmarks')),
69 ('', 'relative', None, _('point to source using a relative path '
70 '(EXPERIMENTAL)')),
71 ],
69 _('[-U] [-B] SOURCE [DEST]'),
72 _('[-U] [-B] SOURCE [DEST]'),
70 norepo=True)
73 norepo=True)
71 def share(ui, source, dest=None, noupdate=False, bookmarks=False):
74 def share(ui, source, dest=None, noupdate=False, bookmarks=False,
75 relative=False):
72 """create a new shared repository
76 """create a new shared repository
73
77
74 Initialize a new repository and working directory that shares its
78 Initialize a new repository and working directory that shares its
75 history (and optionally bookmarks) with another repository.
79 history (and optionally bookmarks) with another repository.
76
80
77 .. note::
81 .. note::
78
82
79 using rollback or extensions that destroy/modify history (mq,
83 using rollback or extensions that destroy/modify history (mq,
80 rebase, etc.) can cause considerable confusion with shared
84 rebase, etc.) can cause considerable confusion with shared
81 clones. In particular, if two shared clones are both updated to
85 clones. In particular, if two shared clones are both updated to
82 the same changeset, and one of them destroys that changeset
86 the same changeset, and one of them destroys that changeset
83 with rollback, the other clone will suddenly stop working: all
87 with rollback, the other clone will suddenly stop working: all
84 operations will fail with "abort: working directory has unknown
88 operations will fail with "abort: working directory has unknown
85 parent". The only known workaround is to use debugsetparents on
89 parent". The only known workaround is to use debugsetparents on
86 the broken clone to reset it to a changeset that still exists.
90 the broken clone to reset it to a changeset that still exists.
87 """
91 """
88
92
89 return hg.share(ui, source, dest=dest, update=not noupdate,
93 return hg.share(ui, source, dest=dest, update=not noupdate,
90 bookmarks=bookmarks)
94 bookmarks=bookmarks, relative=relative)
91
95
92 @command('unshare', [], '')
96 @command('unshare', [], '')
93 def unshare(ui, repo):
97 def unshare(ui, repo):
94 """convert a shared repository to a normal one
98 """convert a shared repository to a normal one
95
99
96 Copy the store data to the repo and remove the sharedpath data.
100 Copy the store data to the repo and remove the sharedpath data.
97 """
101 """
98
102
99 if not repo.shared():
103 if not repo.shared():
100 raise error.Abort(_("this is not a shared repo"))
104 raise error.Abort(_("this is not a shared repo"))
101
105
102 destlock = lock = None
106 destlock = lock = None
103 lock = repo.lock()
107 lock = repo.lock()
104 try:
108 try:
105 # we use locks here because if we race with commit, we
109 # we use locks here because if we race with commit, we
106 # can end up with extra data in the cloned revlogs that's
110 # can end up with extra data in the cloned revlogs that's
107 # not pointed to by changesets, thus causing verify to
111 # not pointed to by changesets, thus causing verify to
108 # fail
112 # fail
109
113
110 destlock = hg.copystore(ui, repo, repo.path)
114 destlock = hg.copystore(ui, repo, repo.path)
111
115
112 sharefile = repo.join('sharedpath')
116 sharefile = repo.join('sharedpath')
113 util.rename(sharefile, sharefile + '.old')
117 util.rename(sharefile, sharefile + '.old')
114
118
115 repo.requirements.discard('sharedpath')
119 repo.requirements.discard('sharedpath')
116 repo._writerequirements()
120 repo._writerequirements()
117 finally:
121 finally:
118 destlock and destlock.release()
122 destlock and destlock.release()
119 lock and lock.release()
123 lock and lock.release()
120
124
121 # update store, spath, svfs and sjoin of repo
125 # update store, spath, svfs and sjoin of repo
122 repo.unfiltered().__init__(repo.baseui, repo.root)
126 repo.unfiltered().__init__(repo.baseui, repo.root)
123
127
124 # Wrap clone command to pass auto share options.
128 # Wrap clone command to pass auto share options.
125 def clone(orig, ui, source, *args, **opts):
129 def clone(orig, ui, source, *args, **opts):
126 pool = ui.config('share', 'pool', None)
130 pool = ui.config('share', 'pool', None)
127 if pool:
131 if pool:
128 pool = util.expandpath(pool)
132 pool = util.expandpath(pool)
129
133
130 opts['shareopts'] = dict(
134 opts['shareopts'] = dict(
131 pool=pool,
135 pool=pool,
132 mode=ui.config('share', 'poolnaming', 'identity'),
136 mode=ui.config('share', 'poolnaming', 'identity'),
133 )
137 )
134
138
135 return orig(ui, source, *args, **opts)
139 return orig(ui, source, *args, **opts)
136
140
137 def extsetup(ui):
141 def extsetup(ui):
138 extensions.wrapfunction(bookmarks, '_getbkfile', getbkfile)
142 extensions.wrapfunction(bookmarks, '_getbkfile', getbkfile)
139 extensions.wrapfunction(bookmarks.bmstore, 'recordchange', recordchange)
143 extensions.wrapfunction(bookmarks.bmstore, 'recordchange', recordchange)
140 extensions.wrapfunction(bookmarks.bmstore, '_writerepo', writerepo)
144 extensions.wrapfunction(bookmarks.bmstore, '_writerepo', writerepo)
141 extensions.wrapcommand(commands.table, 'clone', clone)
145 extensions.wrapcommand(commands.table, 'clone', clone)
142
146
143 def _hassharedbookmarks(repo):
147 def _hassharedbookmarks(repo):
144 """Returns whether this repo has shared bookmarks"""
148 """Returns whether this repo has shared bookmarks"""
145 try:
149 try:
146 shared = repo.vfs.read('shared').splitlines()
150 shared = repo.vfs.read('shared').splitlines()
147 except IOError as inst:
151 except IOError as inst:
148 if inst.errno != errno.ENOENT:
152 if inst.errno != errno.ENOENT:
149 raise
153 raise
150 return False
154 return False
151 return hg.sharedbookmarks in shared
155 return hg.sharedbookmarks in shared
152
156
153 def _getsrcrepo(repo):
157 def _getsrcrepo(repo):
154 """
158 """
155 Returns the source repository object for a given shared repository.
159 Returns the source repository object for a given shared repository.
156 If repo is not a shared repository, return None.
160 If repo is not a shared repository, return None.
157 """
161 """
158 if repo.sharedpath == repo.path:
162 if repo.sharedpath == repo.path:
159 return None
163 return None
160
164
161 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
165 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
162 return repo.srcrepo
166 return repo.srcrepo
163
167
164 # the sharedpath always ends in the .hg; we want the path to the repo
168 # the sharedpath always ends in the .hg; we want the path to the repo
165 source = repo.vfs.split(repo.sharedpath)[0]
169 source = repo.vfs.split(repo.sharedpath)[0]
166 srcurl, branches = parseurl(source)
170 srcurl, branches = parseurl(source)
167 srcrepo = repository(repo.ui, srcurl)
171 srcrepo = repository(repo.ui, srcurl)
168 repo.srcrepo = srcrepo
172 repo.srcrepo = srcrepo
169 return srcrepo
173 return srcrepo
170
174
171 def getbkfile(orig, repo):
175 def getbkfile(orig, repo):
172 if _hassharedbookmarks(repo):
176 if _hassharedbookmarks(repo):
173 srcrepo = _getsrcrepo(repo)
177 srcrepo = _getsrcrepo(repo)
174 if srcrepo is not None:
178 if srcrepo is not None:
175 # just orig(srcrepo) doesn't work as expected, because
179 # just orig(srcrepo) doesn't work as expected, because
176 # HG_PENDING refers repo.root.
180 # HG_PENDING refers repo.root.
177 try:
181 try:
178 fp, pending = txnutil.trypending(repo.root, repo.vfs,
182 fp, pending = txnutil.trypending(repo.root, repo.vfs,
179 'bookmarks')
183 'bookmarks')
180 if pending:
184 if pending:
181 # only in this case, bookmark information in repo
185 # only in this case, bookmark information in repo
182 # is up-to-date.
186 # is up-to-date.
183 return fp
187 return fp
184 fp.close()
188 fp.close()
185 except IOError as inst:
189 except IOError as inst:
186 if inst.errno != errno.ENOENT:
190 if inst.errno != errno.ENOENT:
187 raise
191 raise
188
192
189 # otherwise, we should read bookmarks from srcrepo,
193 # otherwise, we should read bookmarks from srcrepo,
190 # because .hg/bookmarks in srcrepo might be already
194 # because .hg/bookmarks in srcrepo might be already
191 # changed via another sharing repo
195 # changed via another sharing repo
192 repo = srcrepo
196 repo = srcrepo
193
197
194 # TODO: Pending changes in repo are still invisible in
198 # TODO: Pending changes in repo are still invisible in
195 # srcrepo, because bookmarks.pending is written only into repo.
199 # srcrepo, because bookmarks.pending is written only into repo.
196 # See also https://www.mercurial-scm.org/wiki/SharedRepository
200 # See also https://www.mercurial-scm.org/wiki/SharedRepository
197 return orig(repo)
201 return orig(repo)
198
202
199 def recordchange(orig, self, tr):
203 def recordchange(orig, self, tr):
200 # Continue with write to local bookmarks file as usual
204 # Continue with write to local bookmarks file as usual
201 orig(self, tr)
205 orig(self, tr)
202
206
203 if _hassharedbookmarks(self._repo):
207 if _hassharedbookmarks(self._repo):
204 srcrepo = _getsrcrepo(self._repo)
208 srcrepo = _getsrcrepo(self._repo)
205 if srcrepo is not None:
209 if srcrepo is not None:
206 category = 'share-bookmarks'
210 category = 'share-bookmarks'
207 tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
211 tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
208
212
209 def writerepo(orig, self, repo):
213 def writerepo(orig, self, repo):
210 # First write local bookmarks file in case we ever unshare
214 # First write local bookmarks file in case we ever unshare
211 orig(self, repo)
215 orig(self, repo)
212
216
213 if _hassharedbookmarks(self._repo):
217 if _hassharedbookmarks(self._repo):
214 srcrepo = _getsrcrepo(self._repo)
218 srcrepo = _getsrcrepo(self._repo)
215 if srcrepo is not None:
219 if srcrepo is not None:
216 orig(self, srcrepo)
220 orig(self, srcrepo)
@@ -1,108 +1,119 b''
1
1
2 Repositories contain a file (``.hg/requires``) containing a list of
2 Repositories contain a file (``.hg/requires``) containing a list of
3 features/capabilities that are *required* for clients to interface
3 features/capabilities that are *required* for clients to interface
4 with the repository. This file has been present in Mercurial since
4 with the repository. This file has been present in Mercurial since
5 version 0.9.2 (released December 2006).
5 version 0.9.2 (released December 2006).
6
6
7 One of the first things clients do when opening a repository is read
7 One of the first things clients do when opening a repository is read
8 ``.hg/requires`` and verify that all listed requirements are supported,
8 ``.hg/requires`` and verify that all listed requirements are supported,
9 aborting if not. Requirements are therefore a strong mechanism to
9 aborting if not. Requirements are therefore a strong mechanism to
10 prevent incompatible clients from reading from unknown repository
10 prevent incompatible clients from reading from unknown repository
11 formats or even corrupting them by writing to them.
11 formats or even corrupting them by writing to them.
12
12
13 Extensions may add requirements. When they do this, clients not running
13 Extensions may add requirements. When they do this, clients not running
14 an extension will be unable to read from repositories.
14 an extension will be unable to read from repositories.
15
15
16 The following sections describe the requirements defined by the
16 The following sections describe the requirements defined by the
17 Mercurial core distribution.
17 Mercurial core distribution.
18
18
19 revlogv1
19 revlogv1
20 ========
20 ========
21
21
22 When present, revlogs are version 1 (RevlogNG). RevlogNG was introduced
22 When present, revlogs are version 1 (RevlogNG). RevlogNG was introduced
23 in 2006. The ``revlogv1`` requirement has been enabled by default
23 in 2006. The ``revlogv1`` requirement has been enabled by default
24 since the ``requires`` file was introduced in Mercurial 0.9.2.
24 since the ``requires`` file was introduced in Mercurial 0.9.2.
25
25
26 If this requirement is not present, version 0 revlogs are assumed.
26 If this requirement is not present, version 0 revlogs are assumed.
27
27
28 store
28 store
29 =====
29 =====
30
30
31 The *store* repository layout should be used.
31 The *store* repository layout should be used.
32
32
33 This requirement has been enabled by default since the ``requires`` file
33 This requirement has been enabled by default since the ``requires`` file
34 was introduced in Mercurial 0.9.2.
34 was introduced in Mercurial 0.9.2.
35
35
36 fncache
36 fncache
37 =======
37 =======
38
38
39 The *fncache* repository layout should be used.
39 The *fncache* repository layout should be used.
40
40
41 The *fncache* layout hash encodes filenames with long paths and
41 The *fncache* layout hash encodes filenames with long paths and
42 encodes reserved filenames.
42 encodes reserved filenames.
43
43
44 This requirement is enabled by default when the *store* requirement is
44 This requirement is enabled by default when the *store* requirement is
45 enabled (which is the default behavior). It was introduced in Mercurial
45 enabled (which is the default behavior). It was introduced in Mercurial
46 1.1 (released December 2008).
46 1.1 (released December 2008).
47
47
48 shared
48 shared
49 ======
49 ======
50
50
51 Denotes that the store for a repository is shared from another location
51 Denotes that the store for a repository is shared from another location
52 (defined by the ``.hg/sharedpath`` file).
52 (defined by the ``.hg/sharedpath`` file).
53
53
54 This requirement is set when a repository is created via :hg:`share`.
54 This requirement is set when a repository is created via :hg:`share`.
55
55
56 The requirement was added in Mercurial 1.3 (released July 2009).
56 The requirement was added in Mercurial 1.3 (released July 2009).
57
57
58 relshared
59 =========
60
61 Derivative of ``shared``; the location of the store is relative to the
62 store of this repository.
63
64 This requirement is set when a repository is created via :hg:`share`
65 using the ``--relative`` option.
66
67 The requirement was added in Mercurial 4.2 (released May 2017).
68
58 dotencode
69 dotencode
59 =========
70 =========
60
71
61 The *dotencode* repository layout should be used.
72 The *dotencode* repository layout should be used.
62
73
63 The *dotencode* layout encodes the first period or space in filenames
74 The *dotencode* layout encodes the first period or space in filenames
64 to prevent issues on OS X and Windows.
75 to prevent issues on OS X and Windows.
65
76
66 This requirement is enabled by default when the *store* requirement
77 This requirement is enabled by default when the *store* requirement
67 is enabled (which is the default behavior). It was introduced in
78 is enabled (which is the default behavior). It was introduced in
68 Mercurial 1.7 (released November 2010).
79 Mercurial 1.7 (released November 2010).
69
80
70 parentdelta
81 parentdelta
71 ===========
82 ===========
72
83
73 Denotes a revlog delta encoding format that was experimental and
84 Denotes a revlog delta encoding format that was experimental and
74 replaced by *generaldelta*. It should not be seen in the wild because
85 replaced by *generaldelta*. It should not be seen in the wild because
75 it was never enabled by default.
86 it was never enabled by default.
76
87
77 This requirement was added in Mercurial 1.7 and removed in Mercurial
88 This requirement was added in Mercurial 1.7 and removed in Mercurial
78 1.9.
89 1.9.
79
90
80 generaldelta
91 generaldelta
81 ============
92 ============
82
93
83 Revlogs should be created with the *generaldelta* flag enabled. The
94 Revlogs should be created with the *generaldelta* flag enabled. The
84 generaldelta flag will cause deltas to be encoded against a parent
95 generaldelta flag will cause deltas to be encoded against a parent
85 revision instead of the previous revision in the revlog.
96 revision instead of the previous revision in the revlog.
86
97
87 Support for this requirement was added in Mercurial 1.9 (released
98 Support for this requirement was added in Mercurial 1.9 (released
88 July 2011). The requirement was disabled on new repositories by
99 July 2011). The requirement was disabled on new repositories by
89 default until Mercurial 3.7 (released February 2016).
100 default until Mercurial 3.7 (released February 2016).
90
101
91 manifestv2
102 manifestv2
92 ==========
103 ==========
93
104
94 Denotes that version 2 of manifests are being used.
105 Denotes that version 2 of manifests are being used.
95
106
96 Support for this requirement was added in Mercurial 3.4 (released
107 Support for this requirement was added in Mercurial 3.4 (released
97 May 2015). The requirement is currently experimental and is disabled
108 May 2015). The requirement is currently experimental and is disabled
98 by default.
109 by default.
99
110
100 treemanifest
111 treemanifest
101 ============
112 ============
102
113
103 Denotes that tree manifests are being used. Tree manifests are
114 Denotes that tree manifests are being used. Tree manifests are
104 one manifest per directory (as opposed to a single flat manifest).
115 one manifest per directory (as opposed to a single flat manifest).
105
116
106 Support for this requirement was added in Mercurial 3.4 (released
117 Support for this requirement was added in Mercurial 3.4 (released
107 August 2015). The requirement is currently experimental and is
118 August 2015). The requirement is currently experimental and is
108 disabled by default.
119 disabled by default.
@@ -1,1030 +1,1040 b''
1 # hg.py - repository classes for mercurial
1 # hg.py - repository classes for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import shutil
14 import shutil
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18
18
19 from . import (
19 from . import (
20 bookmarks,
20 bookmarks,
21 bundlerepo,
21 bundlerepo,
22 cmdutil,
22 cmdutil,
23 destutil,
23 destutil,
24 discovery,
24 discovery,
25 error,
25 error,
26 exchange,
26 exchange,
27 extensions,
27 extensions,
28 httppeer,
28 httppeer,
29 localrepo,
29 localrepo,
30 lock,
30 lock,
31 merge as mergemod,
31 merge as mergemod,
32 node,
32 node,
33 phases,
33 phases,
34 repoview,
34 repoview,
35 scmutil,
35 scmutil,
36 sshpeer,
36 sshpeer,
37 statichttprepo,
37 statichttprepo,
38 ui as uimod,
38 ui as uimod,
39 unionrepo,
39 unionrepo,
40 url,
40 url,
41 util,
41 util,
42 verify as verifymod,
42 verify as verifymod,
43 )
43 )
44
44
45 release = lock.release
45 release = lock.release
46
46
47 # shared features
47 # shared features
48 sharedbookmarks = 'bookmarks'
48 sharedbookmarks = 'bookmarks'
49
49
50 def _local(path):
50 def _local(path):
51 path = util.expandpath(util.urllocalpath(path))
51 path = util.expandpath(util.urllocalpath(path))
52 return (os.path.isfile(path) and bundlerepo or localrepo)
52 return (os.path.isfile(path) and bundlerepo or localrepo)
53
53
54 def addbranchrevs(lrepo, other, branches, revs):
54 def addbranchrevs(lrepo, other, branches, revs):
55 peer = other.peer() # a courtesy to callers using a localrepo for other
55 peer = other.peer() # a courtesy to callers using a localrepo for other
56 hashbranch, branches = branches
56 hashbranch, branches = branches
57 if not hashbranch and not branches:
57 if not hashbranch and not branches:
58 x = revs or None
58 x = revs or None
59 if util.safehasattr(revs, 'first'):
59 if util.safehasattr(revs, 'first'):
60 y = revs.first()
60 y = revs.first()
61 elif revs:
61 elif revs:
62 y = revs[0]
62 y = revs[0]
63 else:
63 else:
64 y = None
64 y = None
65 return x, y
65 return x, y
66 if revs:
66 if revs:
67 revs = list(revs)
67 revs = list(revs)
68 else:
68 else:
69 revs = []
69 revs = []
70
70
71 if not peer.capable('branchmap'):
71 if not peer.capable('branchmap'):
72 if branches:
72 if branches:
73 raise error.Abort(_("remote branch lookup not supported"))
73 raise error.Abort(_("remote branch lookup not supported"))
74 revs.append(hashbranch)
74 revs.append(hashbranch)
75 return revs, revs[0]
75 return revs, revs[0]
76 branchmap = peer.branchmap()
76 branchmap = peer.branchmap()
77
77
78 def primary(branch):
78 def primary(branch):
79 if branch == '.':
79 if branch == '.':
80 if not lrepo:
80 if not lrepo:
81 raise error.Abort(_("dirstate branch not accessible"))
81 raise error.Abort(_("dirstate branch not accessible"))
82 branch = lrepo.dirstate.branch()
82 branch = lrepo.dirstate.branch()
83 if branch in branchmap:
83 if branch in branchmap:
84 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
84 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
85 return True
85 return True
86 else:
86 else:
87 return False
87 return False
88
88
89 for branch in branches:
89 for branch in branches:
90 if not primary(branch):
90 if not primary(branch):
91 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
91 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
92 if hashbranch:
92 if hashbranch:
93 if not primary(hashbranch):
93 if not primary(hashbranch):
94 revs.append(hashbranch)
94 revs.append(hashbranch)
95 return revs, revs[0]
95 return revs, revs[0]
96
96
97 def parseurl(path, branches=None):
97 def parseurl(path, branches=None):
98 '''parse url#branch, returning (url, (branch, branches))'''
98 '''parse url#branch, returning (url, (branch, branches))'''
99
99
100 u = util.url(path)
100 u = util.url(path)
101 branch = None
101 branch = None
102 if u.fragment:
102 if u.fragment:
103 branch = u.fragment
103 branch = u.fragment
104 u.fragment = None
104 u.fragment = None
105 return str(u), (branch, branches or [])
105 return str(u), (branch, branches or [])
106
106
107 schemes = {
107 schemes = {
108 'bundle': bundlerepo,
108 'bundle': bundlerepo,
109 'union': unionrepo,
109 'union': unionrepo,
110 'file': _local,
110 'file': _local,
111 'http': httppeer,
111 'http': httppeer,
112 'https': httppeer,
112 'https': httppeer,
113 'ssh': sshpeer,
113 'ssh': sshpeer,
114 'static-http': statichttprepo,
114 'static-http': statichttprepo,
115 }
115 }
116
116
117 def _peerlookup(path):
117 def _peerlookup(path):
118 u = util.url(path)
118 u = util.url(path)
119 scheme = u.scheme or 'file'
119 scheme = u.scheme or 'file'
120 thing = schemes.get(scheme) or schemes['file']
120 thing = schemes.get(scheme) or schemes['file']
121 try:
121 try:
122 return thing(path)
122 return thing(path)
123 except TypeError:
123 except TypeError:
124 # we can't test callable(thing) because 'thing' can be an unloaded
124 # we can't test callable(thing) because 'thing' can be an unloaded
125 # module that implements __call__
125 # module that implements __call__
126 if not util.safehasattr(thing, 'instance'):
126 if not util.safehasattr(thing, 'instance'):
127 raise
127 raise
128 return thing
128 return thing
129
129
130 def islocal(repo):
130 def islocal(repo):
131 '''return true if repo (or path pointing to repo) is local'''
131 '''return true if repo (or path pointing to repo) is local'''
132 if isinstance(repo, str):
132 if isinstance(repo, str):
133 try:
133 try:
134 return _peerlookup(repo).islocal(repo)
134 return _peerlookup(repo).islocal(repo)
135 except AttributeError:
135 except AttributeError:
136 return False
136 return False
137 return repo.local()
137 return repo.local()
138
138
139 def openpath(ui, path):
139 def openpath(ui, path):
140 '''open path with open if local, url.open if remote'''
140 '''open path with open if local, url.open if remote'''
141 pathurl = util.url(path, parsequery=False, parsefragment=False)
141 pathurl = util.url(path, parsequery=False, parsefragment=False)
142 if pathurl.islocal():
142 if pathurl.islocal():
143 return util.posixfile(pathurl.localpath(), 'rb')
143 return util.posixfile(pathurl.localpath(), 'rb')
144 else:
144 else:
145 return url.open(ui, path)
145 return url.open(ui, path)
146
146
147 # a list of (ui, repo) functions called for wire peer initialization
147 # a list of (ui, repo) functions called for wire peer initialization
148 wirepeersetupfuncs = []
148 wirepeersetupfuncs = []
149
149
150 def _peerorrepo(ui, path, create=False):
150 def _peerorrepo(ui, path, create=False):
151 """return a repository object for the specified path"""
151 """return a repository object for the specified path"""
152 obj = _peerlookup(path).instance(ui, path, create)
152 obj = _peerlookup(path).instance(ui, path, create)
153 ui = getattr(obj, "ui", ui)
153 ui = getattr(obj, "ui", ui)
154 for name, module in extensions.extensions(ui):
154 for name, module in extensions.extensions(ui):
155 hook = getattr(module, 'reposetup', None)
155 hook = getattr(module, 'reposetup', None)
156 if hook:
156 if hook:
157 hook(ui, obj)
157 hook(ui, obj)
158 if not obj.local():
158 if not obj.local():
159 for f in wirepeersetupfuncs:
159 for f in wirepeersetupfuncs:
160 f(ui, obj)
160 f(ui, obj)
161 return obj
161 return obj
162
162
163 def repository(ui, path='', create=False):
163 def repository(ui, path='', create=False):
164 """return a repository object for the specified path"""
164 """return a repository object for the specified path"""
165 peer = _peerorrepo(ui, path, create)
165 peer = _peerorrepo(ui, path, create)
166 repo = peer.local()
166 repo = peer.local()
167 if not repo:
167 if not repo:
168 raise error.Abort(_("repository '%s' is not local") %
168 raise error.Abort(_("repository '%s' is not local") %
169 (path or peer.url()))
169 (path or peer.url()))
170 return repo.filtered('visible')
170 return repo.filtered('visible')
171
171
172 def peer(uiorrepo, opts, path, create=False):
172 def peer(uiorrepo, opts, path, create=False):
173 '''return a repository peer for the specified path'''
173 '''return a repository peer for the specified path'''
174 rui = remoteui(uiorrepo, opts)
174 rui = remoteui(uiorrepo, opts)
175 return _peerorrepo(rui, path, create).peer()
175 return _peerorrepo(rui, path, create).peer()
176
176
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest('foo')
    'foo'
    >>> defaultdest('/foo/bar')
    'bar'
    >>> defaultdest('/')
    ''
    >>> defaultdest('')
    ''
    >>> defaultdest('http://example.org/')
    ''
    >>> defaultdest('http://example.org/foo/')
    'foo'
    '''
    # Use the last path component of the URL; an empty path yields ''.
    path = util.url(source).path
    return os.path.basename(os.path.normpath(path)) if path else ''
197
197
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository

    ``source`` may be a path/URL string or a repository object; it must be
    local. ``dest`` defaults to the base name of the source. When
    ``relative`` is true, the path stored in ``.hg/sharedpath`` is made
    relative to the new repository (and the ``relshared`` requirement is
    recorded instead of ``shared``).
    '''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = scmutil.vfs(dest, realpath=True)
    destvfs = scmutil.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        # a missing 'requires' file is fine (old-style repo)
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ
            # on each path; os.path.relpath() cannot produce a relative
            # path in that case.
            raise error.Abort(_('cannot calculate relative path'),
                              hint=str(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
245
255
def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    # seed the new repo's default path from the caller or the source repo
    defaulturl = defaultpath or sourcerepo.ui.config('paths', 'default')
    if defaulturl:
        hgrcfile = destrepo.vfs("hgrc", "w", text=True)
        hgrcfile.write("[paths]\n")
        hgrcfile.write("default = %s\n" % defaulturl)
        hgrcfile.close()

    with destrepo.wlock():
        if bookmarks:
            # record that bookmarks are shared with the source
            sharedfile = destrepo.vfs('shared', 'w')
            sharedfile.write(sharedbookmarks + '\n')
            sharedfile.close()
267
277
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    # try the explicit checkout first, then fall back to default / tip
    for candidate in (checkout, 'default', 'tip'):
        if candidate is None:
            continue
        try:
            uprev = repo.lookup(candidate)
        except error.RepoLookupError:
            continue
        break
    _update(repo, uprev)
288
298
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        # topic of the last progress bar we need to close out; stored in a
        # one-element list so the nested prog() closure can mutate it
        closetopic = [None]
        def prog(topic, pos):
            # progress callback for util.copyfiles; pos is None when a
            # topic is finished
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = scmutil.vfs(srcrepo.sharedpath)
        dstvfs = scmutil.vfs(destpath)
        for f in srcrepo.store.copylist():
            # a publishing repo's phase data is implicit; don't copy it
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        # destlock stays held; the caller is responsible for releasing it
        return destlock
    except: # re-raises
        release(destlock)
        raise
334
344
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.

    Returns a (srcpeer, destpeer) pair, like clone().
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))
        revs = [srcpeer.lookup(r) for r in rev]

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = scmutil.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  rev=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
398
408
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory.  The
    source and destination are URLs, as passed to the repository
    function.  Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    rev: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branch = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branch = (None, branch or [])
        origsource = source = srcpeer.url()
    rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = scmutil.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    # Automatic pooled-storage mode: hand off to clonewithshare() when a
    # share pool is configured and the destination is local.
    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                rootnode = srcpeer.lookup('0')
                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, hashlib.sha1(source).hexdigest())
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=rev, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        # cleandir is removed in the finally block on failure; while it is
        # set to the whole destination, any error wipes the partial clone
        if islocal(dest):
            cleandir = dest

        # A plain filesystem copy is possible only for a full local clone
        # of a copyable repo with no secret changesets.
        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not rev

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                # fall back to a pull-based clone rather than waiting
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            # Recomputing branch cache might be slow on big repos,
            # so just copy it
            def copybranchcache(fname):
                # copy a single cache file if the source has it
                srcbranchcache = srcrepo.join('cache/%s' % fname)
                dstbranchcache = os.path.join(dstcachedir, fname)
                if os.path.exists(srcbranchcache):
                    if not os.path.exists(dstcachedir):
                        os.mkdir(dstcachedir)
                    util.copyfile(srcbranchcache, dstbranchcache)

            dstcachedir = os.path.join(destpath, 'cache')
            # In local clones we're copying all nodes, not just served
            # ones. Therefore copy all branch caches over.
            copybranchcache('branch2')
            for cachename in repoview.filtertable:
                copybranchcache('branch2-%s' % cachename)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            revs = None
            if rev:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))
                revs = [srcpeer.lookup(r) for r in rev]
                checkout = revs[0]
            local = destpeer.local()
            if local:
                if not stream:
                    if pull:
                        stream = False
                    else:
                        # None lets the server decide about streaming
                        stream = None
                # internal config: ui.quietbookmarkmove
                quiet = local.ui.backupconfig('ui', 'quietbookmarkmove')
                try:
                    local.ui.setconfig(
                        'ui', 'quietbookmarkmove', True, 'clone')
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
                finally:
                    local.ui.restoreconfig(quiet)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        # clone succeeded; don't delete the destination on exit
        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            # record the source as the default path in the new repo's hgrc
            template = uimod.samplehgrcs['cloned']
            fp = destrepo.vfs("hgrc", "w", text=True)
            u = util.url(abspath)
            u.passwd = None
            defaulturl = str(u)
            fp.write(template % defaulturl)
            fp.close()

            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if update:
                if update is not True:
                    checkout = srcpeer.lookup(update)
                uprev = None
                status = None
                if checkout is not None:
                    try:
                        uprev = destrepo.lookup(checkout)
                    except error.RepoLookupError:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    # no explicit revision: prefer the '@' bookmark, then
                    # the tip of the default branch, then repo tip
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer
677
687
def _showstats(repo, stats, quietempty=False):
    """Print update statistics (updated/merged/removed/unresolved counts).

    With quietempty, stay silent when all counts are zero.
    """
    suppress = quietempty and not any(stats)
    if not suppress:
        repo.ui.status(_("%d files updated, %d files merged, "
                         "%d files removed, %d files unresolved\n") % stats)
683
693
684 def updaterepo(repo, node, overwrite):
694 def updaterepo(repo, node, overwrite):
685 """Update the working directory to node.
695 """Update the working directory to node.
686
696
687 When overwrite is set, changes are clobbered, merged else
697 When overwrite is set, changes are clobbered, merged else
688
698
689 returns stats (see pydoc mercurial.merge.applyupdates)"""
699 returns stats (see pydoc mercurial.merge.applyupdates)"""
690 return mergemod.update(repo, node, False, overwrite,
700 return mergemod.update(repo, node, False, overwrite,
691 labels=['working copy', 'destination'])
701 labels=['working copy', 'destination'])
692
702
693 def update(repo, node, quietempty=False):
703 def update(repo, node, quietempty=False):
694 """update the working directory to node, merging linear changes"""
704 """update the working directory to node, merging linear changes"""
695 stats = updaterepo(repo, node, False)
705 stats = updaterepo(repo, node, False)
696 _showstats(repo, stats, quietempty)
706 _showstats(repo, stats, quietempty)
697 if stats[3]:
707 if stats[3]:
698 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
708 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
699 return stats[3] > 0
709 return stats[3] > 0
700
710
701 # naming conflict in clone()
711 # naming conflict in clone()
702 _update = update
712 _update = update
703
713
704 def clean(repo, node, show_stats=True, quietempty=False):
714 def clean(repo, node, show_stats=True, quietempty=False):
705 """forcibly switch the working directory to node, clobbering changes"""
715 """forcibly switch the working directory to node, clobbering changes"""
706 stats = updaterepo(repo, node, True)
716 stats = updaterepo(repo, node, True)
707 util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
717 util.unlinkpath(repo.join('graftstate'), ignoremissing=True)
708 if show_stats:
718 if show_stats:
709 _showstats(repo, stats, quietempty)
719 _showstats(repo, stats, quietempty)
710 return stats[3] > 0
720 return stats[3] > 0
711
721
712 # naming conflict in updatetotally()
722 # naming conflict in updatetotally()
713 _clean = clean
723 _clean = clean
714
724
715 def updatetotally(ui, repo, checkout, brev, clean=False, check=False):
725 def updatetotally(ui, repo, checkout, brev, clean=False, check=False):
716 """Update the working directory with extra care for non-file components
726 """Update the working directory with extra care for non-file components
717
727
718 This takes care of non-file components below:
728 This takes care of non-file components below:
719
729
720 :bookmark: might be advanced or (in)activated
730 :bookmark: might be advanced or (in)activated
721
731
722 This takes arguments below:
732 This takes arguments below:
723
733
724 :checkout: to which revision the working directory is updated
734 :checkout: to which revision the working directory is updated
725 :brev: a name, which might be a bookmark to be activated after updating
735 :brev: a name, which might be a bookmark to be activated after updating
726 :clean: whether changes in the working directory can be discarded
736 :clean: whether changes in the working directory can be discarded
727 :check: whether changes in the working directory should be checked
737 :check: whether changes in the working directory should be checked
728
738
729 This returns whether conflict is detected at updating or not.
739 This returns whether conflict is detected at updating or not.
730 """
740 """
731 with repo.wlock():
741 with repo.wlock():
732 movemarkfrom = None
742 movemarkfrom = None
733 warndest = False
743 warndest = False
734 if checkout is None:
744 if checkout is None:
735 updata = destutil.destupdate(repo, clean=clean)
745 updata = destutil.destupdate(repo, clean=clean)
736 checkout, movemarkfrom, brev = updata
746 checkout, movemarkfrom, brev = updata
737 warndest = True
747 warndest = True
738
748
739 if clean:
749 if clean:
740 ret = _clean(repo, checkout)
750 ret = _clean(repo, checkout)
741 else:
751 else:
742 if check:
752 if check:
743 cmdutil.bailifchanged(repo, merge=False)
753 cmdutil.bailifchanged(repo, merge=False)
744 ret = _update(repo, checkout)
754 ret = _update(repo, checkout)
745
755
746 if not ret and movemarkfrom:
756 if not ret and movemarkfrom:
747 if movemarkfrom == repo['.'].node():
757 if movemarkfrom == repo['.'].node():
748 pass # no-op update
758 pass # no-op update
749 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
759 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
750 b = ui.label(repo._activebookmark, 'bookmarks.active')
760 b = ui.label(repo._activebookmark, 'bookmarks.active')
751 ui.status(_("updating bookmark %s\n") % b)
761 ui.status(_("updating bookmark %s\n") % b)
752 else:
762 else:
753 # this can happen with a non-linear update
763 # this can happen with a non-linear update
754 b = ui.label(repo._activebookmark, 'bookmarks')
764 b = ui.label(repo._activebookmark, 'bookmarks')
755 ui.status(_("(leaving bookmark %s)\n") % b)
765 ui.status(_("(leaving bookmark %s)\n") % b)
756 bookmarks.deactivate(repo)
766 bookmarks.deactivate(repo)
757 elif brev in repo._bookmarks:
767 elif brev in repo._bookmarks:
758 if brev != repo._activebookmark:
768 if brev != repo._activebookmark:
759 b = ui.label(brev, 'bookmarks.active')
769 b = ui.label(brev, 'bookmarks.active')
760 ui.status(_("(activating bookmark %s)\n") % b)
770 ui.status(_("(activating bookmark %s)\n") % b)
761 bookmarks.activate(repo, brev)
771 bookmarks.activate(repo, brev)
762 elif brev:
772 elif brev:
763 if repo._activebookmark:
773 if repo._activebookmark:
764 b = ui.label(repo._activebookmark, 'bookmarks')
774 b = ui.label(repo._activebookmark, 'bookmarks')
765 ui.status(_("(leaving bookmark %s)\n") % b)
775 ui.status(_("(leaving bookmark %s)\n") % b)
766 bookmarks.deactivate(repo)
776 bookmarks.deactivate(repo)
767
777
768 if warndest:
778 if warndest:
769 destutil.statusotherdests(ui, repo)
779 destutil.statusotherdests(ui, repo)
770
780
771 return ret
781 return ret
772
782
773 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
783 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
774 """Branch merge with node, resolving changes. Return true if any
784 """Branch merge with node, resolving changes. Return true if any
775 unresolved conflicts."""
785 unresolved conflicts."""
776 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
786 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
777 labels=labels)
787 labels=labels)
778 _showstats(repo, stats)
788 _showstats(repo, stats)
779 if stats[3]:
789 if stats[3]:
780 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
790 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
781 "or 'hg update -C .' to abandon\n"))
791 "or 'hg update -C .' to abandon\n"))
782 elif remind:
792 elif remind:
783 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
793 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
784 return stats[3] > 0
794 return stats[3] > 0
785
795
786 def _incoming(displaychlist, subreporecurse, ui, repo, source,
796 def _incoming(displaychlist, subreporecurse, ui, repo, source,
787 opts, buffered=False):
797 opts, buffered=False):
788 """
798 """
789 Helper for incoming / gincoming.
799 Helper for incoming / gincoming.
790 displaychlist gets called with
800 displaychlist gets called with
791 (remoterepo, incomingchangesetlist, displayer) parameters,
801 (remoterepo, incomingchangesetlist, displayer) parameters,
792 and is supposed to contain only code that can't be unified.
802 and is supposed to contain only code that can't be unified.
793 """
803 """
794 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
804 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
795 other = peer(repo, opts, source)
805 other = peer(repo, opts, source)
796 ui.status(_('comparing with %s\n') % util.hidepassword(source))
806 ui.status(_('comparing with %s\n') % util.hidepassword(source))
797 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
807 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
798
808
799 if revs:
809 if revs:
800 revs = [other.lookup(rev) for rev in revs]
810 revs = [other.lookup(rev) for rev in revs]
801 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
811 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
802 revs, opts["bundle"], opts["force"])
812 revs, opts["bundle"], opts["force"])
803 try:
813 try:
804 if not chlist:
814 if not chlist:
805 ui.status(_("no changes found\n"))
815 ui.status(_("no changes found\n"))
806 return subreporecurse()
816 return subreporecurse()
807 ui.pager('incoming')
817 ui.pager('incoming')
808 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
818 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
809 displaychlist(other, chlist, displayer)
819 displaychlist(other, chlist, displayer)
810 displayer.close()
820 displayer.close()
811 finally:
821 finally:
812 cleanupfn()
822 cleanupfn()
813 subreporecurse()
823 subreporecurse()
814 return 0 # exit code is zero since we found incoming changes
824 return 0 # exit code is zero since we found incoming changes
815
825
816 def incoming(ui, repo, source, opts):
826 def incoming(ui, repo, source, opts):
817 def subreporecurse():
827 def subreporecurse():
818 ret = 1
828 ret = 1
819 if opts.get('subrepos'):
829 if opts.get('subrepos'):
820 ctx = repo[None]
830 ctx = repo[None]
821 for subpath in sorted(ctx.substate):
831 for subpath in sorted(ctx.substate):
822 sub = ctx.sub(subpath)
832 sub = ctx.sub(subpath)
823 ret = min(ret, sub.incoming(ui, source, opts))
833 ret = min(ret, sub.incoming(ui, source, opts))
824 return ret
834 return ret
825
835
826 def display(other, chlist, displayer):
836 def display(other, chlist, displayer):
827 limit = cmdutil.loglimit(opts)
837 limit = cmdutil.loglimit(opts)
828 if opts.get('newest_first'):
838 if opts.get('newest_first'):
829 chlist.reverse()
839 chlist.reverse()
830 count = 0
840 count = 0
831 for n in chlist:
841 for n in chlist:
832 if limit is not None and count >= limit:
842 if limit is not None and count >= limit:
833 break
843 break
834 parents = [p for p in other.changelog.parents(n) if p != nullid]
844 parents = [p for p in other.changelog.parents(n) if p != nullid]
835 if opts.get('no_merges') and len(parents) == 2:
845 if opts.get('no_merges') and len(parents) == 2:
836 continue
846 continue
837 count += 1
847 count += 1
838 displayer.show(other[n])
848 displayer.show(other[n])
839 return _incoming(display, subreporecurse, ui, repo, source, opts)
849 return _incoming(display, subreporecurse, ui, repo, source, opts)
840
850
841 def _outgoing(ui, repo, dest, opts):
851 def _outgoing(ui, repo, dest, opts):
842 dest = ui.expandpath(dest or 'default-push', dest or 'default')
852 dest = ui.expandpath(dest or 'default-push', dest or 'default')
843 dest, branches = parseurl(dest, opts.get('branch'))
853 dest, branches = parseurl(dest, opts.get('branch'))
844 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
854 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
845 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
855 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
846 if revs:
856 if revs:
847 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
857 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
848
858
849 other = peer(repo, opts, dest)
859 other = peer(repo, opts, dest)
850 outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
860 outgoing = discovery.findcommonoutgoing(repo.unfiltered(), other, revs,
851 force=opts.get('force'))
861 force=opts.get('force'))
852 o = outgoing.missing
862 o = outgoing.missing
853 if not o:
863 if not o:
854 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
864 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
855 return o, other
865 return o, other
856
866
857 def outgoing(ui, repo, dest, opts):
867 def outgoing(ui, repo, dest, opts):
858 def recurse():
868 def recurse():
859 ret = 1
869 ret = 1
860 if opts.get('subrepos'):
870 if opts.get('subrepos'):
861 ctx = repo[None]
871 ctx = repo[None]
862 for subpath in sorted(ctx.substate):
872 for subpath in sorted(ctx.substate):
863 sub = ctx.sub(subpath)
873 sub = ctx.sub(subpath)
864 ret = min(ret, sub.outgoing(ui, dest, opts))
874 ret = min(ret, sub.outgoing(ui, dest, opts))
865 return ret
875 return ret
866
876
867 limit = cmdutil.loglimit(opts)
877 limit = cmdutil.loglimit(opts)
868 o, other = _outgoing(ui, repo, dest, opts)
878 o, other = _outgoing(ui, repo, dest, opts)
869 if not o:
879 if not o:
870 cmdutil.outgoinghooks(ui, repo, other, opts, o)
880 cmdutil.outgoinghooks(ui, repo, other, opts, o)
871 return recurse()
881 return recurse()
872
882
873 if opts.get('newest_first'):
883 if opts.get('newest_first'):
874 o.reverse()
884 o.reverse()
875 ui.pager('outgoing')
885 ui.pager('outgoing')
876 displayer = cmdutil.show_changeset(ui, repo, opts)
886 displayer = cmdutil.show_changeset(ui, repo, opts)
877 count = 0
887 count = 0
878 for n in o:
888 for n in o:
879 if limit is not None and count >= limit:
889 if limit is not None and count >= limit:
880 break
890 break
881 parents = [p for p in repo.changelog.parents(n) if p != nullid]
891 parents = [p for p in repo.changelog.parents(n) if p != nullid]
882 if opts.get('no_merges') and len(parents) == 2:
892 if opts.get('no_merges') and len(parents) == 2:
883 continue
893 continue
884 count += 1
894 count += 1
885 displayer.show(repo[n])
895 displayer.show(repo[n])
886 displayer.close()
896 displayer.close()
887 cmdutil.outgoinghooks(ui, repo, other, opts, o)
897 cmdutil.outgoinghooks(ui, repo, other, opts, o)
888 recurse()
898 recurse()
889 return 0 # exit code is zero since we found outgoing changes
899 return 0 # exit code is zero since we found outgoing changes
890
900
891 def verify(repo):
901 def verify(repo):
892 """verify the consistency of a repository"""
902 """verify the consistency of a repository"""
893 ret = verifymod.verify(repo)
903 ret = verifymod.verify(repo)
894
904
895 # Broken subrepo references in hidden csets don't seem worth worrying about,
905 # Broken subrepo references in hidden csets don't seem worth worrying about,
896 # since they can't be pushed/pulled, and --hidden can be used if they are a
906 # since they can't be pushed/pulled, and --hidden can be used if they are a
897 # concern.
907 # concern.
898
908
899 # pathto() is needed for -R case
909 # pathto() is needed for -R case
900 revs = repo.revs("filelog(%s)",
910 revs = repo.revs("filelog(%s)",
901 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
911 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
902
912
903 if revs:
913 if revs:
904 repo.ui.status(_('checking subrepo links\n'))
914 repo.ui.status(_('checking subrepo links\n'))
905 for rev in revs:
915 for rev in revs:
906 ctx = repo[rev]
916 ctx = repo[rev]
907 try:
917 try:
908 for subpath in ctx.substate:
918 for subpath in ctx.substate:
909 try:
919 try:
910 ret = (ctx.sub(subpath, allowcreate=False).verify()
920 ret = (ctx.sub(subpath, allowcreate=False).verify()
911 or ret)
921 or ret)
912 except error.RepoError as e:
922 except error.RepoError as e:
913 repo.ui.warn(('%s: %s\n') % (rev, e))
923 repo.ui.warn(('%s: %s\n') % (rev, e))
914 except Exception:
924 except Exception:
915 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
925 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
916 node.short(ctx.node()))
926 node.short(ctx.node()))
917
927
918 return ret
928 return ret
919
929
920 def remoteui(src, opts):
930 def remoteui(src, opts):
921 'build a remote ui from ui or repo and opts'
931 'build a remote ui from ui or repo and opts'
922 if util.safehasattr(src, 'baseui'): # looks like a repository
932 if util.safehasattr(src, 'baseui'): # looks like a repository
923 dst = src.baseui.copy() # drop repo-specific config
933 dst = src.baseui.copy() # drop repo-specific config
924 src = src.ui # copy target options from repo
934 src = src.ui # copy target options from repo
925 else: # assume it's a global ui object
935 else: # assume it's a global ui object
926 dst = src.copy() # keep all global options
936 dst = src.copy() # keep all global options
927
937
928 # copy ssh-specific options
938 # copy ssh-specific options
929 for o in 'ssh', 'remotecmd':
939 for o in 'ssh', 'remotecmd':
930 v = opts.get(o) or src.config('ui', o)
940 v = opts.get(o) or src.config('ui', o)
931 if v:
941 if v:
932 dst.setconfig("ui", o, v, 'copied')
942 dst.setconfig("ui", o, v, 'copied')
933
943
934 # copy bundle-specific options
944 # copy bundle-specific options
935 r = src.config('bundle', 'mainreporoot')
945 r = src.config('bundle', 'mainreporoot')
936 if r:
946 if r:
937 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
947 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
938
948
939 # copy selected local settings to the remote ui
949 # copy selected local settings to the remote ui
940 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
950 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
941 for key, val in src.configitems(sect):
951 for key, val in src.configitems(sect):
942 dst.setconfig(sect, key, val, 'copied')
952 dst.setconfig(sect, key, val, 'copied')
943 v = src.config('web', 'cacerts')
953 v = src.config('web', 'cacerts')
944 if v:
954 if v:
945 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
955 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
946
956
947 return dst
957 return dst
948
958
949 # Files of interest
959 # Files of interest
950 # Used to check if the repository has changed looking at mtime and size of
960 # Used to check if the repository has changed looking at mtime and size of
951 # these files.
961 # these files.
952 foi = [('spath', '00changelog.i'),
962 foi = [('spath', '00changelog.i'),
953 ('spath', 'phaseroots'), # ! phase can change content at the same size
963 ('spath', 'phaseroots'), # ! phase can change content at the same size
954 ('spath', 'obsstore'),
964 ('spath', 'obsstore'),
955 ('path', 'bookmarks'), # ! bookmark can change content at the same size
965 ('path', 'bookmarks'), # ! bookmark can change content at the same size
956 ]
966 ]
957
967
958 class cachedlocalrepo(object):
968 class cachedlocalrepo(object):
959 """Holds a localrepository that can be cached and reused."""
969 """Holds a localrepository that can be cached and reused."""
960
970
961 def __init__(self, repo):
971 def __init__(self, repo):
962 """Create a new cached repo from an existing repo.
972 """Create a new cached repo from an existing repo.
963
973
964 We assume the passed in repo was recently created. If the
974 We assume the passed in repo was recently created. If the
965 repo has changed between when it was created and when it was
975 repo has changed between when it was created and when it was
966 turned into a cache, it may not refresh properly.
976 turned into a cache, it may not refresh properly.
967 """
977 """
968 assert isinstance(repo, localrepo.localrepository)
978 assert isinstance(repo, localrepo.localrepository)
969 self._repo = repo
979 self._repo = repo
970 self._state, self.mtime = self._repostate()
980 self._state, self.mtime = self._repostate()
971 self._filtername = repo.filtername
981 self._filtername = repo.filtername
972
982
973 def fetch(self):
983 def fetch(self):
974 """Refresh (if necessary) and return a repository.
984 """Refresh (if necessary) and return a repository.
975
985
976 If the cached instance is out of date, it will be recreated
986 If the cached instance is out of date, it will be recreated
977 automatically and returned.
987 automatically and returned.
978
988
979 Returns a tuple of the repo and a boolean indicating whether a new
989 Returns a tuple of the repo and a boolean indicating whether a new
980 repo instance was created.
990 repo instance was created.
981 """
991 """
982 # We compare the mtimes and sizes of some well-known files to
992 # We compare the mtimes and sizes of some well-known files to
983 # determine if the repo changed. This is not precise, as mtimes
993 # determine if the repo changed. This is not precise, as mtimes
984 # are susceptible to clock skew and imprecise filesystems and
994 # are susceptible to clock skew and imprecise filesystems and
985 # file content can change while maintaining the same size.
995 # file content can change while maintaining the same size.
986
996
987 state, mtime = self._repostate()
997 state, mtime = self._repostate()
988 if state == self._state:
998 if state == self._state:
989 return self._repo, False
999 return self._repo, False
990
1000
991 repo = repository(self._repo.baseui, self._repo.url())
1001 repo = repository(self._repo.baseui, self._repo.url())
992 if self._filtername:
1002 if self._filtername:
993 self._repo = repo.filtered(self._filtername)
1003 self._repo = repo.filtered(self._filtername)
994 else:
1004 else:
995 self._repo = repo.unfiltered()
1005 self._repo = repo.unfiltered()
996 self._state = state
1006 self._state = state
997 self.mtime = mtime
1007 self.mtime = mtime
998
1008
999 return self._repo, True
1009 return self._repo, True
1000
1010
1001 def _repostate(self):
1011 def _repostate(self):
1002 state = []
1012 state = []
1003 maxmtime = -1
1013 maxmtime = -1
1004 for attr, fname in foi:
1014 for attr, fname in foi:
1005 prefix = getattr(self._repo, attr)
1015 prefix = getattr(self._repo, attr)
1006 p = os.path.join(prefix, fname)
1016 p = os.path.join(prefix, fname)
1007 try:
1017 try:
1008 st = os.stat(p)
1018 st = os.stat(p)
1009 except OSError:
1019 except OSError:
1010 st = os.stat(prefix)
1020 st = os.stat(prefix)
1011 state.append((st.st_mtime, st.st_size))
1021 state.append((st.st_mtime, st.st_size))
1012 maxmtime = max(maxmtime, st.st_mtime)
1022 maxmtime = max(maxmtime, st.st_mtime)
1013
1023
1014 return tuple(state), maxmtime
1024 return tuple(state), maxmtime
1015
1025
1016 def copy(self):
1026 def copy(self):
1017 """Obtain a copy of this class instance.
1027 """Obtain a copy of this class instance.
1018
1028
1019 A new localrepository instance is obtained. The new instance should be
1029 A new localrepository instance is obtained. The new instance should be
1020 completely independent of the original.
1030 completely independent of the original.
1021 """
1031 """
1022 repo = repository(self._repo.baseui, self._repo.origroot)
1032 repo = repository(self._repo.baseui, self._repo.origroot)
1023 if self._filtername:
1033 if self._filtername:
1024 repo = repo.filtered(self._filtername)
1034 repo = repo.filtered(self._filtername)
1025 else:
1035 else:
1026 repo = repo.unfiltered()
1036 repo = repo.unfiltered()
1027 c = cachedlocalrepo(repo)
1037 c = cachedlocalrepo(repo)
1028 c._state = self._state
1038 c._state = self._state
1029 c.mtime = self.mtime
1039 c.mtime = self.mtime
1030 return c
1040 return c
@@ -1,2049 +1,2052 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 color,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 dirstateguard,
34 dirstateguard,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 repoview,
52 repoview,
53 revset,
53 revset,
54 revsetlang,
54 revsetlang,
55 scmutil,
55 scmutil,
56 store,
56 store,
57 subrepo,
57 subrepo,
58 tags as tagsmod,
58 tags as tagsmod,
59 transaction,
59 transaction,
60 txnutil,
60 txnutil,
61 util,
61 util,
62 )
62 )
63
63
64 release = lockmod.release
64 release = lockmod.release
65 urlerr = util.urlerr
65 urlerr = util.urlerr
66 urlreq = util.urlreq
66 urlreq = util.urlreq
67
67
68 class repofilecache(scmutil.filecache):
68 class repofilecache(scmutil.filecache):
69 """All filecache usage on repo are done for logic that should be unfiltered
69 """All filecache usage on repo are done for logic that should be unfiltered
70 """
70 """
71
71
72 def __get__(self, repo, type=None):
72 def __get__(self, repo, type=None):
73 if repo is None:
73 if repo is None:
74 return self
74 return self
75 return super(repofilecache, self).__get__(repo.unfiltered(), type)
75 return super(repofilecache, self).__get__(repo.unfiltered(), type)
76 def __set__(self, repo, value):
76 def __set__(self, repo, value):
77 return super(repofilecache, self).__set__(repo.unfiltered(), value)
77 return super(repofilecache, self).__set__(repo.unfiltered(), value)
78 def __delete__(self, repo):
78 def __delete__(self, repo):
79 return super(repofilecache, self).__delete__(repo.unfiltered())
79 return super(repofilecache, self).__delete__(repo.unfiltered())
80
80
81 class storecache(repofilecache):
81 class storecache(repofilecache):
82 """filecache for files in the store"""
82 """filecache for files in the store"""
83 def join(self, obj, fname):
83 def join(self, obj, fname):
84 return obj.sjoin(fname)
84 return obj.sjoin(fname)
85
85
86 class unfilteredpropertycache(util.propertycache):
86 class unfilteredpropertycache(util.propertycache):
87 """propertycache that apply to unfiltered repo only"""
87 """propertycache that apply to unfiltered repo only"""
88
88
89 def __get__(self, repo, type=None):
89 def __get__(self, repo, type=None):
90 unfi = repo.unfiltered()
90 unfi = repo.unfiltered()
91 if unfi is repo:
91 if unfi is repo:
92 return super(unfilteredpropertycache, self).__get__(unfi)
92 return super(unfilteredpropertycache, self).__get__(unfi)
93 return getattr(unfi, self.name)
93 return getattr(unfi, self.name)
94
94
95 class filteredpropertycache(util.propertycache):
95 class filteredpropertycache(util.propertycache):
96 """propertycache that must take filtering in account"""
96 """propertycache that must take filtering in account"""
97
97
98 def cachevalue(self, obj, value):
98 def cachevalue(self, obj, value):
99 object.__setattr__(obj, self.name, value)
99 object.__setattr__(obj, self.name, value)
100
100
101
101
102 def hasunfilteredcache(repo, name):
102 def hasunfilteredcache(repo, name):
103 """check if a repo has an unfilteredpropertycache value for <name>"""
103 """check if a repo has an unfilteredpropertycache value for <name>"""
104 return name in vars(repo.unfiltered())
104 return name in vars(repo.unfiltered())
105
105
106 def unfilteredmethod(orig):
106 def unfilteredmethod(orig):
107 """decorate method that always need to be run on unfiltered version"""
107 """decorate method that always need to be run on unfiltered version"""
108 def wrapper(repo, *args, **kwargs):
108 def wrapper(repo, *args, **kwargs):
109 return orig(repo.unfiltered(), *args, **kwargs)
109 return orig(repo.unfiltered(), *args, **kwargs)
110 return wrapper
110 return wrapper
111
111
112 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
112 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
113 'unbundle'))
113 'unbundle'))
114 legacycaps = moderncaps.union(set(['changegroupsubset']))
114 legacycaps = moderncaps.union(set(['changegroupsubset']))
115
115
116 class localpeer(peer.peerrepository):
116 class localpeer(peer.peerrepository):
117 '''peer for a local repo; reflects only the most recent API'''
117 '''peer for a local repo; reflects only the most recent API'''
118
118
119 def __init__(self, repo, caps=moderncaps):
119 def __init__(self, repo, caps=moderncaps):
120 peer.peerrepository.__init__(self)
120 peer.peerrepository.__init__(self)
121 self._repo = repo.filtered('served')
121 self._repo = repo.filtered('served')
122 self.ui = repo.ui
122 self.ui = repo.ui
123 self._caps = repo._restrictcapabilities(caps)
123 self._caps = repo._restrictcapabilities(caps)
124 self.requirements = repo.requirements
124 self.requirements = repo.requirements
125 self.supportedformats = repo.supportedformats
125 self.supportedformats = repo.supportedformats
126
126
127 def close(self):
127 def close(self):
128 self._repo.close()
128 self._repo.close()
129
129
130 def _capabilities(self):
130 def _capabilities(self):
131 return self._caps
131 return self._caps
132
132
133 def local(self):
133 def local(self):
134 return self._repo
134 return self._repo
135
135
136 def canpush(self):
136 def canpush(self):
137 return True
137 return True
138
138
139 def url(self):
139 def url(self):
140 return self._repo.url()
140 return self._repo.url()
141
141
142 def lookup(self, key):
142 def lookup(self, key):
143 return self._repo.lookup(key)
143 return self._repo.lookup(key)
144
144
145 def branchmap(self):
145 def branchmap(self):
146 return self._repo.branchmap()
146 return self._repo.branchmap()
147
147
148 def heads(self):
148 def heads(self):
149 return self._repo.heads()
149 return self._repo.heads()
150
150
151 def known(self, nodes):
151 def known(self, nodes):
152 return self._repo.known(nodes)
152 return self._repo.known(nodes)
153
153
    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        """Return an unbundler for the requested changesets.

        Builds the bundle chunks via exchange.getbundlechunks and wraps
        them in an unbundler object: a bundle2 unbundler when the caller
        advertised 'HG20' in *bundlecaps*, a changegroup-01 unbundler
        otherwise.
        """
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)
168
168
169 # TODO We might want to move the next two calls into legacypeer and add
169 # TODO We might want to move the next two calls into legacypeer and add
170 # unbundle instead.
170 # unbundle instead.
171
171
    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself.

        Returns the result of exchange.unbundle; for bundle2 responses the
        raw bundle20 object is rewrapped as an unbundler before returning.
        PushRaced errors are translated into ResponseError so callers see
        the same failure shape as over the wire."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                # re-raise the original failure after flushing salvaged output
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))
206
206
    def lock(self):
        """Acquire and return the lock of the wrapped repository."""
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        """Apply changegroup *cg* directly to the wrapped repository."""
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        """Forward a pushkey update to the wrapped repository."""
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        """List pushkeys in *namespace* of the wrapped repository."""
        return self._repo.listkeys(namespace)
218
218
219 def debugwireargs(self, one, two, three=None, four=None, five=None):
219 def debugwireargs(self, one, two, three=None, four=None, five=None):
220 '''used to test argument passing over the wire'''
220 '''used to test argument passing over the wire'''
221 return "%s %s %s %s %s" % (one, two, three, four, five)
221 return "%s %s %s %s %s" % (one, two, three, four, five)
222
222
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise only the restricted legacy capability set
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        """Legacy method: return branch information for *nodes*."""
        return self._repo.branches(nodes)

    def between(self, pairs):
        """Legacy method: return the nodes between each pair in *pairs*."""
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        """Legacy method: build a changegroup starting from *basenodes*."""
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        """Legacy method: build a changegroup limited to bases..heads."""
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
241
241
242 class localrepository(object):
242 class localrepository(object):
243
243
    # requirements that describe how data is stored (shared with streamclone)
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # full set of requirements this class can open; 'relshared' marks a
    # shared repo whose .hg/sharedpath is stored relative to the repo
    # (see the sharedpath handling in __init__)
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'relshared', 'dotencode'))
    # requirements forwarded to the store opener via svfs.options
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # name of the active repoview filter; None means unfiltered
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
254
254
    def __init__(self, baseui, path, create=False):
        """Open (or, with create=True, initialize) the repository at *path*.

        Raises error.RepoError when the repository is missing (create=False)
        or already exists (create=True).
        """
        self.requirements = set()
        # working-directory vfs rooted at the (expanded, real) path
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        # vfs rooted at .hg/
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            # a missing .hg/hgrc is fine
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run setup functions that belong to enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        # resolve .hg/sharedpath for shared repositories; with the
        # 'relshared' requirement the stored path is relative to .hg/
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = scmutil.vfs(sharedpath, realpath=True)

            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            # no sharedpath file means the repo is not shared
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
377
    def close(self):
        """Close the repository, flushing in-memory caches to disk."""
        self._writecaches()

    def _loadextensions(self):
        # load every extension enabled in this repository's configuration
        extensions.loadall(self.ui)

    def _writecaches(self):
        # the rev-branch cache is the only cache persisted here
        if self._revbranchcache:
            self._revbranchcache.write()
384
387
    def _restrictcapabilities(self, caps):
        """Adjust the capability set advertised to peers.

        When experimental.bundle2-advertise is enabled (the default), the
        repo's encoded bundle2 capabilities are added as a quoted
        'bundle2=' capability.
        """
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps
391
394
    def _applyopenerreqs(self):
        """Populate svfs.options from requirements and ui configuration.

        These options tune revlog storage behavior (cache sizes, delta
        chain limits, compression engine) for the store opener.
        """
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        # pick the compression engine requested via an exp-compression-*
        # requirement, if any
        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]
416
419
    def _writerequirements(self):
        # persist the current requirement set to .hg/requires
        scmutil.writerequires(self.vfs, self.requirements)
419
422
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        *path* is an absolute filesystem path; it is legal only when it
        falls under a subrepository recorded in the working directory's
        .hgsub state (checked recursively through nested subrepos).
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # the path itself is a subrepo: legal
                    return True
                else:
                    # the path lives inside a subrepo: ask that subrepo
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter ancestor prefix
                parts.pop()
        return False
457
460
    def peer(self):
        """Return a localpeer wrapping this repository."""
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
474
477
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # bookmark store, invalidated when either backing file changes
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        # the bookmark store's notion of the active bookmark
        return self._bookmarks.active
482
485
483 def bookmarkheads(self, bookmark):
486 def bookmarkheads(self, bookmark):
484 name = bookmark.split('@', 1)[0]
487 name = bookmark.split('@', 1)[0]
485 heads = []
488 heads = []
486 for mark, n in self._bookmarks.iteritems():
489 for mark, n in self._bookmarks.iteritems():
487 if mark.split('@', 1)[0] == name:
490 if mark.split('@', 1)[0] == name:
488 heads.append(n)
491 heads.append(n)
489 return heads
492 return heads
490
493
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        """Cache of phase information, seeded from self._phasedefaults."""
        return phases.phasecache(self, self._phasedefaults)
497
500
    @storecache('obsstore')
    def obsstore(self):
        """Return the obsolescence-marker store for this repository.

        The store is opened read-only unless the createmarkers option of
        the obsolete feature is enabled; a warning is printed when markers
        exist but the feature is off.
        """
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
515
518
    @storecache('00changelog.i')
    def changelog(self):
        """Return the changelog, including any pending (transaction) data."""
        c = changelog.changelog(self.svfs)
        # pick up revisions written by an in-progress transaction, if any
        if txnutil.mayhavepending(self.root):
            c.readpending('00changelog.i.a')
        return c
522
525
    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        """Return the manifest log backed by the store vfs."""
        return manifest.manifestlog(self.svfs, self)
532
535
    @repofilecache('dirstate')
    def dirstate(self):
        """Return the dirstate, validating parents via _dirstatevalidate."""
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        """Return *node* if it exists in the changelog, else nullid.

        An unknown working-directory parent triggers a one-time warning
        (tracked via self._dirstatevalidatewarned).
        """
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
548
551
    def __getitem__(self, changeid):
        """Return the context for *changeid*.

        None or wdirrev yields the working context; a slice yields a list
        of changectx objects (excluding filtered revisions); anything else
        is resolved to a single changectx.
        """
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
557
560
    def __contains__(self, changeid):
        """Return True if *changeid* resolves to a revision in this repo."""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        """Return the number of revisions in the changelog."""
        return len(self.changelog)

    def __iter__(self):
        """Iterate over changelog revision numbers."""
        return iter(self.changelog)
573
576
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
590
593
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
602
605
    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)
614
617
    def url(self):
        """Return the file: URL of this repository."""
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
626
629
627 @unfilteredmethod
630 @unfilteredmethod
628 def _tag(self, names, node, message, local, user, date, extra=None,
631 def _tag(self, names, node, message, local, user, date, extra=None,
629 editor=False):
632 editor=False):
630 if isinstance(names, str):
633 if isinstance(names, str):
631 names = (names,)
634 names = (names,)
632
635
633 branches = self.branchmap()
636 branches = self.branchmap()
634 for name in names:
637 for name in names:
635 self.hook('pretag', throw=True, node=hex(node), tag=name,
638 self.hook('pretag', throw=True, node=hex(node), tag=name,
636 local=local)
639 local=local)
637 if name in branches:
640 if name in branches:
638 self.ui.warn(_("warning: tag %s conflicts with existing"
641 self.ui.warn(_("warning: tag %s conflicts with existing"
639 " branch name\n") % name)
642 " branch name\n") % name)
640
643
641 def writetags(fp, names, munge, prevtags):
644 def writetags(fp, names, munge, prevtags):
642 fp.seek(0, 2)
645 fp.seek(0, 2)
643 if prevtags and prevtags[-1] != '\n':
646 if prevtags and prevtags[-1] != '\n':
644 fp.write('\n')
647 fp.write('\n')
645 for name in names:
648 for name in names:
646 if munge:
649 if munge:
647 m = munge(name)
650 m = munge(name)
648 else:
651 else:
649 m = name
652 m = name
650
653
651 if (self._tagscache.tagtypes and
654 if (self._tagscache.tagtypes and
652 name in self._tagscache.tagtypes):
655 name in self._tagscache.tagtypes):
653 old = self.tags().get(name, nullid)
656 old = self.tags().get(name, nullid)
654 fp.write('%s %s\n' % (hex(old), m))
657 fp.write('%s %s\n' % (hex(old), m))
655 fp.write('%s %s\n' % (hex(node), m))
658 fp.write('%s %s\n' % (hex(node), m))
656 fp.close()
659 fp.close()
657
660
658 prevtags = ''
661 prevtags = ''
659 if local:
662 if local:
660 try:
663 try:
661 fp = self.vfs('localtags', 'r+')
664 fp = self.vfs('localtags', 'r+')
662 except IOError:
665 except IOError:
663 fp = self.vfs('localtags', 'a')
666 fp = self.vfs('localtags', 'a')
664 else:
667 else:
665 prevtags = fp.read()
668 prevtags = fp.read()
666
669
667 # local tags are stored in the current charset
670 # local tags are stored in the current charset
668 writetags(fp, names, None, prevtags)
671 writetags(fp, names, None, prevtags)
669 for name in names:
672 for name in names:
670 self.hook('tag', node=hex(node), tag=name, local=local)
673 self.hook('tag', node=hex(node), tag=name, local=local)
671 return
674 return
672
675
673 try:
676 try:
674 fp = self.wfile('.hgtags', 'rb+')
677 fp = self.wfile('.hgtags', 'rb+')
675 except IOError as e:
678 except IOError as e:
676 if e.errno != errno.ENOENT:
679 if e.errno != errno.ENOENT:
677 raise
680 raise
678 fp = self.wfile('.hgtags', 'ab')
681 fp = self.wfile('.hgtags', 'ab')
679 else:
682 else:
680 prevtags = fp.read()
683 prevtags = fp.read()
681
684
682 # committed tags are stored in UTF-8
685 # committed tags are stored in UTF-8
683 writetags(fp, names, encoding.fromlocal, prevtags)
686 writetags(fp, names, encoding.fromlocal, prevtags)
684
687
685 fp.close()
688 fp.close()
686
689
687 self.invalidatecaches()
690 self.invalidatecaches()
688
691
689 if '.hgtags' not in self.dirstate:
692 if '.hgtags' not in self.dirstate:
690 self[None].add(['.hgtags'])
693 self[None].add(['.hgtags'])
691
694
692 m = matchmod.exact(self.root, '', ['.hgtags'])
695 m = matchmod.exact(self.root, '', ['.hgtags'])
693 tagnode = self.commit(message, user, date, extra=extra, match=m,
696 tagnode = self.commit(message, user, date, extra=extra, match=m,
694 editor=editor)
697 editor=editor)
695
698
696 for name in names:
699 for name in names:
697 self.hook('tag', node=hex(node), tag=name, local=local)
700 self.hook('tag', node=hex(node), tag=name, local=local)
698
701
699 return tagnode
702 return tagnode
700
703
701 def tag(self, names, node, message, local, user, date, editor=False):
704 def tag(self, names, node, message, local, user, date, editor=False):
702 '''tag a revision with one or more symbolic names.
705 '''tag a revision with one or more symbolic names.
703
706
704 names is a list of strings or, when adding a single tag, names may be a
707 names is a list of strings or, when adding a single tag, names may be a
705 string.
708 string.
706
709
707 if local is True, the tags are stored in a per-repository file.
710 if local is True, the tags are stored in a per-repository file.
708 otherwise, they are stored in the .hgtags file, and a new
711 otherwise, they are stored in the .hgtags file, and a new
709 changeset is committed with the change.
712 changeset is committed with the change.
710
713
711 keyword arguments:
714 keyword arguments:
712
715
713 local: whether to store tags in non-version-controlled file
716 local: whether to store tags in non-version-controlled file
714 (default False)
717 (default False)
715
718
716 message: commit message to use if committing
719 message: commit message to use if committing
717
720
718 user: name of user to use if committing
721 user: name of user to use if committing
719
722
720 date: date tuple to use if committing'''
723 date: date tuple to use if committing'''
721
724
722 if not local:
725 if not local:
723 m = matchmod.exact(self.root, '', ['.hgtags'])
726 m = matchmod.exact(self.root, '', ['.hgtags'])
724 if any(self.status(match=m, unknown=True, ignored=True)):
727 if any(self.status(match=m, unknown=True, ignored=True)):
725 raise error.Abort(_('working copy of .hgtags is changed'),
728 raise error.Abort(_('working copy of .hgtags is changed'),
726 hint=_('please commit .hgtags manually'))
729 hint=_('please commit .hgtags manually'))
727
730
728 self.tags() # instantiate the cache
731 self.tags() # instantiate the cache
729 self._tag(names, node, message, local, user, date, editor=editor)
732 self._tag(names, node, message, local, user, date, editor=editor)
730
733
731 @filteredpropertycache
734 @filteredpropertycache
732 def _tagscache(self):
735 def _tagscache(self):
733 '''Returns a tagscache object that contains various tags related
736 '''Returns a tagscache object that contains various tags related
734 caches.'''
737 caches.'''
735
738
736 # This simplifies its cache management by having one decorated
739 # This simplifies its cache management by having one decorated
737 # function (this one) and the rest simply fetch things from it.
740 # function (this one) and the rest simply fetch things from it.
738 class tagscache(object):
741 class tagscache(object):
739 def __init__(self):
742 def __init__(self):
740 # These two define the set of tags for this repository. tags
743 # These two define the set of tags for this repository. tags
741 # maps tag name to node; tagtypes maps tag name to 'global' or
744 # maps tag name to node; tagtypes maps tag name to 'global' or
742 # 'local'. (Global tags are defined by .hgtags across all
745 # 'local'. (Global tags are defined by .hgtags across all
743 # heads, and local tags are defined in .hg/localtags.)
746 # heads, and local tags are defined in .hg/localtags.)
744 # They constitute the in-memory cache of tags.
747 # They constitute the in-memory cache of tags.
745 self.tags = self.tagtypes = None
748 self.tags = self.tagtypes = None
746
749
747 self.nodetagscache = self.tagslist = None
750 self.nodetagscache = self.tagslist = None
748
751
749 cache = tagscache()
752 cache = tagscache()
750 cache.tags, cache.tagtypes = self._findtags()
753 cache.tags, cache.tagtypes = self._findtags()
751
754
752 return cache
755 return cache
753
756
754 def tags(self):
757 def tags(self):
755 '''return a mapping of tag to node'''
758 '''return a mapping of tag to node'''
756 t = {}
759 t = {}
757 if self.changelog.filteredrevs:
760 if self.changelog.filteredrevs:
758 tags, tt = self._findtags()
761 tags, tt = self._findtags()
759 else:
762 else:
760 tags = self._tagscache.tags
763 tags = self._tagscache.tags
761 for k, v in tags.iteritems():
764 for k, v in tags.iteritems():
762 try:
765 try:
763 # ignore tags to unknown nodes
766 # ignore tags to unknown nodes
764 self.changelog.rev(v)
767 self.changelog.rev(v)
765 t[k] = v
768 t[k] = v
766 except (error.LookupError, ValueError):
769 except (error.LookupError, ValueError):
767 pass
770 pass
768 return t
771 return t
769
772
770 def _findtags(self):
773 def _findtags(self):
771 '''Do the hard work of finding tags. Return a pair of dicts
774 '''Do the hard work of finding tags. Return a pair of dicts
772 (tags, tagtypes) where tags maps tag name to node, and tagtypes
775 (tags, tagtypes) where tags maps tag name to node, and tagtypes
773 maps tag name to a string like \'global\' or \'local\'.
776 maps tag name to a string like \'global\' or \'local\'.
774 Subclasses or extensions are free to add their own tags, but
777 Subclasses or extensions are free to add their own tags, but
775 should be aware that the returned dicts will be retained for the
778 should be aware that the returned dicts will be retained for the
776 duration of the localrepo object.'''
779 duration of the localrepo object.'''
777
780
778 # XXX what tagtype should subclasses/extensions use? Currently
781 # XXX what tagtype should subclasses/extensions use? Currently
779 # mq and bookmarks add tags, but do not set the tagtype at all.
782 # mq and bookmarks add tags, but do not set the tagtype at all.
780 # Should each extension invent its own tag type? Should there
783 # Should each extension invent its own tag type? Should there
781 # be one tagtype for all such "virtual" tags? Or is the status
784 # be one tagtype for all such "virtual" tags? Or is the status
782 # quo fine?
785 # quo fine?
783
786
784 alltags = {} # map tag name to (node, hist)
787 alltags = {} # map tag name to (node, hist)
785 tagtypes = {}
788 tagtypes = {}
786
789
787 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
790 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
788 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
791 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
789
792
790 # Build the return dicts. Have to re-encode tag names because
793 # Build the return dicts. Have to re-encode tag names because
791 # the tags module always uses UTF-8 (in order not to lose info
794 # the tags module always uses UTF-8 (in order not to lose info
792 # writing to the cache), but the rest of Mercurial wants them in
795 # writing to the cache), but the rest of Mercurial wants them in
793 # local encoding.
796 # local encoding.
794 tags = {}
797 tags = {}
795 for (name, (node, hist)) in alltags.iteritems():
798 for (name, (node, hist)) in alltags.iteritems():
796 if node != nullid:
799 if node != nullid:
797 tags[encoding.tolocal(name)] = node
800 tags[encoding.tolocal(name)] = node
798 tags['tip'] = self.changelog.tip()
801 tags['tip'] = self.changelog.tip()
799 tagtypes = dict([(encoding.tolocal(name), value)
802 tagtypes = dict([(encoding.tolocal(name), value)
800 for (name, value) in tagtypes.iteritems()])
803 for (name, value) in tagtypes.iteritems()])
801 return (tags, tagtypes)
804 return (tags, tagtypes)
802
805
803 def tagtype(self, tagname):
806 def tagtype(self, tagname):
804 '''
807 '''
805 return the type of the given tag. result can be:
808 return the type of the given tag. result can be:
806
809
807 'local' : a local tag
810 'local' : a local tag
808 'global' : a global tag
811 'global' : a global tag
809 None : tag does not exist
812 None : tag does not exist
810 '''
813 '''
811
814
812 return self._tagscache.tagtypes.get(tagname)
815 return self._tagscache.tagtypes.get(tagname)
813
816
814 def tagslist(self):
817 def tagslist(self):
815 '''return a list of tags ordered by revision'''
818 '''return a list of tags ordered by revision'''
816 if not self._tagscache.tagslist:
819 if not self._tagscache.tagslist:
817 l = []
820 l = []
818 for t, n in self.tags().iteritems():
821 for t, n in self.tags().iteritems():
819 l.append((self.changelog.rev(n), t, n))
822 l.append((self.changelog.rev(n), t, n))
820 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
823 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
821
824
822 return self._tagscache.tagslist
825 return self._tagscache.tagslist
823
826
824 def nodetags(self, node):
827 def nodetags(self, node):
825 '''return the tags associated with a node'''
828 '''return the tags associated with a node'''
826 if not self._tagscache.nodetagscache:
829 if not self._tagscache.nodetagscache:
827 nodetagscache = {}
830 nodetagscache = {}
828 for t, n in self._tagscache.tags.iteritems():
831 for t, n in self._tagscache.tags.iteritems():
829 nodetagscache.setdefault(n, []).append(t)
832 nodetagscache.setdefault(n, []).append(t)
830 for tags in nodetagscache.itervalues():
833 for tags in nodetagscache.itervalues():
831 tags.sort()
834 tags.sort()
832 self._tagscache.nodetagscache = nodetagscache
835 self._tagscache.nodetagscache = nodetagscache
833 return self._tagscache.nodetagscache.get(node, [])
836 return self._tagscache.nodetagscache.get(node, [])
834
837
835 def nodebookmarks(self, node):
838 def nodebookmarks(self, node):
836 """return the list of bookmarks pointing to the specified node"""
839 """return the list of bookmarks pointing to the specified node"""
837 marks = []
840 marks = []
838 for bookmark, n in self._bookmarks.iteritems():
841 for bookmark, n in self._bookmarks.iteritems():
839 if n == node:
842 if n == node:
840 marks.append(bookmark)
843 marks.append(bookmark)
841 return sorted(marks)
844 return sorted(marks)
842
845
843 def branchmap(self):
846 def branchmap(self):
844 '''returns a dictionary {branch: [branchheads]} with branchheads
847 '''returns a dictionary {branch: [branchheads]} with branchheads
845 ordered by increasing revision number'''
848 ordered by increasing revision number'''
846 branchmap.updatecache(self)
849 branchmap.updatecache(self)
847 return self._branchcaches[self.filtername]
850 return self._branchcaches[self.filtername]
848
851
849 @unfilteredmethod
852 @unfilteredmethod
850 def revbranchcache(self):
853 def revbranchcache(self):
851 if not self._revbranchcache:
854 if not self._revbranchcache:
852 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
855 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
853 return self._revbranchcache
856 return self._revbranchcache
854
857
855 def branchtip(self, branch, ignoremissing=False):
858 def branchtip(self, branch, ignoremissing=False):
856 '''return the tip node for a given branch
859 '''return the tip node for a given branch
857
860
858 If ignoremissing is True, then this method will not raise an error.
861 If ignoremissing is True, then this method will not raise an error.
859 This is helpful for callers that only expect None for a missing branch
862 This is helpful for callers that only expect None for a missing branch
860 (e.g. namespace).
863 (e.g. namespace).
861
864
862 '''
865 '''
863 try:
866 try:
864 return self.branchmap().branchtip(branch)
867 return self.branchmap().branchtip(branch)
865 except KeyError:
868 except KeyError:
866 if not ignoremissing:
869 if not ignoremissing:
867 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
870 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
868 else:
871 else:
869 pass
872 pass
870
873
871 def lookup(self, key):
874 def lookup(self, key):
872 return self[key].node()
875 return self[key].node()
873
876
874 def lookupbranch(self, key, remote=None):
877 def lookupbranch(self, key, remote=None):
875 repo = remote or self
878 repo = remote or self
876 if key in repo.branchmap():
879 if key in repo.branchmap():
877 return key
880 return key
878
881
879 repo = (remote and remote.local()) and remote or self
882 repo = (remote and remote.local()) and remote or self
880 return repo[key].branch()
883 return repo[key].branch()
881
884
882 def known(self, nodes):
885 def known(self, nodes):
883 cl = self.changelog
886 cl = self.changelog
884 nm = cl.nodemap
887 nm = cl.nodemap
885 filtered = cl.filteredrevs
888 filtered = cl.filteredrevs
886 result = []
889 result = []
887 for n in nodes:
890 for n in nodes:
888 r = nm.get(n)
891 r = nm.get(n)
889 resp = not (r is None or r in filtered)
892 resp = not (r is None or r in filtered)
890 result.append(resp)
893 result.append(resp)
891 return result
894 return result
892
895
893 def local(self):
896 def local(self):
894 return self
897 return self
895
898
896 def publishing(self):
899 def publishing(self):
897 # it's safe (and desirable) to trust the publish flag unconditionally
900 # it's safe (and desirable) to trust the publish flag unconditionally
898 # so that we don't finalize changes shared between users via ssh or nfs
901 # so that we don't finalize changes shared between users via ssh or nfs
899 return self.ui.configbool('phases', 'publish', True, untrusted=True)
902 return self.ui.configbool('phases', 'publish', True, untrusted=True)
900
903
901 def cancopy(self):
904 def cancopy(self):
902 # so statichttprepo's override of local() works
905 # so statichttprepo's override of local() works
903 if not self.local():
906 if not self.local():
904 return False
907 return False
905 if not self.publishing():
908 if not self.publishing():
906 return True
909 return True
907 # if publishing we can't copy if there is filtered content
910 # if publishing we can't copy if there is filtered content
908 return not self.filtered('visible').changelog.filteredrevs
911 return not self.filtered('visible').changelog.filteredrevs
909
912
910 def shared(self):
913 def shared(self):
911 '''the type of shared repository (None if not shared)'''
914 '''the type of shared repository (None if not shared)'''
912 if self.sharedpath != self.path:
915 if self.sharedpath != self.path:
913 return 'store'
916 return 'store'
914 return None
917 return None
915
918
916 def join(self, f, *insidef):
919 def join(self, f, *insidef):
917 return self.vfs.join(os.path.join(f, *insidef))
920 return self.vfs.join(os.path.join(f, *insidef))
918
921
919 def wjoin(self, f, *insidef):
922 def wjoin(self, f, *insidef):
920 return self.vfs.reljoin(self.root, f, *insidef)
923 return self.vfs.reljoin(self.root, f, *insidef)
921
924
922 def file(self, f):
925 def file(self, f):
923 if f[0] == '/':
926 if f[0] == '/':
924 f = f[1:]
927 f = f[1:]
925 return filelog.filelog(self.svfs, f)
928 return filelog.filelog(self.svfs, f)
926
929
927 def changectx(self, changeid):
930 def changectx(self, changeid):
928 return self[changeid]
931 return self[changeid]
929
932
930 def setparents(self, p1, p2=nullid):
933 def setparents(self, p1, p2=nullid):
931 self.dirstate.beginparentchange()
934 self.dirstate.beginparentchange()
932 copies = self.dirstate.setparents(p1, p2)
935 copies = self.dirstate.setparents(p1, p2)
933 pctx = self[p1]
936 pctx = self[p1]
934 if copies:
937 if copies:
935 # Adjust copy records, the dirstate cannot do it, it
938 # Adjust copy records, the dirstate cannot do it, it
936 # requires access to parents manifests. Preserve them
939 # requires access to parents manifests. Preserve them
937 # only for entries added to first parent.
940 # only for entries added to first parent.
938 for f in copies:
941 for f in copies:
939 if f not in pctx and copies[f] in pctx:
942 if f not in pctx and copies[f] in pctx:
940 self.dirstate.copy(copies[f], f)
943 self.dirstate.copy(copies[f], f)
941 if p2 == nullid:
944 if p2 == nullid:
942 for f, s in sorted(self.dirstate.copies().items()):
945 for f, s in sorted(self.dirstate.copies().items()):
943 if f not in pctx and s not in pctx:
946 if f not in pctx and s not in pctx:
944 self.dirstate.copy(None, f)
947 self.dirstate.copy(None, f)
945 self.dirstate.endparentchange()
948 self.dirstate.endparentchange()
946
949
947 def filectx(self, path, changeid=None, fileid=None):
950 def filectx(self, path, changeid=None, fileid=None):
948 """changeid can be a changeset revision, node, or tag.
951 """changeid can be a changeset revision, node, or tag.
949 fileid can be a file revision or node."""
952 fileid can be a file revision or node."""
950 return context.filectx(self, path, changeid, fileid)
953 return context.filectx(self, path, changeid, fileid)
951
954
952 def getcwd(self):
955 def getcwd(self):
953 return self.dirstate.getcwd()
956 return self.dirstate.getcwd()
954
957
955 def pathto(self, f, cwd=None):
958 def pathto(self, f, cwd=None):
956 return self.dirstate.pathto(f, cwd)
959 return self.dirstate.pathto(f, cwd)
957
960
958 def wfile(self, f, mode='r'):
961 def wfile(self, f, mode='r'):
959 return self.wvfs(f, mode)
962 return self.wvfs(f, mode)
960
963
961 def _link(self, f):
964 def _link(self, f):
962 return self.wvfs.islink(f)
965 return self.wvfs.islink(f)
963
966
964 def _loadfilter(self, filter):
967 def _loadfilter(self, filter):
965 if filter not in self.filterpats:
968 if filter not in self.filterpats:
966 l = []
969 l = []
967 for pat, cmd in self.ui.configitems(filter):
970 for pat, cmd in self.ui.configitems(filter):
968 if cmd == '!':
971 if cmd == '!':
969 continue
972 continue
970 mf = matchmod.match(self.root, '', [pat])
973 mf = matchmod.match(self.root, '', [pat])
971 fn = None
974 fn = None
972 params = cmd
975 params = cmd
973 for name, filterfn in self._datafilters.iteritems():
976 for name, filterfn in self._datafilters.iteritems():
974 if cmd.startswith(name):
977 if cmd.startswith(name):
975 fn = filterfn
978 fn = filterfn
976 params = cmd[len(name):].lstrip()
979 params = cmd[len(name):].lstrip()
977 break
980 break
978 if not fn:
981 if not fn:
979 fn = lambda s, c, **kwargs: util.filter(s, c)
982 fn = lambda s, c, **kwargs: util.filter(s, c)
980 # Wrap old filters not supporting keyword arguments
983 # Wrap old filters not supporting keyword arguments
981 if not inspect.getargspec(fn)[2]:
984 if not inspect.getargspec(fn)[2]:
982 oldfn = fn
985 oldfn = fn
983 fn = lambda s, c, **kwargs: oldfn(s, c)
986 fn = lambda s, c, **kwargs: oldfn(s, c)
984 l.append((mf, fn, params))
987 l.append((mf, fn, params))
985 self.filterpats[filter] = l
988 self.filterpats[filter] = l
986 return self.filterpats[filter]
989 return self.filterpats[filter]
987
990
988 def _filter(self, filterpats, filename, data):
991 def _filter(self, filterpats, filename, data):
989 for mf, fn, cmd in filterpats:
992 for mf, fn, cmd in filterpats:
990 if mf(filename):
993 if mf(filename):
991 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
994 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
992 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
995 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
993 break
996 break
994
997
995 return data
998 return data
996
999
997 @unfilteredpropertycache
1000 @unfilteredpropertycache
998 def _encodefilterpats(self):
1001 def _encodefilterpats(self):
999 return self._loadfilter('encode')
1002 return self._loadfilter('encode')
1000
1003
1001 @unfilteredpropertycache
1004 @unfilteredpropertycache
1002 def _decodefilterpats(self):
1005 def _decodefilterpats(self):
1003 return self._loadfilter('decode')
1006 return self._loadfilter('decode')
1004
1007
1005 def adddatafilter(self, name, filter):
1008 def adddatafilter(self, name, filter):
1006 self._datafilters[name] = filter
1009 self._datafilters[name] = filter
1007
1010
1008 def wread(self, filename):
1011 def wread(self, filename):
1009 if self._link(filename):
1012 if self._link(filename):
1010 data = self.wvfs.readlink(filename)
1013 data = self.wvfs.readlink(filename)
1011 else:
1014 else:
1012 data = self.wvfs.read(filename)
1015 data = self.wvfs.read(filename)
1013 return self._filter(self._encodefilterpats, filename, data)
1016 return self._filter(self._encodefilterpats, filename, data)
1014
1017
1015 def wwrite(self, filename, data, flags, backgroundclose=False):
1018 def wwrite(self, filename, data, flags, backgroundclose=False):
1016 """write ``data`` into ``filename`` in the working directory
1019 """write ``data`` into ``filename`` in the working directory
1017
1020
1018 This returns length of written (maybe decoded) data.
1021 This returns length of written (maybe decoded) data.
1019 """
1022 """
1020 data = self._filter(self._decodefilterpats, filename, data)
1023 data = self._filter(self._decodefilterpats, filename, data)
1021 if 'l' in flags:
1024 if 'l' in flags:
1022 self.wvfs.symlink(data, filename)
1025 self.wvfs.symlink(data, filename)
1023 else:
1026 else:
1024 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1027 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1025 if 'x' in flags:
1028 if 'x' in flags:
1026 self.wvfs.setflags(filename, False, True)
1029 self.wvfs.setflags(filename, False, True)
1027 return len(data)
1030 return len(data)
1028
1031
1029 def wwritedata(self, filename, data):
1032 def wwritedata(self, filename, data):
1030 return self._filter(self._decodefilterpats, filename, data)
1033 return self._filter(self._decodefilterpats, filename, data)
1031
1034
1032 def currenttransaction(self):
1035 def currenttransaction(self):
1033 """return the current transaction or None if non exists"""
1036 """return the current transaction or None if non exists"""
1034 if self._transref:
1037 if self._transref:
1035 tr = self._transref()
1038 tr = self._transref()
1036 else:
1039 else:
1037 tr = None
1040 tr = None
1038
1041
1039 if tr and tr.running():
1042 if tr and tr.running():
1040 return tr
1043 return tr
1041 return None
1044 return None
1042
1045
1043 def transaction(self, desc, report=None):
1046 def transaction(self, desc, report=None):
1044 if (self.ui.configbool('devel', 'all-warnings')
1047 if (self.ui.configbool('devel', 'all-warnings')
1045 or self.ui.configbool('devel', 'check-locks')):
1048 or self.ui.configbool('devel', 'check-locks')):
1046 if self._currentlock(self._lockref) is None:
1049 if self._currentlock(self._lockref) is None:
1047 raise error.ProgrammingError('transaction requires locking')
1050 raise error.ProgrammingError('transaction requires locking')
1048 tr = self.currenttransaction()
1051 tr = self.currenttransaction()
1049 if tr is not None:
1052 if tr is not None:
1050 return tr.nest()
1053 return tr.nest()
1051
1054
1052 # abort here if the journal already exists
1055 # abort here if the journal already exists
1053 if self.svfs.exists("journal"):
1056 if self.svfs.exists("journal"):
1054 raise error.RepoError(
1057 raise error.RepoError(
1055 _("abandoned transaction found"),
1058 _("abandoned transaction found"),
1056 hint=_("run 'hg recover' to clean up transaction"))
1059 hint=_("run 'hg recover' to clean up transaction"))
1057
1060
1058 idbase = "%.40f#%f" % (random.random(), time.time())
1061 idbase = "%.40f#%f" % (random.random(), time.time())
1059 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1062 txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
1060 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1063 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1061
1064
1062 self._writejournal(desc)
1065 self._writejournal(desc)
1063 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1066 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1064 if report:
1067 if report:
1065 rp = report
1068 rp = report
1066 else:
1069 else:
1067 rp = self.ui.warn
1070 rp = self.ui.warn
1068 vfsmap = {'plain': self.vfs} # root of .hg/
1071 vfsmap = {'plain': self.vfs} # root of .hg/
1069 # we must avoid cyclic reference between repo and transaction.
1072 # we must avoid cyclic reference between repo and transaction.
1070 reporef = weakref.ref(self)
1073 reporef = weakref.ref(self)
1071 def validate(tr):
1074 def validate(tr):
1072 """will run pre-closing hooks"""
1075 """will run pre-closing hooks"""
1073 reporef().hook('pretxnclose', throw=True,
1076 reporef().hook('pretxnclose', throw=True,
1074 txnname=desc, **tr.hookargs)
1077 txnname=desc, **tr.hookargs)
1075 def releasefn(tr, success):
1078 def releasefn(tr, success):
1076 repo = reporef()
1079 repo = reporef()
1077 if success:
1080 if success:
1078 # this should be explicitly invoked here, because
1081 # this should be explicitly invoked here, because
1079 # in-memory changes aren't written out at closing
1082 # in-memory changes aren't written out at closing
1080 # transaction, if tr.addfilegenerator (via
1083 # transaction, if tr.addfilegenerator (via
1081 # dirstate.write or so) isn't invoked while
1084 # dirstate.write or so) isn't invoked while
1082 # transaction running
1085 # transaction running
1083 repo.dirstate.write(None)
1086 repo.dirstate.write(None)
1084 else:
1087 else:
1085 # discard all changes (including ones already written
1088 # discard all changes (including ones already written
1086 # out) in this transaction
1089 # out) in this transaction
1087 repo.dirstate.restorebackup(None, prefix='journal.')
1090 repo.dirstate.restorebackup(None, prefix='journal.')
1088
1091
1089 repo.invalidate(clearfilecache=True)
1092 repo.invalidate(clearfilecache=True)
1090
1093
1091 tr = transaction.transaction(rp, self.svfs, vfsmap,
1094 tr = transaction.transaction(rp, self.svfs, vfsmap,
1092 "journal",
1095 "journal",
1093 "undo",
1096 "undo",
1094 aftertrans(renames),
1097 aftertrans(renames),
1095 self.store.createmode,
1098 self.store.createmode,
1096 validator=validate,
1099 validator=validate,
1097 releasefn=releasefn)
1100 releasefn=releasefn)
1098
1101
1099 tr.hookargs['txnid'] = txnid
1102 tr.hookargs['txnid'] = txnid
1100 # note: writing the fncache only during finalize mean that the file is
1103 # note: writing the fncache only during finalize mean that the file is
1101 # outdated when running hooks. As fncache is used for streaming clone,
1104 # outdated when running hooks. As fncache is used for streaming clone,
1102 # this is not expected to break anything that happen during the hooks.
1105 # this is not expected to break anything that happen during the hooks.
1103 tr.addfinalize('flush-fncache', self.store.write)
1106 tr.addfinalize('flush-fncache', self.store.write)
1104 def txnclosehook(tr2):
1107 def txnclosehook(tr2):
1105 """To be run if transaction is successful, will schedule a hook run
1108 """To be run if transaction is successful, will schedule a hook run
1106 """
1109 """
1107 # Don't reference tr2 in hook() so we don't hold a reference.
1110 # Don't reference tr2 in hook() so we don't hold a reference.
1108 # This reduces memory consumption when there are multiple
1111 # This reduces memory consumption when there are multiple
1109 # transactions per lock. This can likely go away if issue5045
1112 # transactions per lock. This can likely go away if issue5045
1110 # fixes the function accumulation.
1113 # fixes the function accumulation.
1111 hookargs = tr2.hookargs
1114 hookargs = tr2.hookargs
1112
1115
1113 def hook():
1116 def hook():
1114 reporef().hook('txnclose', throw=False, txnname=desc,
1117 reporef().hook('txnclose', throw=False, txnname=desc,
1115 **hookargs)
1118 **hookargs)
1116 reporef()._afterlock(hook)
1119 reporef()._afterlock(hook)
1117 tr.addfinalize('txnclose-hook', txnclosehook)
1120 tr.addfinalize('txnclose-hook', txnclosehook)
1118 def txnaborthook(tr2):
1121 def txnaborthook(tr2):
1119 """To be run if transaction is aborted
1122 """To be run if transaction is aborted
1120 """
1123 """
1121 reporef().hook('txnabort', throw=False, txnname=desc,
1124 reporef().hook('txnabort', throw=False, txnname=desc,
1122 **tr2.hookargs)
1125 **tr2.hookargs)
1123 tr.addabort('txnabort-hook', txnaborthook)
1126 tr.addabort('txnabort-hook', txnaborthook)
1124 # avoid eager cache invalidation. in-memory data should be identical
1127 # avoid eager cache invalidation. in-memory data should be identical
1125 # to stored data if transaction has no error.
1128 # to stored data if transaction has no error.
1126 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1129 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1127 self._transref = weakref.ref(tr)
1130 self._transref = weakref.ref(tr)
1128 return tr
1131 return tr
1129
1132
1130 def _journalfiles(self):
1133 def _journalfiles(self):
1131 return ((self.svfs, 'journal'),
1134 return ((self.svfs, 'journal'),
1132 (self.vfs, 'journal.dirstate'),
1135 (self.vfs, 'journal.dirstate'),
1133 (self.vfs, 'journal.branch'),
1136 (self.vfs, 'journal.branch'),
1134 (self.vfs, 'journal.desc'),
1137 (self.vfs, 'journal.desc'),
1135 (self.vfs, 'journal.bookmarks'),
1138 (self.vfs, 'journal.bookmarks'),
1136 (self.svfs, 'journal.phaseroots'))
1139 (self.svfs, 'journal.phaseroots'))
1137
1140
1138 def undofiles(self):
1141 def undofiles(self):
1139 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1142 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1140
1143
1141 def _writejournal(self, desc):
1144 def _writejournal(self, desc):
1142 self.dirstate.savebackup(None, prefix='journal.')
1145 self.dirstate.savebackup(None, prefix='journal.')
1143 self.vfs.write("journal.branch",
1146 self.vfs.write("journal.branch",
1144 encoding.fromlocal(self.dirstate.branch()))
1147 encoding.fromlocal(self.dirstate.branch()))
1145 self.vfs.write("journal.desc",
1148 self.vfs.write("journal.desc",
1146 "%d\n%s\n" % (len(self), desc))
1149 "%d\n%s\n" % (len(self), desc))
1147 self.vfs.write("journal.bookmarks",
1150 self.vfs.write("journal.bookmarks",
1148 self.vfs.tryread("bookmarks"))
1151 self.vfs.tryread("bookmarks"))
1149 self.svfs.write("journal.phaseroots",
1152 self.svfs.write("journal.phaseroots",
1150 self.svfs.tryread("phaseroots"))
1153 self.svfs.tryread("phaseroots"))
1151
1154
1152 def recover(self):
1155 def recover(self):
1153 with self.lock():
1156 with self.lock():
1154 if self.svfs.exists("journal"):
1157 if self.svfs.exists("journal"):
1155 self.ui.status(_("rolling back interrupted transaction\n"))
1158 self.ui.status(_("rolling back interrupted transaction\n"))
1156 vfsmap = {'': self.svfs,
1159 vfsmap = {'': self.svfs,
1157 'plain': self.vfs,}
1160 'plain': self.vfs,}
1158 transaction.rollback(self.svfs, vfsmap, "journal",
1161 transaction.rollback(self.svfs, vfsmap, "journal",
1159 self.ui.warn)
1162 self.ui.warn)
1160 self.invalidate()
1163 self.invalidate()
1161 return True
1164 return True
1162 else:
1165 else:
1163 self.ui.warn(_("no interrupted transaction available\n"))
1166 self.ui.warn(_("no interrupted transaction available\n"))
1164 return False
1167 return False
1165
1168
1166 def rollback(self, dryrun=False, force=False):
1169 def rollback(self, dryrun=False, force=False):
1167 wlock = lock = dsguard = None
1170 wlock = lock = dsguard = None
1168 try:
1171 try:
1169 wlock = self.wlock()
1172 wlock = self.wlock()
1170 lock = self.lock()
1173 lock = self.lock()
1171 if self.svfs.exists("undo"):
1174 if self.svfs.exists("undo"):
1172 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1175 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1173
1176
1174 return self._rollback(dryrun, force, dsguard)
1177 return self._rollback(dryrun, force, dsguard)
1175 else:
1178 else:
1176 self.ui.warn(_("no rollback information available\n"))
1179 self.ui.warn(_("no rollback information available\n"))
1177 return 1
1180 return 1
1178 finally:
1181 finally:
1179 release(dsguard, lock, wlock)
1182 release(dsguard, lock, wlock)
1180
1183
1181 @unfilteredmethod # Until we get smarter cache management
1184 @unfilteredmethod # Until we get smarter cache management
1182 def _rollback(self, dryrun, force, dsguard):
1185 def _rollback(self, dryrun, force, dsguard):
1183 ui = self.ui
1186 ui = self.ui
1184 try:
1187 try:
1185 args = self.vfs.read('undo.desc').splitlines()
1188 args = self.vfs.read('undo.desc').splitlines()
1186 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1189 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1187 if len(args) >= 3:
1190 if len(args) >= 3:
1188 detail = args[2]
1191 detail = args[2]
1189 oldtip = oldlen - 1
1192 oldtip = oldlen - 1
1190
1193
1191 if detail and ui.verbose:
1194 if detail and ui.verbose:
1192 msg = (_('repository tip rolled back to revision %s'
1195 msg = (_('repository tip rolled back to revision %s'
1193 ' (undo %s: %s)\n')
1196 ' (undo %s: %s)\n')
1194 % (oldtip, desc, detail))
1197 % (oldtip, desc, detail))
1195 else:
1198 else:
1196 msg = (_('repository tip rolled back to revision %s'
1199 msg = (_('repository tip rolled back to revision %s'
1197 ' (undo %s)\n')
1200 ' (undo %s)\n')
1198 % (oldtip, desc))
1201 % (oldtip, desc))
1199 except IOError:
1202 except IOError:
1200 msg = _('rolling back unknown transaction\n')
1203 msg = _('rolling back unknown transaction\n')
1201 desc = None
1204 desc = None
1202
1205
1203 if not force and self['.'] != self['tip'] and desc == 'commit':
1206 if not force and self['.'] != self['tip'] and desc == 'commit':
1204 raise error.Abort(
1207 raise error.Abort(
1205 _('rollback of last commit while not checked out '
1208 _('rollback of last commit while not checked out '
1206 'may lose data'), hint=_('use -f to force'))
1209 'may lose data'), hint=_('use -f to force'))
1207
1210
1208 ui.status(msg)
1211 ui.status(msg)
1209 if dryrun:
1212 if dryrun:
1210 return 0
1213 return 0
1211
1214
1212 parents = self.dirstate.parents()
1215 parents = self.dirstate.parents()
1213 self.destroying()
1216 self.destroying()
1214 vfsmap = {'plain': self.vfs, '': self.svfs}
1217 vfsmap = {'plain': self.vfs, '': self.svfs}
1215 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1218 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1216 if self.vfs.exists('undo.bookmarks'):
1219 if self.vfs.exists('undo.bookmarks'):
1217 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1220 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1218 if self.svfs.exists('undo.phaseroots'):
1221 if self.svfs.exists('undo.phaseroots'):
1219 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1222 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1220 self.invalidate()
1223 self.invalidate()
1221
1224
1222 parentgone = (parents[0] not in self.changelog.nodemap or
1225 parentgone = (parents[0] not in self.changelog.nodemap or
1223 parents[1] not in self.changelog.nodemap)
1226 parents[1] not in self.changelog.nodemap)
1224 if parentgone:
1227 if parentgone:
1225 # prevent dirstateguard from overwriting already restored one
1228 # prevent dirstateguard from overwriting already restored one
1226 dsguard.close()
1229 dsguard.close()
1227
1230
1228 self.dirstate.restorebackup(None, prefix='undo.')
1231 self.dirstate.restorebackup(None, prefix='undo.')
1229 try:
1232 try:
1230 branch = self.vfs.read('undo.branch')
1233 branch = self.vfs.read('undo.branch')
1231 self.dirstate.setbranch(encoding.tolocal(branch))
1234 self.dirstate.setbranch(encoding.tolocal(branch))
1232 except IOError:
1235 except IOError:
1233 ui.warn(_('named branch could not be reset: '
1236 ui.warn(_('named branch could not be reset: '
1234 'current branch is still \'%s\'\n')
1237 'current branch is still \'%s\'\n')
1235 % self.dirstate.branch())
1238 % self.dirstate.branch())
1236
1239
1237 parents = tuple([p.rev() for p in self[None].parents()])
1240 parents = tuple([p.rev() for p in self[None].parents()])
1238 if len(parents) > 1:
1241 if len(parents) > 1:
1239 ui.status(_('working directory now based on '
1242 ui.status(_('working directory now based on '
1240 'revisions %d and %d\n') % parents)
1243 'revisions %d and %d\n') % parents)
1241 else:
1244 else:
1242 ui.status(_('working directory now based on '
1245 ui.status(_('working directory now based on '
1243 'revision %d\n') % parents)
1246 'revision %d\n') % parents)
1244 mergemod.mergestate.clean(self, self['.'].node())
1247 mergemod.mergestate.clean(self, self['.'].node())
1245
1248
1246 # TODO: if we know which new heads may result from this rollback, pass
1249 # TODO: if we know which new heads may result from this rollback, pass
1247 # them to destroy(), which will prevent the branchhead cache from being
1250 # them to destroy(), which will prevent the branchhead cache from being
1248 # invalidated.
1251 # invalidated.
1249 self.destroyed()
1252 self.destroyed()
1250 return 0
1253 return 0
1251
1254
1252 def invalidatecaches(self):
1255 def invalidatecaches(self):
1253
1256
1254 if '_tagscache' in vars(self):
1257 if '_tagscache' in vars(self):
1255 # can't use delattr on proxy
1258 # can't use delattr on proxy
1256 del self.__dict__['_tagscache']
1259 del self.__dict__['_tagscache']
1257
1260
1258 self.unfiltered()._branchcaches.clear()
1261 self.unfiltered()._branchcaches.clear()
1259 self.invalidatevolatilesets()
1262 self.invalidatevolatilesets()
1260
1263
1261 def invalidatevolatilesets(self):
1264 def invalidatevolatilesets(self):
1262 self.filteredrevcache.clear()
1265 self.filteredrevcache.clear()
1263 obsolete.clearobscaches(self)
1266 obsolete.clearobscaches(self)
1264
1267
1265 def invalidatedirstate(self):
1268 def invalidatedirstate(self):
1266 '''Invalidates the dirstate, causing the next call to dirstate
1269 '''Invalidates the dirstate, causing the next call to dirstate
1267 to check if it was modified since the last time it was read,
1270 to check if it was modified since the last time it was read,
1268 rereading it if it has.
1271 rereading it if it has.
1269
1272
1270 This is different to dirstate.invalidate() that it doesn't always
1273 This is different to dirstate.invalidate() that it doesn't always
1271 rereads the dirstate. Use dirstate.invalidate() if you want to
1274 rereads the dirstate. Use dirstate.invalidate() if you want to
1272 explicitly read the dirstate again (i.e. restoring it to a previous
1275 explicitly read the dirstate again (i.e. restoring it to a previous
1273 known good state).'''
1276 known good state).'''
1274 if hasunfilteredcache(self, 'dirstate'):
1277 if hasunfilteredcache(self, 'dirstate'):
1275 for k in self.dirstate._filecache:
1278 for k in self.dirstate._filecache:
1276 try:
1279 try:
1277 delattr(self.dirstate, k)
1280 delattr(self.dirstate, k)
1278 except AttributeError:
1281 except AttributeError:
1279 pass
1282 pass
1280 delattr(self.unfiltered(), 'dirstate')
1283 delattr(self.unfiltered(), 'dirstate')
1281
1284
1282 def invalidate(self, clearfilecache=False):
1285 def invalidate(self, clearfilecache=False):
1283 '''Invalidates both store and non-store parts other than dirstate
1286 '''Invalidates both store and non-store parts other than dirstate
1284
1287
1285 If a transaction is running, invalidation of store is omitted,
1288 If a transaction is running, invalidation of store is omitted,
1286 because discarding in-memory changes might cause inconsistency
1289 because discarding in-memory changes might cause inconsistency
1287 (e.g. incomplete fncache causes unintentional failure, but
1290 (e.g. incomplete fncache causes unintentional failure, but
1288 redundant one doesn't).
1291 redundant one doesn't).
1289 '''
1292 '''
1290 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1293 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1291 for k in self._filecache.keys():
1294 for k in self._filecache.keys():
1292 # dirstate is invalidated separately in invalidatedirstate()
1295 # dirstate is invalidated separately in invalidatedirstate()
1293 if k == 'dirstate':
1296 if k == 'dirstate':
1294 continue
1297 continue
1295
1298
1296 if clearfilecache:
1299 if clearfilecache:
1297 del self._filecache[k]
1300 del self._filecache[k]
1298 try:
1301 try:
1299 delattr(unfiltered, k)
1302 delattr(unfiltered, k)
1300 except AttributeError:
1303 except AttributeError:
1301 pass
1304 pass
1302 self.invalidatecaches()
1305 self.invalidatecaches()
1303 if not self.currenttransaction():
1306 if not self.currenttransaction():
1304 # TODO: Changing contents of store outside transaction
1307 # TODO: Changing contents of store outside transaction
1305 # causes inconsistency. We should make in-memory store
1308 # causes inconsistency. We should make in-memory store
1306 # changes detectable, and abort if changed.
1309 # changes detectable, and abort if changed.
1307 self.store.invalidatecaches()
1310 self.store.invalidatecaches()
1308
1311
1309 def invalidateall(self):
1312 def invalidateall(self):
1310 '''Fully invalidates both store and non-store parts, causing the
1313 '''Fully invalidates both store and non-store parts, causing the
1311 subsequent operation to reread any outside changes.'''
1314 subsequent operation to reread any outside changes.'''
1312 # extension should hook this to invalidate its caches
1315 # extension should hook this to invalidate its caches
1313 self.invalidate()
1316 self.invalidate()
1314 self.invalidatedirstate()
1317 self.invalidatedirstate()
1315
1318
1316 @unfilteredmethod
1319 @unfilteredmethod
1317 def _refreshfilecachestats(self, tr):
1320 def _refreshfilecachestats(self, tr):
1318 """Reload stats of cached files so that they are flagged as valid"""
1321 """Reload stats of cached files so that they are flagged as valid"""
1319 for k, ce in self._filecache.items():
1322 for k, ce in self._filecache.items():
1320 if k == 'dirstate' or k not in self.__dict__:
1323 if k == 'dirstate' or k not in self.__dict__:
1321 continue
1324 continue
1322 ce.refresh()
1325 ce.refresh()
1323
1326
1324 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1327 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1325 inheritchecker=None, parentenvvar=None):
1328 inheritchecker=None, parentenvvar=None):
1326 parentlock = None
1329 parentlock = None
1327 # the contents of parentenvvar are used by the underlying lock to
1330 # the contents of parentenvvar are used by the underlying lock to
1328 # determine whether it can be inherited
1331 # determine whether it can be inherited
1329 if parentenvvar is not None:
1332 if parentenvvar is not None:
1330 parentlock = encoding.environ.get(parentenvvar)
1333 parentlock = encoding.environ.get(parentenvvar)
1331 try:
1334 try:
1332 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1335 l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
1333 acquirefn=acquirefn, desc=desc,
1336 acquirefn=acquirefn, desc=desc,
1334 inheritchecker=inheritchecker,
1337 inheritchecker=inheritchecker,
1335 parentlock=parentlock)
1338 parentlock=parentlock)
1336 except error.LockHeld as inst:
1339 except error.LockHeld as inst:
1337 if not wait:
1340 if not wait:
1338 raise
1341 raise
1339 # show more details for new-style locks
1342 # show more details for new-style locks
1340 if ':' in inst.locker:
1343 if ':' in inst.locker:
1341 host, pid = inst.locker.split(":", 1)
1344 host, pid = inst.locker.split(":", 1)
1342 self.ui.warn(
1345 self.ui.warn(
1343 _("waiting for lock on %s held by process %r "
1346 _("waiting for lock on %s held by process %r "
1344 "on host %r\n") % (desc, pid, host))
1347 "on host %r\n") % (desc, pid, host))
1345 else:
1348 else:
1346 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1349 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1347 (desc, inst.locker))
1350 (desc, inst.locker))
1348 # default to 600 seconds timeout
1351 # default to 600 seconds timeout
1349 l = lockmod.lock(vfs, lockname,
1352 l = lockmod.lock(vfs, lockname,
1350 int(self.ui.config("ui", "timeout", "600")),
1353 int(self.ui.config("ui", "timeout", "600")),
1351 releasefn=releasefn, acquirefn=acquirefn,
1354 releasefn=releasefn, acquirefn=acquirefn,
1352 desc=desc)
1355 desc=desc)
1353 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1356 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1354 return l
1357 return l
1355
1358
1356 def _afterlock(self, callback):
1359 def _afterlock(self, callback):
1357 """add a callback to be run when the repository is fully unlocked
1360 """add a callback to be run when the repository is fully unlocked
1358
1361
1359 The callback will be executed when the outermost lock is released
1362 The callback will be executed when the outermost lock is released
1360 (with wlock being higher level than 'lock')."""
1363 (with wlock being higher level than 'lock')."""
1361 for ref in (self._wlockref, self._lockref):
1364 for ref in (self._wlockref, self._lockref):
1362 l = ref and ref()
1365 l = ref and ref()
1363 if l and l.held:
1366 if l and l.held:
1364 l.postrelease.append(callback)
1367 l.postrelease.append(callback)
1365 break
1368 break
1366 else: # no lock have been found.
1369 else: # no lock have been found.
1367 callback()
1370 callback()
1368
1371
1369 def lock(self, wait=True):
1372 def lock(self, wait=True):
1370 '''Lock the repository store (.hg/store) and return a weak reference
1373 '''Lock the repository store (.hg/store) and return a weak reference
1371 to the lock. Use this before modifying the store (e.g. committing or
1374 to the lock. Use this before modifying the store (e.g. committing or
1372 stripping). If you are opening a transaction, get a lock as well.)
1375 stripping). If you are opening a transaction, get a lock as well.)
1373
1376
1374 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1377 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1375 'wlock' first to avoid a dead-lock hazard.'''
1378 'wlock' first to avoid a dead-lock hazard.'''
1376 l = self._currentlock(self._lockref)
1379 l = self._currentlock(self._lockref)
1377 if l is not None:
1380 if l is not None:
1378 l.lock()
1381 l.lock()
1379 return l
1382 return l
1380
1383
1381 l = self._lock(self.svfs, "lock", wait, None,
1384 l = self._lock(self.svfs, "lock", wait, None,
1382 self.invalidate, _('repository %s') % self.origroot)
1385 self.invalidate, _('repository %s') % self.origroot)
1383 self._lockref = weakref.ref(l)
1386 self._lockref = weakref.ref(l)
1384 return l
1387 return l
1385
1388
1386 def _wlockchecktransaction(self):
1389 def _wlockchecktransaction(self):
1387 if self.currenttransaction() is not None:
1390 if self.currenttransaction() is not None:
1388 raise error.LockInheritanceContractViolation(
1391 raise error.LockInheritanceContractViolation(
1389 'wlock cannot be inherited in the middle of a transaction')
1392 'wlock cannot be inherited in the middle of a transaction')
1390
1393
1391 def wlock(self, wait=True):
1394 def wlock(self, wait=True):
1392 '''Lock the non-store parts of the repository (everything under
1395 '''Lock the non-store parts of the repository (everything under
1393 .hg except .hg/store) and return a weak reference to the lock.
1396 .hg except .hg/store) and return a weak reference to the lock.
1394
1397
1395 Use this before modifying files in .hg.
1398 Use this before modifying files in .hg.
1396
1399
1397 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1400 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1398 'wlock' first to avoid a dead-lock hazard.'''
1401 'wlock' first to avoid a dead-lock hazard.'''
1399 l = self._wlockref and self._wlockref()
1402 l = self._wlockref and self._wlockref()
1400 if l is not None and l.held:
1403 if l is not None and l.held:
1401 l.lock()
1404 l.lock()
1402 return l
1405 return l
1403
1406
1404 # We do not need to check for non-waiting lock acquisition. Such
1407 # We do not need to check for non-waiting lock acquisition. Such
1405 # acquisition would not cause dead-lock as they would just fail.
1408 # acquisition would not cause dead-lock as they would just fail.
1406 if wait and (self.ui.configbool('devel', 'all-warnings')
1409 if wait and (self.ui.configbool('devel', 'all-warnings')
1407 or self.ui.configbool('devel', 'check-locks')):
1410 or self.ui.configbool('devel', 'check-locks')):
1408 if self._currentlock(self._lockref) is not None:
1411 if self._currentlock(self._lockref) is not None:
1409 self.ui.develwarn('"wlock" acquired after "lock"')
1412 self.ui.develwarn('"wlock" acquired after "lock"')
1410
1413
1411 def unlock():
1414 def unlock():
1412 if self.dirstate.pendingparentchange():
1415 if self.dirstate.pendingparentchange():
1413 self.dirstate.invalidate()
1416 self.dirstate.invalidate()
1414 else:
1417 else:
1415 self.dirstate.write(None)
1418 self.dirstate.write(None)
1416
1419
1417 self._filecache['dirstate'].refresh()
1420 self._filecache['dirstate'].refresh()
1418
1421
1419 l = self._lock(self.vfs, "wlock", wait, unlock,
1422 l = self._lock(self.vfs, "wlock", wait, unlock,
1420 self.invalidatedirstate, _('working directory of %s') %
1423 self.invalidatedirstate, _('working directory of %s') %
1421 self.origroot,
1424 self.origroot,
1422 inheritchecker=self._wlockchecktransaction,
1425 inheritchecker=self._wlockchecktransaction,
1423 parentenvvar='HG_WLOCK_LOCKER')
1426 parentenvvar='HG_WLOCK_LOCKER')
1424 self._wlockref = weakref.ref(l)
1427 self._wlockref = weakref.ref(l)
1425 return l
1428 return l
1426
1429
1427 def _currentlock(self, lockref):
1430 def _currentlock(self, lockref):
1428 """Returns the lock if it's held, or None if it's not."""
1431 """Returns the lock if it's held, or None if it's not."""
1429 if lockref is None:
1432 if lockref is None:
1430 return None
1433 return None
1431 l = lockref()
1434 l = lockref()
1432 if l is None or not l.held:
1435 if l is None or not l.held:
1433 return None
1436 return None
1434 return l
1437 return l
1435
1438
1436 def currentwlock(self):
1439 def currentwlock(self):
1437 """Returns the wlock if it's held, or None if it's not."""
1440 """Returns the wlock if it's held, or None if it's not."""
1438 return self._currentlock(self._wlockref)
1441 return self._currentlock(self._wlockref)
1439
1442
1440 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1443 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1441 """
1444 """
1442 commit an individual file as part of a larger transaction
1445 commit an individual file as part of a larger transaction
1443 """
1446 """
1444
1447
1445 fname = fctx.path()
1448 fname = fctx.path()
1446 fparent1 = manifest1.get(fname, nullid)
1449 fparent1 = manifest1.get(fname, nullid)
1447 fparent2 = manifest2.get(fname, nullid)
1450 fparent2 = manifest2.get(fname, nullid)
1448 if isinstance(fctx, context.filectx):
1451 if isinstance(fctx, context.filectx):
1449 node = fctx.filenode()
1452 node = fctx.filenode()
1450 if node in [fparent1, fparent2]:
1453 if node in [fparent1, fparent2]:
1451 self.ui.debug('reusing %s filelog entry\n' % fname)
1454 self.ui.debug('reusing %s filelog entry\n' % fname)
1452 if manifest1.flags(fname) != fctx.flags():
1455 if manifest1.flags(fname) != fctx.flags():
1453 changelist.append(fname)
1456 changelist.append(fname)
1454 return node
1457 return node
1455
1458
1456 flog = self.file(fname)
1459 flog = self.file(fname)
1457 meta = {}
1460 meta = {}
1458 copy = fctx.renamed()
1461 copy = fctx.renamed()
1459 if copy and copy[0] != fname:
1462 if copy and copy[0] != fname:
1460 # Mark the new revision of this file as a copy of another
1463 # Mark the new revision of this file as a copy of another
1461 # file. This copy data will effectively act as a parent
1464 # file. This copy data will effectively act as a parent
1462 # of this new revision. If this is a merge, the first
1465 # of this new revision. If this is a merge, the first
1463 # parent will be the nullid (meaning "look up the copy data")
1466 # parent will be the nullid (meaning "look up the copy data")
1464 # and the second one will be the other parent. For example:
1467 # and the second one will be the other parent. For example:
1465 #
1468 #
1466 # 0 --- 1 --- 3 rev1 changes file foo
1469 # 0 --- 1 --- 3 rev1 changes file foo
1467 # \ / rev2 renames foo to bar and changes it
1470 # \ / rev2 renames foo to bar and changes it
1468 # \- 2 -/ rev3 should have bar with all changes and
1471 # \- 2 -/ rev3 should have bar with all changes and
1469 # should record that bar descends from
1472 # should record that bar descends from
1470 # bar in rev2 and foo in rev1
1473 # bar in rev2 and foo in rev1
1471 #
1474 #
1472 # this allows this merge to succeed:
1475 # this allows this merge to succeed:
1473 #
1476 #
1474 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1477 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1475 # \ / merging rev3 and rev4 should use bar@rev2
1478 # \ / merging rev3 and rev4 should use bar@rev2
1476 # \- 2 --- 4 as the merge base
1479 # \- 2 --- 4 as the merge base
1477 #
1480 #
1478
1481
1479 cfname = copy[0]
1482 cfname = copy[0]
1480 crev = manifest1.get(cfname)
1483 crev = manifest1.get(cfname)
1481 newfparent = fparent2
1484 newfparent = fparent2
1482
1485
1483 if manifest2: # branch merge
1486 if manifest2: # branch merge
1484 if fparent2 == nullid or crev is None: # copied on remote side
1487 if fparent2 == nullid or crev is None: # copied on remote side
1485 if cfname in manifest2:
1488 if cfname in manifest2:
1486 crev = manifest2[cfname]
1489 crev = manifest2[cfname]
1487 newfparent = fparent1
1490 newfparent = fparent1
1488
1491
1489 # Here, we used to search backwards through history to try to find
1492 # Here, we used to search backwards through history to try to find
1490 # where the file copy came from if the source of a copy was not in
1493 # where the file copy came from if the source of a copy was not in
1491 # the parent directory. However, this doesn't actually make sense to
1494 # the parent directory. However, this doesn't actually make sense to
1492 # do (what does a copy from something not in your working copy even
1495 # do (what does a copy from something not in your working copy even
1493 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1496 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1494 # the user that copy information was dropped, so if they didn't
1497 # the user that copy information was dropped, so if they didn't
1495 # expect this outcome it can be fixed, but this is the correct
1498 # expect this outcome it can be fixed, but this is the correct
1496 # behavior in this circumstance.
1499 # behavior in this circumstance.
1497
1500
1498 if crev:
1501 if crev:
1499 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1502 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1500 meta["copy"] = cfname
1503 meta["copy"] = cfname
1501 meta["copyrev"] = hex(crev)
1504 meta["copyrev"] = hex(crev)
1502 fparent1, fparent2 = nullid, newfparent
1505 fparent1, fparent2 = nullid, newfparent
1503 else:
1506 else:
1504 self.ui.warn(_("warning: can't find ancestor for '%s' "
1507 self.ui.warn(_("warning: can't find ancestor for '%s' "
1505 "copied from '%s'!\n") % (fname, cfname))
1508 "copied from '%s'!\n") % (fname, cfname))
1506
1509
1507 elif fparent1 == nullid:
1510 elif fparent1 == nullid:
1508 fparent1, fparent2 = fparent2, nullid
1511 fparent1, fparent2 = fparent2, nullid
1509 elif fparent2 != nullid:
1512 elif fparent2 != nullid:
1510 # is one parent an ancestor of the other?
1513 # is one parent an ancestor of the other?
1511 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1514 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1512 if fparent1 in fparentancestors:
1515 if fparent1 in fparentancestors:
1513 fparent1, fparent2 = fparent2, nullid
1516 fparent1, fparent2 = fparent2, nullid
1514 elif fparent2 in fparentancestors:
1517 elif fparent2 in fparentancestors:
1515 fparent2 = nullid
1518 fparent2 = nullid
1516
1519
1517 # is the file changed?
1520 # is the file changed?
1518 text = fctx.data()
1521 text = fctx.data()
1519 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1522 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1520 changelist.append(fname)
1523 changelist.append(fname)
1521 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1524 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1522 # are just the flags changed during merge?
1525 # are just the flags changed during merge?
1523 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1526 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1524 changelist.append(fname)
1527 changelist.append(fname)
1525
1528
1526 return fparent1
1529 return fparent1
1527
1530
1528 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1531 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1529 """check for commit arguments that aren't committable"""
1532 """check for commit arguments that aren't committable"""
1530 if match.isexact() or match.prefix():
1533 if match.isexact() or match.prefix():
1531 matched = set(status.modified + status.added + status.removed)
1534 matched = set(status.modified + status.added + status.removed)
1532
1535
1533 for f in match.files():
1536 for f in match.files():
1534 f = self.dirstate.normalize(f)
1537 f = self.dirstate.normalize(f)
1535 if f == '.' or f in matched or f in wctx.substate:
1538 if f == '.' or f in matched or f in wctx.substate:
1536 continue
1539 continue
1537 if f in status.deleted:
1540 if f in status.deleted:
1538 fail(f, _('file not found!'))
1541 fail(f, _('file not found!'))
1539 if f in vdirs: # visited directory
1542 if f in vdirs: # visited directory
1540 d = f + '/'
1543 d = f + '/'
1541 for mf in matched:
1544 for mf in matched:
1542 if mf.startswith(d):
1545 if mf.startswith(d):
1543 break
1546 break
1544 else:
1547 else:
1545 fail(f, _("no match under directory!"))
1548 fail(f, _("no match under directory!"))
1546 elif f not in self.dirstate:
1549 elif f not in self.dirstate:
1547 fail(f, _("file not tracked!"))
1550 fail(f, _("file not tracked!"))
1548
1551
1549 @unfilteredmethod
1552 @unfilteredmethod
1550 def commit(self, text="", user=None, date=None, match=None, force=False,
1553 def commit(self, text="", user=None, date=None, match=None, force=False,
1551 editor=False, extra=None):
1554 editor=False, extra=None):
1552 """Add a new revision to current repository.
1555 """Add a new revision to current repository.
1553
1556
1554 Revision information is gathered from the working directory,
1557 Revision information is gathered from the working directory,
1555 match can be used to filter the committed files. If editor is
1558 match can be used to filter the committed files. If editor is
1556 supplied, it is called to get a commit message.
1559 supplied, it is called to get a commit message.
1557 """
1560 """
1558 if extra is None:
1561 if extra is None:
1559 extra = {}
1562 extra = {}
1560
1563
1561 def fail(f, msg):
1564 def fail(f, msg):
1562 raise error.Abort('%s: %s' % (f, msg))
1565 raise error.Abort('%s: %s' % (f, msg))
1563
1566
1564 if not match:
1567 if not match:
1565 match = matchmod.always(self.root, '')
1568 match = matchmod.always(self.root, '')
1566
1569
1567 if not force:
1570 if not force:
1568 vdirs = []
1571 vdirs = []
1569 match.explicitdir = vdirs.append
1572 match.explicitdir = vdirs.append
1570 match.bad = fail
1573 match.bad = fail
1571
1574
1572 wlock = lock = tr = None
1575 wlock = lock = tr = None
1573 try:
1576 try:
1574 wlock = self.wlock()
1577 wlock = self.wlock()
1575 lock = self.lock() # for recent changelog (see issue4368)
1578 lock = self.lock() # for recent changelog (see issue4368)
1576
1579
1577 wctx = self[None]
1580 wctx = self[None]
1578 merge = len(wctx.parents()) > 1
1581 merge = len(wctx.parents()) > 1
1579
1582
1580 if not force and merge and match.ispartial():
1583 if not force and merge and match.ispartial():
1581 raise error.Abort(_('cannot partially commit a merge '
1584 raise error.Abort(_('cannot partially commit a merge '
1582 '(do not specify files or patterns)'))
1585 '(do not specify files or patterns)'))
1583
1586
1584 status = self.status(match=match, clean=force)
1587 status = self.status(match=match, clean=force)
1585 if force:
1588 if force:
1586 status.modified.extend(status.clean) # mq may commit clean files
1589 status.modified.extend(status.clean) # mq may commit clean files
1587
1590
1588 # check subrepos
1591 # check subrepos
1589 subs = []
1592 subs = []
1590 commitsubs = set()
1593 commitsubs = set()
1591 newstate = wctx.substate.copy()
1594 newstate = wctx.substate.copy()
1592 # only manage subrepos and .hgsubstate if .hgsub is present
1595 # only manage subrepos and .hgsubstate if .hgsub is present
1593 if '.hgsub' in wctx:
1596 if '.hgsub' in wctx:
1594 # we'll decide whether to track this ourselves, thanks
1597 # we'll decide whether to track this ourselves, thanks
1595 for c in status.modified, status.added, status.removed:
1598 for c in status.modified, status.added, status.removed:
1596 if '.hgsubstate' in c:
1599 if '.hgsubstate' in c:
1597 c.remove('.hgsubstate')
1600 c.remove('.hgsubstate')
1598
1601
1599 # compare current state to last committed state
1602 # compare current state to last committed state
1600 # build new substate based on last committed state
1603 # build new substate based on last committed state
1601 oldstate = wctx.p1().substate
1604 oldstate = wctx.p1().substate
1602 for s in sorted(newstate.keys()):
1605 for s in sorted(newstate.keys()):
1603 if not match(s):
1606 if not match(s):
1604 # ignore working copy, use old state if present
1607 # ignore working copy, use old state if present
1605 if s in oldstate:
1608 if s in oldstate:
1606 newstate[s] = oldstate[s]
1609 newstate[s] = oldstate[s]
1607 continue
1610 continue
1608 if not force:
1611 if not force:
1609 raise error.Abort(
1612 raise error.Abort(
1610 _("commit with new subrepo %s excluded") % s)
1613 _("commit with new subrepo %s excluded") % s)
1611 dirtyreason = wctx.sub(s).dirtyreason(True)
1614 dirtyreason = wctx.sub(s).dirtyreason(True)
1612 if dirtyreason:
1615 if dirtyreason:
1613 if not self.ui.configbool('ui', 'commitsubrepos'):
1616 if not self.ui.configbool('ui', 'commitsubrepos'):
1614 raise error.Abort(dirtyreason,
1617 raise error.Abort(dirtyreason,
1615 hint=_("use --subrepos for recursive commit"))
1618 hint=_("use --subrepos for recursive commit"))
1616 subs.append(s)
1619 subs.append(s)
1617 commitsubs.add(s)
1620 commitsubs.add(s)
1618 else:
1621 else:
1619 bs = wctx.sub(s).basestate()
1622 bs = wctx.sub(s).basestate()
1620 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1623 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1621 if oldstate.get(s, (None, None, None))[1] != bs:
1624 if oldstate.get(s, (None, None, None))[1] != bs:
1622 subs.append(s)
1625 subs.append(s)
1623
1626
1624 # check for removed subrepos
1627 # check for removed subrepos
1625 for p in wctx.parents():
1628 for p in wctx.parents():
1626 r = [s for s in p.substate if s not in newstate]
1629 r = [s for s in p.substate if s not in newstate]
1627 subs += [s for s in r if match(s)]
1630 subs += [s for s in r if match(s)]
1628 if subs:
1631 if subs:
1629 if (not match('.hgsub') and
1632 if (not match('.hgsub') and
1630 '.hgsub' in (wctx.modified() + wctx.added())):
1633 '.hgsub' in (wctx.modified() + wctx.added())):
1631 raise error.Abort(
1634 raise error.Abort(
1632 _("can't commit subrepos without .hgsub"))
1635 _("can't commit subrepos without .hgsub"))
1633 status.modified.insert(0, '.hgsubstate')
1636 status.modified.insert(0, '.hgsubstate')
1634
1637
1635 elif '.hgsub' in status.removed:
1638 elif '.hgsub' in status.removed:
1636 # clean up .hgsubstate when .hgsub is removed
1639 # clean up .hgsubstate when .hgsub is removed
1637 if ('.hgsubstate' in wctx and
1640 if ('.hgsubstate' in wctx and
1638 '.hgsubstate' not in (status.modified + status.added +
1641 '.hgsubstate' not in (status.modified + status.added +
1639 status.removed)):
1642 status.removed)):
1640 status.removed.insert(0, '.hgsubstate')
1643 status.removed.insert(0, '.hgsubstate')
1641
1644
1642 # make sure all explicit patterns are matched
1645 # make sure all explicit patterns are matched
1643 if not force:
1646 if not force:
1644 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1647 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1645
1648
1646 cctx = context.workingcommitctx(self, status,
1649 cctx = context.workingcommitctx(self, status,
1647 text, user, date, extra)
1650 text, user, date, extra)
1648
1651
1649 # internal config: ui.allowemptycommit
1652 # internal config: ui.allowemptycommit
1650 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1653 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1651 or extra.get('close') or merge or cctx.files()
1654 or extra.get('close') or merge or cctx.files()
1652 or self.ui.configbool('ui', 'allowemptycommit'))
1655 or self.ui.configbool('ui', 'allowemptycommit'))
1653 if not allowemptycommit:
1656 if not allowemptycommit:
1654 return None
1657 return None
1655
1658
1656 if merge and cctx.deleted():
1659 if merge and cctx.deleted():
1657 raise error.Abort(_("cannot commit merge with missing files"))
1660 raise error.Abort(_("cannot commit merge with missing files"))
1658
1661
1659 ms = mergemod.mergestate.read(self)
1662 ms = mergemod.mergestate.read(self)
1660 mergeutil.checkunresolved(ms)
1663 mergeutil.checkunresolved(ms)
1661
1664
1662 if editor:
1665 if editor:
1663 cctx._text = editor(self, cctx, subs)
1666 cctx._text = editor(self, cctx, subs)
1664 edited = (text != cctx._text)
1667 edited = (text != cctx._text)
1665
1668
1666 # Save commit message in case this transaction gets rolled back
1669 # Save commit message in case this transaction gets rolled back
1667 # (e.g. by a pretxncommit hook). Leave the content alone on
1670 # (e.g. by a pretxncommit hook). Leave the content alone on
1668 # the assumption that the user will use the same editor again.
1671 # the assumption that the user will use the same editor again.
1669 msgfn = self.savecommitmessage(cctx._text)
1672 msgfn = self.savecommitmessage(cctx._text)
1670
1673
1671 # commit subs and write new state
1674 # commit subs and write new state
1672 if subs:
1675 if subs:
1673 for s in sorted(commitsubs):
1676 for s in sorted(commitsubs):
1674 sub = wctx.sub(s)
1677 sub = wctx.sub(s)
1675 self.ui.status(_('committing subrepository %s\n') %
1678 self.ui.status(_('committing subrepository %s\n') %
1676 subrepo.subrelpath(sub))
1679 subrepo.subrelpath(sub))
1677 sr = sub.commit(cctx._text, user, date)
1680 sr = sub.commit(cctx._text, user, date)
1678 newstate[s] = (newstate[s][0], sr)
1681 newstate[s] = (newstate[s][0], sr)
1679 subrepo.writestate(self, newstate)
1682 subrepo.writestate(self, newstate)
1680
1683
1681 p1, p2 = self.dirstate.parents()
1684 p1, p2 = self.dirstate.parents()
1682 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1685 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1683 try:
1686 try:
1684 self.hook("precommit", throw=True, parent1=hookp1,
1687 self.hook("precommit", throw=True, parent1=hookp1,
1685 parent2=hookp2)
1688 parent2=hookp2)
1686 tr = self.transaction('commit')
1689 tr = self.transaction('commit')
1687 ret = self.commitctx(cctx, True)
1690 ret = self.commitctx(cctx, True)
1688 except: # re-raises
1691 except: # re-raises
1689 if edited:
1692 if edited:
1690 self.ui.write(
1693 self.ui.write(
1691 _('note: commit message saved in %s\n') % msgfn)
1694 _('note: commit message saved in %s\n') % msgfn)
1692 raise
1695 raise
1693 # update bookmarks, dirstate and mergestate
1696 # update bookmarks, dirstate and mergestate
1694 bookmarks.update(self, [p1, p2], ret)
1697 bookmarks.update(self, [p1, p2], ret)
1695 cctx.markcommitted(ret)
1698 cctx.markcommitted(ret)
1696 ms.reset()
1699 ms.reset()
1697 tr.close()
1700 tr.close()
1698
1701
1699 finally:
1702 finally:
1700 lockmod.release(tr, lock, wlock)
1703 lockmod.release(tr, lock, wlock)
1701
1704
1702 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1705 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1703 # hack for command that use a temporary commit (eg: histedit)
1706 # hack for command that use a temporary commit (eg: histedit)
1704 # temporary commit got stripped before hook release
1707 # temporary commit got stripped before hook release
1705 if self.changelog.hasnode(ret):
1708 if self.changelog.hasnode(ret):
1706 self.hook("commit", node=node, parent1=parent1,
1709 self.hook("commit", node=node, parent1=parent1,
1707 parent2=parent2)
1710 parent2=parent2)
1708 self._afterlock(commithook)
1711 self._afterlock(commithook)
1709 return ret
1712 return ret
1710
1713
1711 @unfilteredmethod
1714 @unfilteredmethod
1712 def commitctx(self, ctx, error=False):
1715 def commitctx(self, ctx, error=False):
1713 """Add a new revision to current repository.
1716 """Add a new revision to current repository.
1714 Revision information is passed via the context argument.
1717 Revision information is passed via the context argument.
1715 """
1718 """
1716
1719
1717 tr = None
1720 tr = None
1718 p1, p2 = ctx.p1(), ctx.p2()
1721 p1, p2 = ctx.p1(), ctx.p2()
1719 user = ctx.user()
1722 user = ctx.user()
1720
1723
1721 lock = self.lock()
1724 lock = self.lock()
1722 try:
1725 try:
1723 tr = self.transaction("commit")
1726 tr = self.transaction("commit")
1724 trp = weakref.proxy(tr)
1727 trp = weakref.proxy(tr)
1725
1728
1726 if ctx.manifestnode():
1729 if ctx.manifestnode():
1727 # reuse an existing manifest revision
1730 # reuse an existing manifest revision
1728 mn = ctx.manifestnode()
1731 mn = ctx.manifestnode()
1729 files = ctx.files()
1732 files = ctx.files()
1730 elif ctx.files():
1733 elif ctx.files():
1731 m1ctx = p1.manifestctx()
1734 m1ctx = p1.manifestctx()
1732 m2ctx = p2.manifestctx()
1735 m2ctx = p2.manifestctx()
1733 mctx = m1ctx.copy()
1736 mctx = m1ctx.copy()
1734
1737
1735 m = mctx.read()
1738 m = mctx.read()
1736 m1 = m1ctx.read()
1739 m1 = m1ctx.read()
1737 m2 = m2ctx.read()
1740 m2 = m2ctx.read()
1738
1741
1739 # check in files
1742 # check in files
1740 added = []
1743 added = []
1741 changed = []
1744 changed = []
1742 removed = list(ctx.removed())
1745 removed = list(ctx.removed())
1743 linkrev = len(self)
1746 linkrev = len(self)
1744 self.ui.note(_("committing files:\n"))
1747 self.ui.note(_("committing files:\n"))
1745 for f in sorted(ctx.modified() + ctx.added()):
1748 for f in sorted(ctx.modified() + ctx.added()):
1746 self.ui.note(f + "\n")
1749 self.ui.note(f + "\n")
1747 try:
1750 try:
1748 fctx = ctx[f]
1751 fctx = ctx[f]
1749 if fctx is None:
1752 if fctx is None:
1750 removed.append(f)
1753 removed.append(f)
1751 else:
1754 else:
1752 added.append(f)
1755 added.append(f)
1753 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1756 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1754 trp, changed)
1757 trp, changed)
1755 m.setflag(f, fctx.flags())
1758 m.setflag(f, fctx.flags())
1756 except OSError as inst:
1759 except OSError as inst:
1757 self.ui.warn(_("trouble committing %s!\n") % f)
1760 self.ui.warn(_("trouble committing %s!\n") % f)
1758 raise
1761 raise
1759 except IOError as inst:
1762 except IOError as inst:
1760 errcode = getattr(inst, 'errno', errno.ENOENT)
1763 errcode = getattr(inst, 'errno', errno.ENOENT)
1761 if error or errcode and errcode != errno.ENOENT:
1764 if error or errcode and errcode != errno.ENOENT:
1762 self.ui.warn(_("trouble committing %s!\n") % f)
1765 self.ui.warn(_("trouble committing %s!\n") % f)
1763 raise
1766 raise
1764
1767
1765 # update manifest
1768 # update manifest
1766 self.ui.note(_("committing manifest\n"))
1769 self.ui.note(_("committing manifest\n"))
1767 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1770 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1768 drop = [f for f in removed if f in m]
1771 drop = [f for f in removed if f in m]
1769 for f in drop:
1772 for f in drop:
1770 del m[f]
1773 del m[f]
1771 mn = mctx.write(trp, linkrev,
1774 mn = mctx.write(trp, linkrev,
1772 p1.manifestnode(), p2.manifestnode(),
1775 p1.manifestnode(), p2.manifestnode(),
1773 added, drop)
1776 added, drop)
1774 files = changed + removed
1777 files = changed + removed
1775 else:
1778 else:
1776 mn = p1.manifestnode()
1779 mn = p1.manifestnode()
1777 files = []
1780 files = []
1778
1781
1779 # update changelog
1782 # update changelog
1780 self.ui.note(_("committing changelog\n"))
1783 self.ui.note(_("committing changelog\n"))
1781 self.changelog.delayupdate(tr)
1784 self.changelog.delayupdate(tr)
1782 n = self.changelog.add(mn, files, ctx.description(),
1785 n = self.changelog.add(mn, files, ctx.description(),
1783 trp, p1.node(), p2.node(),
1786 trp, p1.node(), p2.node(),
1784 user, ctx.date(), ctx.extra().copy())
1787 user, ctx.date(), ctx.extra().copy())
1785 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1788 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1786 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1789 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1787 parent2=xp2)
1790 parent2=xp2)
1788 # set the new commit is proper phase
1791 # set the new commit is proper phase
1789 targetphase = subrepo.newcommitphase(self.ui, ctx)
1792 targetphase = subrepo.newcommitphase(self.ui, ctx)
1790 if targetphase:
1793 if targetphase:
1791 # retract boundary do not alter parent changeset.
1794 # retract boundary do not alter parent changeset.
1792 # if a parent have higher the resulting phase will
1795 # if a parent have higher the resulting phase will
1793 # be compliant anyway
1796 # be compliant anyway
1794 #
1797 #
1795 # if minimal phase was 0 we don't need to retract anything
1798 # if minimal phase was 0 we don't need to retract anything
1796 phases.retractboundary(self, tr, targetphase, [n])
1799 phases.retractboundary(self, tr, targetphase, [n])
1797 tr.close()
1800 tr.close()
1798 branchmap.updatecache(self.filtered('served'))
1801 branchmap.updatecache(self.filtered('served'))
1799 return n
1802 return n
1800 finally:
1803 finally:
1801 if tr:
1804 if tr:
1802 tr.release()
1805 tr.release()
1803 lock.release()
1806 lock.release()
1804
1807
1805 @unfilteredmethod
1808 @unfilteredmethod
1806 def destroying(self):
1809 def destroying(self):
1807 '''Inform the repository that nodes are about to be destroyed.
1810 '''Inform the repository that nodes are about to be destroyed.
1808 Intended for use by strip and rollback, so there's a common
1811 Intended for use by strip and rollback, so there's a common
1809 place for anything that has to be done before destroying history.
1812 place for anything that has to be done before destroying history.
1810
1813
1811 This is mostly useful for saving state that is in memory and waiting
1814 This is mostly useful for saving state that is in memory and waiting
1812 to be flushed when the current lock is released. Because a call to
1815 to be flushed when the current lock is released. Because a call to
1813 destroyed is imminent, the repo will be invalidated causing those
1816 destroyed is imminent, the repo will be invalidated causing those
1814 changes to stay in memory (waiting for the next unlock), or vanish
1817 changes to stay in memory (waiting for the next unlock), or vanish
1815 completely.
1818 completely.
1816 '''
1819 '''
1817 # When using the same lock to commit and strip, the phasecache is left
1820 # When using the same lock to commit and strip, the phasecache is left
1818 # dirty after committing. Then when we strip, the repo is invalidated,
1821 # dirty after committing. Then when we strip, the repo is invalidated,
1819 # causing those changes to disappear.
1822 # causing those changes to disappear.
1820 if '_phasecache' in vars(self):
1823 if '_phasecache' in vars(self):
1821 self._phasecache.write()
1824 self._phasecache.write()
1822
1825
1823 @unfilteredmethod
1826 @unfilteredmethod
1824 def destroyed(self):
1827 def destroyed(self):
1825 '''Inform the repository that nodes have been destroyed.
1828 '''Inform the repository that nodes have been destroyed.
1826 Intended for use by strip and rollback, so there's a common
1829 Intended for use by strip and rollback, so there's a common
1827 place for anything that has to be done after destroying history.
1830 place for anything that has to be done after destroying history.
1828 '''
1831 '''
1829 # When one tries to:
1832 # When one tries to:
1830 # 1) destroy nodes thus calling this method (e.g. strip)
1833 # 1) destroy nodes thus calling this method (e.g. strip)
1831 # 2) use phasecache somewhere (e.g. commit)
1834 # 2) use phasecache somewhere (e.g. commit)
1832 #
1835 #
1833 # then 2) will fail because the phasecache contains nodes that were
1836 # then 2) will fail because the phasecache contains nodes that were
1834 # removed. We can either remove phasecache from the filecache,
1837 # removed. We can either remove phasecache from the filecache,
1835 # causing it to reload next time it is accessed, or simply filter
1838 # causing it to reload next time it is accessed, or simply filter
1836 # the removed nodes now and write the updated cache.
1839 # the removed nodes now and write the updated cache.
1837 self._phasecache.filterunknown(self)
1840 self._phasecache.filterunknown(self)
1838 self._phasecache.write()
1841 self._phasecache.write()
1839
1842
1840 # update the 'served' branch cache to help read only server process
1843 # update the 'served' branch cache to help read only server process
1841 # Thanks to branchcache collaboration this is done from the nearest
1844 # Thanks to branchcache collaboration this is done from the nearest
1842 # filtered subset and it is expected to be fast.
1845 # filtered subset and it is expected to be fast.
1843 branchmap.updatecache(self.filtered('served'))
1846 branchmap.updatecache(self.filtered('served'))
1844
1847
1845 # Ensure the persistent tag cache is updated. Doing it now
1848 # Ensure the persistent tag cache is updated. Doing it now
1846 # means that the tag cache only has to worry about destroyed
1849 # means that the tag cache only has to worry about destroyed
1847 # heads immediately after a strip/rollback. That in turn
1850 # heads immediately after a strip/rollback. That in turn
1848 # guarantees that "cachetip == currenttip" (comparing both rev
1851 # guarantees that "cachetip == currenttip" (comparing both rev
1849 # and node) always means no nodes have been added or destroyed.
1852 # and node) always means no nodes have been added or destroyed.
1850
1853
1851 # XXX this is suboptimal when qrefresh'ing: we strip the current
1854 # XXX this is suboptimal when qrefresh'ing: we strip the current
1852 # head, refresh the tag cache, then immediately add a new head.
1855 # head, refresh the tag cache, then immediately add a new head.
1853 # But I think doing it this way is necessary for the "instant
1856 # But I think doing it this way is necessary for the "instant
1854 # tag cache retrieval" case to work.
1857 # tag cache retrieval" case to work.
1855 self.invalidate()
1858 self.invalidate()
1856
1859
1857 def walk(self, match, node=None):
1860 def walk(self, match, node=None):
1858 '''
1861 '''
1859 walk recursively through the directory tree or a given
1862 walk recursively through the directory tree or a given
1860 changeset, finding all files matched by the match
1863 changeset, finding all files matched by the match
1861 function
1864 function
1862 '''
1865 '''
1863 return self[node].walk(match)
1866 return self[node].walk(match)
1864
1867
1865 def status(self, node1='.', node2=None, match=None,
1868 def status(self, node1='.', node2=None, match=None,
1866 ignored=False, clean=False, unknown=False,
1869 ignored=False, clean=False, unknown=False,
1867 listsubrepos=False):
1870 listsubrepos=False):
1868 '''a convenience method that calls node1.status(node2)'''
1871 '''a convenience method that calls node1.status(node2)'''
1869 return self[node1].status(node2, match, ignored, clean, unknown,
1872 return self[node1].status(node2, match, ignored, clean, unknown,
1870 listsubrepos)
1873 listsubrepos)
1871
1874
1872 def heads(self, start=None):
1875 def heads(self, start=None):
1873 if start is None:
1876 if start is None:
1874 cl = self.changelog
1877 cl = self.changelog
1875 headrevs = reversed(cl.headrevs())
1878 headrevs = reversed(cl.headrevs())
1876 return [cl.node(rev) for rev in headrevs]
1879 return [cl.node(rev) for rev in headrevs]
1877
1880
1878 heads = self.changelog.heads(start)
1881 heads = self.changelog.heads(start)
1879 # sort the output in rev descending order
1882 # sort the output in rev descending order
1880 return sorted(heads, key=self.changelog.rev, reverse=True)
1883 return sorted(heads, key=self.changelog.rev, reverse=True)
1881
1884
1882 def branchheads(self, branch=None, start=None, closed=False):
1885 def branchheads(self, branch=None, start=None, closed=False):
1883 '''return a (possibly filtered) list of heads for the given branch
1886 '''return a (possibly filtered) list of heads for the given branch
1884
1887
1885 Heads are returned in topological order, from newest to oldest.
1888 Heads are returned in topological order, from newest to oldest.
1886 If branch is None, use the dirstate branch.
1889 If branch is None, use the dirstate branch.
1887 If start is not None, return only heads reachable from start.
1890 If start is not None, return only heads reachable from start.
1888 If closed is True, return heads that are marked as closed as well.
1891 If closed is True, return heads that are marked as closed as well.
1889 '''
1892 '''
1890 if branch is None:
1893 if branch is None:
1891 branch = self[None].branch()
1894 branch = self[None].branch()
1892 branches = self.branchmap()
1895 branches = self.branchmap()
1893 if branch not in branches:
1896 if branch not in branches:
1894 return []
1897 return []
1895 # the cache returns heads ordered lowest to highest
1898 # the cache returns heads ordered lowest to highest
1896 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1899 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1897 if start is not None:
1900 if start is not None:
1898 # filter out the heads that cannot be reached from startrev
1901 # filter out the heads that cannot be reached from startrev
1899 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1902 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1900 bheads = [h for h in bheads if h in fbheads]
1903 bheads = [h for h in bheads if h in fbheads]
1901 return bheads
1904 return bheads
1902
1905
1903 def branches(self, nodes):
1906 def branches(self, nodes):
1904 if not nodes:
1907 if not nodes:
1905 nodes = [self.changelog.tip()]
1908 nodes = [self.changelog.tip()]
1906 b = []
1909 b = []
1907 for n in nodes:
1910 for n in nodes:
1908 t = n
1911 t = n
1909 while True:
1912 while True:
1910 p = self.changelog.parents(n)
1913 p = self.changelog.parents(n)
1911 if p[1] != nullid or p[0] == nullid:
1914 if p[1] != nullid or p[0] == nullid:
1912 b.append((t, n, p[0], p[1]))
1915 b.append((t, n, p[0], p[1]))
1913 break
1916 break
1914 n = p[0]
1917 n = p[0]
1915 return b
1918 return b
1916
1919
1917 def between(self, pairs):
1920 def between(self, pairs):
1918 r = []
1921 r = []
1919
1922
1920 for top, bottom in pairs:
1923 for top, bottom in pairs:
1921 n, l, i = top, [], 0
1924 n, l, i = top, [], 0
1922 f = 1
1925 f = 1
1923
1926
1924 while n != bottom and n != nullid:
1927 while n != bottom and n != nullid:
1925 p = self.changelog.parents(n)[0]
1928 p = self.changelog.parents(n)[0]
1926 if i == f:
1929 if i == f:
1927 l.append(n)
1930 l.append(n)
1928 f = f * 2
1931 f = f * 2
1929 n = p
1932 n = p
1930 i += 1
1933 i += 1
1931
1934
1932 r.append(l)
1935 r.append(l)
1933
1936
1934 return r
1937 return r
1935
1938
1936 def checkpush(self, pushop):
1939 def checkpush(self, pushop):
1937 """Extensions can override this function if additional checks have
1940 """Extensions can override this function if additional checks have
1938 to be performed before pushing, or call it if they override push
1941 to be performed before pushing, or call it if they override push
1939 command.
1942 command.
1940 """
1943 """
1941 pass
1944 pass
1942
1945
1943 @unfilteredpropertycache
1946 @unfilteredpropertycache
1944 def prepushoutgoinghooks(self):
1947 def prepushoutgoinghooks(self):
1945 """Return util.hooks consists of a pushop with repo, remote, outgoing
1948 """Return util.hooks consists of a pushop with repo, remote, outgoing
1946 methods, which are called before pushing changesets.
1949 methods, which are called before pushing changesets.
1947 """
1950 """
1948 return util.hooks()
1951 return util.hooks()
1949
1952
1950 def pushkey(self, namespace, key, old, new):
1953 def pushkey(self, namespace, key, old, new):
1951 try:
1954 try:
1952 tr = self.currenttransaction()
1955 tr = self.currenttransaction()
1953 hookargs = {}
1956 hookargs = {}
1954 if tr is not None:
1957 if tr is not None:
1955 hookargs.update(tr.hookargs)
1958 hookargs.update(tr.hookargs)
1956 hookargs['namespace'] = namespace
1959 hookargs['namespace'] = namespace
1957 hookargs['key'] = key
1960 hookargs['key'] = key
1958 hookargs['old'] = old
1961 hookargs['old'] = old
1959 hookargs['new'] = new
1962 hookargs['new'] = new
1960 self.hook('prepushkey', throw=True, **hookargs)
1963 self.hook('prepushkey', throw=True, **hookargs)
1961 except error.HookAbort as exc:
1964 except error.HookAbort as exc:
1962 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1965 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1963 if exc.hint:
1966 if exc.hint:
1964 self.ui.write_err(_("(%s)\n") % exc.hint)
1967 self.ui.write_err(_("(%s)\n") % exc.hint)
1965 return False
1968 return False
1966 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1969 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1967 ret = pushkey.push(self, namespace, key, old, new)
1970 ret = pushkey.push(self, namespace, key, old, new)
1968 def runhook():
1971 def runhook():
1969 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1972 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1970 ret=ret)
1973 ret=ret)
1971 self._afterlock(runhook)
1974 self._afterlock(runhook)
1972 return ret
1975 return ret
1973
1976
1974 def listkeys(self, namespace):
1977 def listkeys(self, namespace):
1975 self.hook('prelistkeys', throw=True, namespace=namespace)
1978 self.hook('prelistkeys', throw=True, namespace=namespace)
1976 self.ui.debug('listing keys for "%s"\n' % namespace)
1979 self.ui.debug('listing keys for "%s"\n' % namespace)
1977 values = pushkey.list(self, namespace)
1980 values = pushkey.list(self, namespace)
1978 self.hook('listkeys', namespace=namespace, values=values)
1981 self.hook('listkeys', namespace=namespace, values=values)
1979 return values
1982 return values
1980
1983
1981 def debugwireargs(self, one, two, three=None, four=None, five=None):
1984 def debugwireargs(self, one, two, three=None, four=None, five=None):
1982 '''used to test argument passing over the wire'''
1985 '''used to test argument passing over the wire'''
1983 return "%s %s %s %s %s" % (one, two, three, four, five)
1986 return "%s %s %s %s %s" % (one, two, three, four, five)
1984
1987
1985 def savecommitmessage(self, text):
1988 def savecommitmessage(self, text):
1986 fp = self.vfs('last-message.txt', 'wb')
1989 fp = self.vfs('last-message.txt', 'wb')
1987 try:
1990 try:
1988 fp.write(text)
1991 fp.write(text)
1989 finally:
1992 finally:
1990 fp.close()
1993 fp.close()
1991 return self.pathto(fp.name[len(self.root) + 1:])
1994 return self.pathto(fp.name[len(self.root) + 1:])
1992
1995
1993 # used to avoid circular references so destructors work
1996 # used to avoid circular references so destructors work
1994 def aftertrans(files):
1997 def aftertrans(files):
1995 renamefiles = [tuple(t) for t in files]
1998 renamefiles = [tuple(t) for t in files]
1996 def a():
1999 def a():
1997 for vfs, src, dest in renamefiles:
2000 for vfs, src, dest in renamefiles:
1998 try:
2001 try:
1999 vfs.rename(src, dest)
2002 vfs.rename(src, dest)
2000 except OSError: # journal file does not yet exist
2003 except OSError: # journal file does not yet exist
2001 pass
2004 pass
2002 return a
2005 return a
2003
2006
2004 def undoname(fn):
2007 def undoname(fn):
2005 base, name = os.path.split(fn)
2008 base, name = os.path.split(fn)
2006 assert name.startswith('journal')
2009 assert name.startswith('journal')
2007 return os.path.join(base, name.replace('journal', 'undo', 1))
2010 return os.path.join(base, name.replace('journal', 'undo', 1))
2008
2011
2009 def instance(ui, path, create):
2012 def instance(ui, path, create):
2010 return localrepository(ui, util.urllocalpath(path), create)
2013 return localrepository(ui, util.urllocalpath(path), create)
2011
2014
2012 def islocal(path):
2015 def islocal(path):
2013 return True
2016 return True
2014
2017
2015 def newreporequirements(repo):
2018 def newreporequirements(repo):
2016 """Determine the set of requirements for a new local repository.
2019 """Determine the set of requirements for a new local repository.
2017
2020
2018 Extensions can wrap this function to specify custom requirements for
2021 Extensions can wrap this function to specify custom requirements for
2019 new repositories.
2022 new repositories.
2020 """
2023 """
2021 ui = repo.ui
2024 ui = repo.ui
2022 requirements = set(['revlogv1'])
2025 requirements = set(['revlogv1'])
2023 if ui.configbool('format', 'usestore', True):
2026 if ui.configbool('format', 'usestore', True):
2024 requirements.add('store')
2027 requirements.add('store')
2025 if ui.configbool('format', 'usefncache', True):
2028 if ui.configbool('format', 'usefncache', True):
2026 requirements.add('fncache')
2029 requirements.add('fncache')
2027 if ui.configbool('format', 'dotencode', True):
2030 if ui.configbool('format', 'dotencode', True):
2028 requirements.add('dotencode')
2031 requirements.add('dotencode')
2029
2032
2030 compengine = ui.config('experimental', 'format.compression', 'zlib')
2033 compengine = ui.config('experimental', 'format.compression', 'zlib')
2031 if compengine not in util.compengines:
2034 if compengine not in util.compengines:
2032 raise error.Abort(_('compression engine %s defined by '
2035 raise error.Abort(_('compression engine %s defined by '
2033 'experimental.format.compression not available') %
2036 'experimental.format.compression not available') %
2034 compengine,
2037 compengine,
2035 hint=_('run "hg debuginstall" to list available '
2038 hint=_('run "hg debuginstall" to list available '
2036 'compression engines'))
2039 'compression engines'))
2037
2040
2038 # zlib is the historical default and doesn't need an explicit requirement.
2041 # zlib is the historical default and doesn't need an explicit requirement.
2039 if compengine != 'zlib':
2042 if compengine != 'zlib':
2040 requirements.add('exp-compression-%s' % compengine)
2043 requirements.add('exp-compression-%s' % compengine)
2041
2044
2042 if scmutil.gdinitconfig(ui):
2045 if scmutil.gdinitconfig(ui):
2043 requirements.add('generaldelta')
2046 requirements.add('generaldelta')
2044 if ui.configbool('experimental', 'treemanifest', False):
2047 if ui.configbool('experimental', 'treemanifest', False):
2045 requirements.add('treemanifest')
2048 requirements.add('treemanifest')
2046 if ui.configbool('experimental', 'manifestv2', False):
2049 if ui.configbool('experimental', 'manifestv2', False):
2047 requirements.add('manifestv2')
2050 requirements.add('manifestv2')
2048
2051
2049 return requirements
2052 return requirements
@@ -1,364 +1,399 b''
1 #require killdaemons
1 #require killdaemons
2
2
3 $ echo "[extensions]" >> $HGRCPATH
3 $ echo "[extensions]" >> $HGRCPATH
4 $ echo "share = " >> $HGRCPATH
4 $ echo "share = " >> $HGRCPATH
5
5
6 prepare repo1
6 prepare repo1
7
7
8 $ hg init repo1
8 $ hg init repo1
9 $ cd repo1
9 $ cd repo1
10 $ echo a > a
10 $ echo a > a
11 $ hg commit -A -m'init'
11 $ hg commit -A -m'init'
12 adding a
12 adding a
13
13
14 share it
14 share it
15
15
16 $ cd ..
16 $ cd ..
17 $ hg share repo1 repo2
17 $ hg share repo1 repo2
18 updating working directory
18 updating working directory
19 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
20
20
21 share shouldn't have a store dir
21 share shouldn't have a store dir
22
22
23 $ cd repo2
23 $ cd repo2
24 $ test -d .hg/store
24 $ test -d .hg/store
25 [1]
25 [1]
26
26
27 Some sed versions appends newline, some don't, and some just fails
27 Some sed versions appends newline, some don't, and some just fails
28
28
29 $ cat .hg/sharedpath; echo
29 $ cat .hg/sharedpath; echo
30 $TESTTMP/repo1/.hg (glob)
30 $TESTTMP/repo1/.hg (glob)
31
31
32 trailing newline on .hg/sharedpath is ok
32 trailing newline on .hg/sharedpath is ok
33 $ hg tip -q
33 $ hg tip -q
34 0:d3873e73d99e
34 0:d3873e73d99e
35 $ echo '' >> .hg/sharedpath
35 $ echo '' >> .hg/sharedpath
36 $ cat .hg/sharedpath
36 $ cat .hg/sharedpath
37 $TESTTMP/repo1/.hg (glob)
37 $TESTTMP/repo1/.hg (glob)
38 $ hg tip -q
38 $ hg tip -q
39 0:d3873e73d99e
39 0:d3873e73d99e
40
40
41 commit in shared clone
41 commit in shared clone
42
42
43 $ echo a >> a
43 $ echo a >> a
44 $ hg commit -m'change in shared clone'
44 $ hg commit -m'change in shared clone'
45
45
46 check original
46 check original
47
47
48 $ cd ../repo1
48 $ cd ../repo1
49 $ hg log
49 $ hg log
50 changeset: 1:8af4dc49db9e
50 changeset: 1:8af4dc49db9e
51 tag: tip
51 tag: tip
52 user: test
52 user: test
53 date: Thu Jan 01 00:00:00 1970 +0000
53 date: Thu Jan 01 00:00:00 1970 +0000
54 summary: change in shared clone
54 summary: change in shared clone
55
55
56 changeset: 0:d3873e73d99e
56 changeset: 0:d3873e73d99e
57 user: test
57 user: test
58 date: Thu Jan 01 00:00:00 1970 +0000
58 date: Thu Jan 01 00:00:00 1970 +0000
59 summary: init
59 summary: init
60
60
61 $ hg update
61 $ hg update
62 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
63 $ cat a # should be two lines of "a"
63 $ cat a # should be two lines of "a"
64 a
64 a
65 a
65 a
66
66
67 commit in original
67 commit in original
68
68
69 $ echo b > b
69 $ echo b > b
70 $ hg commit -A -m'another file'
70 $ hg commit -A -m'another file'
71 adding b
71 adding b
72
72
73 check in shared clone
73 check in shared clone
74
74
75 $ cd ../repo2
75 $ cd ../repo2
76 $ hg log
76 $ hg log
77 changeset: 2:c2e0ac586386
77 changeset: 2:c2e0ac586386
78 tag: tip
78 tag: tip
79 user: test
79 user: test
80 date: Thu Jan 01 00:00:00 1970 +0000
80 date: Thu Jan 01 00:00:00 1970 +0000
81 summary: another file
81 summary: another file
82
82
83 changeset: 1:8af4dc49db9e
83 changeset: 1:8af4dc49db9e
84 user: test
84 user: test
85 date: Thu Jan 01 00:00:00 1970 +0000
85 date: Thu Jan 01 00:00:00 1970 +0000
86 summary: change in shared clone
86 summary: change in shared clone
87
87
88 changeset: 0:d3873e73d99e
88 changeset: 0:d3873e73d99e
89 user: test
89 user: test
90 date: Thu Jan 01 00:00:00 1970 +0000
90 date: Thu Jan 01 00:00:00 1970 +0000
91 summary: init
91 summary: init
92
92
93 $ hg update
93 $ hg update
94 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
94 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
95 $ cat b # should exist with one "b"
95 $ cat b # should exist with one "b"
96 b
96 b
97
97
98 hg serve shared clone
98 hg serve shared clone
99
99
100 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
100 $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid
101 $ cat hg.pid >> $DAEMON_PIDS
101 $ cat hg.pid >> $DAEMON_PIDS
102 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
102 $ get-with-headers.py localhost:$HGPORT 'raw-file/'
103 200 Script output follows
103 200 Script output follows
104
104
105
105
106 -rw-r--r-- 4 a
106 -rw-r--r-- 4 a
107 -rw-r--r-- 2 b
107 -rw-r--r-- 2 b
108
108
109
109
110
110
111 test unshare command
111 test unshare command
112
112
113 $ hg unshare
113 $ hg unshare
114 $ test -d .hg/store
114 $ test -d .hg/store
115 $ test -f .hg/sharedpath
115 $ test -f .hg/sharedpath
116 [1]
116 [1]
117 $ hg unshare
117 $ hg unshare
118 abort: this is not a shared repo
118 abort: this is not a shared repo
119 [255]
119 [255]
120
120
121 check that a change does not propagate
121 check that a change does not propagate
122
122
123 $ echo b >> b
123 $ echo b >> b
124 $ hg commit -m'change in unshared'
124 $ hg commit -m'change in unshared'
125 $ cd ../repo1
125 $ cd ../repo1
126 $ hg id -r tip
126 $ hg id -r tip
127 c2e0ac586386 tip
127 c2e0ac586386 tip
128
128
129 $ cd ..
129 $ cd ..
130
130
131
131
132 test sharing bookmarks
132 test sharing bookmarks
133
133
134 $ hg share -B repo1 repo3
134 $ hg share -B repo1 repo3
135 updating working directory
135 updating working directory
136 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
136 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
137 $ cd repo1
137 $ cd repo1
138 $ hg bookmark bm1
138 $ hg bookmark bm1
139 $ hg bookmarks
139 $ hg bookmarks
140 * bm1 2:c2e0ac586386
140 * bm1 2:c2e0ac586386
141 $ cd ../repo2
141 $ cd ../repo2
142 $ hg book bm2
142 $ hg book bm2
143 $ hg bookmarks
143 $ hg bookmarks
144 * bm2 3:0e6e70d1d5f1
144 * bm2 3:0e6e70d1d5f1
145 $ cd ../repo3
145 $ cd ../repo3
146 $ hg bookmarks
146 $ hg bookmarks
147 bm1 2:c2e0ac586386
147 bm1 2:c2e0ac586386
148 $ hg book bm3
148 $ hg book bm3
149 $ hg bookmarks
149 $ hg bookmarks
150 bm1 2:c2e0ac586386
150 bm1 2:c2e0ac586386
151 * bm3 2:c2e0ac586386
151 * bm3 2:c2e0ac586386
152 $ cd ../repo1
152 $ cd ../repo1
153 $ hg bookmarks
153 $ hg bookmarks
154 * bm1 2:c2e0ac586386
154 * bm1 2:c2e0ac586386
155 bm3 2:c2e0ac586386
155 bm3 2:c2e0ac586386
156
156
157 check whether HG_PENDING makes pending changes only in relatd
157 check whether HG_PENDING makes pending changes only in relatd
158 repositories visible to an external hook.
158 repositories visible to an external hook.
159
159
160 In "hg share" case, another transaction can't run in other
160 In "hg share" case, another transaction can't run in other
161 repositories sharing same source repository, because starting
161 repositories sharing same source repository, because starting
162 transaction requires locking store of source repository.
162 transaction requires locking store of source repository.
163
163
164 Therefore, this test scenario ignores checking visibility of
164 Therefore, this test scenario ignores checking visibility of
165 .hg/bookmakrs.pending in repo2, which shares repo1 without bookmarks.
165 .hg/bookmakrs.pending in repo2, which shares repo1 without bookmarks.
166
166
167 $ cat > $TESTTMP/checkbookmarks.sh <<EOF
167 $ cat > $TESTTMP/checkbookmarks.sh <<EOF
168 > echo "@repo1"
168 > echo "@repo1"
169 > hg -R $TESTTMP/repo1 bookmarks
169 > hg -R $TESTTMP/repo1 bookmarks
170 > echo "@repo2"
170 > echo "@repo2"
171 > hg -R $TESTTMP/repo2 bookmarks
171 > hg -R $TESTTMP/repo2 bookmarks
172 > echo "@repo3"
172 > echo "@repo3"
173 > hg -R $TESTTMP/repo3 bookmarks
173 > hg -R $TESTTMP/repo3 bookmarks
174 > exit 1 # to avoid adding new bookmark for subsequent tests
174 > exit 1 # to avoid adding new bookmark for subsequent tests
175 > EOF
175 > EOF
176
176
177 $ cd ../repo1
177 $ cd ../repo1
178 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
178 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
179 @repo1
179 @repo1
180 bm1 2:c2e0ac586386
180 bm1 2:c2e0ac586386
181 bm3 2:c2e0ac586386
181 bm3 2:c2e0ac586386
182 * bmX 2:c2e0ac586386
182 * bmX 2:c2e0ac586386
183 @repo2
183 @repo2
184 * bm2 3:0e6e70d1d5f1
184 * bm2 3:0e6e70d1d5f1
185 @repo3
185 @repo3
186 bm1 2:c2e0ac586386
186 bm1 2:c2e0ac586386
187 * bm3 2:c2e0ac586386
187 * bm3 2:c2e0ac586386
188 bmX 2:c2e0ac586386
188 bmX 2:c2e0ac586386
189 transaction abort!
189 transaction abort!
190 rollback completed
190 rollback completed
191 abort: pretxnclose hook exited with status 1
191 abort: pretxnclose hook exited with status 1
192 [255]
192 [255]
193 $ hg book bm1
193 $ hg book bm1
194
194
195 FYI, in contrast to above test, bmX is invisible in repo1 (= shared
195 FYI, in contrast to above test, bmX is invisible in repo1 (= shared
196 src), because (1) HG_PENDING refers only repo3 and (2)
196 src), because (1) HG_PENDING refers only repo3 and (2)
197 "bookmarks.pending" is written only into repo3.
197 "bookmarks.pending" is written only into repo3.
198
198
199 $ cd ../repo3
199 $ cd ../repo3
200 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
200 $ hg --config hooks.pretxnclose="sh $TESTTMP/checkbookmarks.sh" -q book bmX
201 @repo1
201 @repo1
202 * bm1 2:c2e0ac586386
202 * bm1 2:c2e0ac586386
203 bm3 2:c2e0ac586386
203 bm3 2:c2e0ac586386
204 @repo2
204 @repo2
205 * bm2 3:0e6e70d1d5f1
205 * bm2 3:0e6e70d1d5f1
206 @repo3
206 @repo3
207 bm1 2:c2e0ac586386
207 bm1 2:c2e0ac586386
208 bm3 2:c2e0ac586386
208 bm3 2:c2e0ac586386
209 * bmX 2:c2e0ac586386
209 * bmX 2:c2e0ac586386
210 transaction abort!
210 transaction abort!
211 rollback completed
211 rollback completed
212 abort: pretxnclose hook exited with status 1
212 abort: pretxnclose hook exited with status 1
213 [255]
213 [255]
214 $ hg book bm3
214 $ hg book bm3
215
215
216 $ cd ../repo1
216 $ cd ../repo1
217
217
218 test that commits work
218 test that commits work
219
219
220 $ echo 'shared bookmarks' > a
220 $ echo 'shared bookmarks' > a
221 $ hg commit -m 'testing shared bookmarks'
221 $ hg commit -m 'testing shared bookmarks'
222 $ hg bookmarks
222 $ hg bookmarks
223 * bm1 3:b87954705719
223 * bm1 3:b87954705719
224 bm3 2:c2e0ac586386
224 bm3 2:c2e0ac586386
225 $ cd ../repo3
225 $ cd ../repo3
226 $ hg bookmarks
226 $ hg bookmarks
227 bm1 3:b87954705719
227 bm1 3:b87954705719
228 * bm3 2:c2e0ac586386
228 * bm3 2:c2e0ac586386
229 $ echo 'more shared bookmarks' > a
229 $ echo 'more shared bookmarks' > a
230 $ hg commit -m 'testing shared bookmarks'
230 $ hg commit -m 'testing shared bookmarks'
231 created new head
231 created new head
232 $ hg bookmarks
232 $ hg bookmarks
233 bm1 3:b87954705719
233 bm1 3:b87954705719
234 * bm3 4:62f4ded848e4
234 * bm3 4:62f4ded848e4
235 $ cd ../repo1
235 $ cd ../repo1
236 $ hg bookmarks
236 $ hg bookmarks
237 * bm1 3:b87954705719
237 * bm1 3:b87954705719
238 bm3 4:62f4ded848e4
238 bm3 4:62f4ded848e4
239 $ cd ..
239 $ cd ..
240
240
241 test pushing bookmarks works
241 test pushing bookmarks works
242
242
243 $ hg clone repo3 repo4
243 $ hg clone repo3 repo4
244 updating to branch default
244 updating to branch default
245 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
245 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
246 $ cd repo4
246 $ cd repo4
247 $ hg boo bm4
247 $ hg boo bm4
248 $ echo foo > b
248 $ echo foo > b
249 $ hg commit -m 'foo in b'
249 $ hg commit -m 'foo in b'
250 $ hg boo
250 $ hg boo
251 bm1 3:b87954705719
251 bm1 3:b87954705719
252 bm3 4:62f4ded848e4
252 bm3 4:62f4ded848e4
253 * bm4 5:92793bfc8cad
253 * bm4 5:92793bfc8cad
254 $ hg push -B bm4
254 $ hg push -B bm4
255 pushing to $TESTTMP/repo3 (glob)
255 pushing to $TESTTMP/repo3 (glob)
256 searching for changes
256 searching for changes
257 adding changesets
257 adding changesets
258 adding manifests
258 adding manifests
259 adding file changes
259 adding file changes
260 added 1 changesets with 1 changes to 1 files
260 added 1 changesets with 1 changes to 1 files
261 exporting bookmark bm4
261 exporting bookmark bm4
262 $ cd ../repo1
262 $ cd ../repo1
263 $ hg bookmarks
263 $ hg bookmarks
264 * bm1 3:b87954705719
264 * bm1 3:b87954705719
265 bm3 4:62f4ded848e4
265 bm3 4:62f4ded848e4
266 bm4 5:92793bfc8cad
266 bm4 5:92793bfc8cad
267 $ cd ../repo3
267 $ cd ../repo3
268 $ hg bookmarks
268 $ hg bookmarks
269 bm1 3:b87954705719
269 bm1 3:b87954705719
270 * bm3 4:62f4ded848e4
270 * bm3 4:62f4ded848e4
271 bm4 5:92793bfc8cad
271 bm4 5:92793bfc8cad
272 $ cd ..
272 $ cd ..
273
273
274 test behavior when sharing a shared repo
274 test behavior when sharing a shared repo
275
275
276 $ hg share -B repo3 repo5
276 $ hg share -B repo3 repo5
277 updating working directory
277 updating working directory
278 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
278 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
279 $ cd repo5
279 $ cd repo5
280 $ hg book
280 $ hg book
281 bm1 3:b87954705719
281 bm1 3:b87954705719
282 bm3 4:62f4ded848e4
282 bm3 4:62f4ded848e4
283 bm4 5:92793bfc8cad
283 bm4 5:92793bfc8cad
284 $ cd ..
284 $ cd ..
285
285
286 test what happens when an active bookmark is deleted
286 test what happens when an active bookmark is deleted
287
287
288 $ cd repo1
288 $ cd repo1
289 $ hg boo -d bm3
289 $ hg boo -d bm3
290 $ hg boo
290 $ hg boo
291 * bm1 3:b87954705719
291 * bm1 3:b87954705719
292 bm4 5:92793bfc8cad
292 bm4 5:92793bfc8cad
293 $ cd ../repo3
293 $ cd ../repo3
294 $ hg boo
294 $ hg boo
295 bm1 3:b87954705719
295 bm1 3:b87954705719
296 bm4 5:92793bfc8cad
296 bm4 5:92793bfc8cad
297 $ cd ..
297 $ cd ..
298
298
299 verify that bookmarks are not written on failed transaction
299 verify that bookmarks are not written on failed transaction
300
300
301 $ cat > failpullbookmarks.py << EOF
301 $ cat > failpullbookmarks.py << EOF
302 > """A small extension that makes bookmark pulls fail, for testing"""
302 > """A small extension that makes bookmark pulls fail, for testing"""
303 > from mercurial import extensions, exchange, error
303 > from mercurial import extensions, exchange, error
304 > def _pullbookmarks(orig, pullop):
304 > def _pullbookmarks(orig, pullop):
305 > orig(pullop)
305 > orig(pullop)
306 > raise error.HookAbort('forced failure by extension')
306 > raise error.HookAbort('forced failure by extension')
307 > def extsetup(ui):
307 > def extsetup(ui):
308 > extensions.wrapfunction(exchange, '_pullbookmarks', _pullbookmarks)
308 > extensions.wrapfunction(exchange, '_pullbookmarks', _pullbookmarks)
309 > EOF
309 > EOF
310 $ cd repo4
310 $ cd repo4
311 $ hg boo
311 $ hg boo
312 bm1 3:b87954705719
312 bm1 3:b87954705719
313 bm3 4:62f4ded848e4
313 bm3 4:62f4ded848e4
314 * bm4 5:92793bfc8cad
314 * bm4 5:92793bfc8cad
315 $ cd ../repo3
315 $ cd ../repo3
316 $ hg boo
316 $ hg boo
317 bm1 3:b87954705719
317 bm1 3:b87954705719
318 bm4 5:92793bfc8cad
318 bm4 5:92793bfc8cad
319 $ hg --config "extensions.failpullbookmarks=$TESTTMP/failpullbookmarks.py" pull $TESTTMP/repo4
319 $ hg --config "extensions.failpullbookmarks=$TESTTMP/failpullbookmarks.py" pull $TESTTMP/repo4
320 pulling from $TESTTMP/repo4 (glob)
320 pulling from $TESTTMP/repo4 (glob)
321 searching for changes
321 searching for changes
322 no changes found
322 no changes found
323 adding remote bookmark bm3
323 adding remote bookmark bm3
324 abort: forced failure by extension
324 abort: forced failure by extension
325 [255]
325 [255]
326 $ hg boo
326 $ hg boo
327 bm1 3:b87954705719
327 bm1 3:b87954705719
328 bm4 5:92793bfc8cad
328 bm4 5:92793bfc8cad
329 $ hg pull $TESTTMP/repo4
329 $ hg pull $TESTTMP/repo4
330 pulling from $TESTTMP/repo4 (glob)
330 pulling from $TESTTMP/repo4 (glob)
331 searching for changes
331 searching for changes
332 no changes found
332 no changes found
333 adding remote bookmark bm3
333 adding remote bookmark bm3
334 $ hg boo
334 $ hg boo
335 bm1 3:b87954705719
335 bm1 3:b87954705719
336 * bm3 4:62f4ded848e4
336 * bm3 4:62f4ded848e4
337 bm4 5:92793bfc8cad
337 bm4 5:92793bfc8cad
338 $ cd ..
338 $ cd ..
339
339
340 verify bookmark behavior after unshare
340 verify bookmark behavior after unshare
341
341
342 $ cd repo3
342 $ cd repo3
343 $ hg unshare
343 $ hg unshare
344 $ hg boo
344 $ hg boo
345 bm1 3:b87954705719
345 bm1 3:b87954705719
346 * bm3 4:62f4ded848e4
346 * bm3 4:62f4ded848e4
347 bm4 5:92793bfc8cad
347 bm4 5:92793bfc8cad
348 $ hg boo -d bm4
348 $ hg boo -d bm4
349 $ hg boo bm5
349 $ hg boo bm5
350 $ hg boo
350 $ hg boo
351 bm1 3:b87954705719
351 bm1 3:b87954705719
352 bm3 4:62f4ded848e4
352 bm3 4:62f4ded848e4
353 * bm5 4:62f4ded848e4
353 * bm5 4:62f4ded848e4
354 $ cd ../repo1
354 $ cd ../repo1
355 $ hg boo
355 $ hg boo
356 * bm1 3:b87954705719
356 * bm1 3:b87954705719
357 bm3 4:62f4ded848e4
357 bm3 4:62f4ded848e4
358 bm4 5:92793bfc8cad
358 bm4 5:92793bfc8cad
359 $ cd ..
359 $ cd ..
360
360
361 test shared clones using relative paths work
362
363 $ mkdir thisdir
364 $ hg init thisdir/orig
365 $ hg share -U thisdir/orig thisdir/abs
366 $ hg share -U --relative thisdir/abs thisdir/rel
367 $ cat thisdir/rel/.hg/sharedpath
368 ../../orig/.hg (no-eol)
369 $ grep shared thisdir/*/.hg/requires
370 thisdir/abs/.hg/requires:shared
371 thisdir/rel/.hg/requires:shared
372 thisdir/rel/.hg/requires:relshared
373
374 test that relative shared paths aren't relative to $PWD
375
376 $ cd thisdir
377 $ hg -R rel root
378 $TESTTMP/thisdir/rel
379 $ cd ..
380
381 now test that relative paths really are relative, survive across
382 renames and changes of PWD
383
384 $ hg -R thisdir/abs root
385 $TESTTMP/thisdir/abs
386 $ hg -R thisdir/rel root
387 $TESTTMP/thisdir/rel
388 $ mv thisdir thatdir
389 $ hg -R thatdir/abs root
390 abort: .hg/sharedpath points to nonexistent directory $TESTTMP/thisdir/orig/.hg!
391 [255]
392 $ hg -R thatdir/rel root
393 $TESTTMP/thatdir/rel
394 $ rm -r thatdir
395
361 Explicitly kill daemons to let the test exit on Windows
396 Explicitly kill daemons to let the test exit on Windows
362
397
363 $ killdaemons.py
398 $ killdaemons.py
364
399
General Comments 0
You need to be logged in to leave comments. Login now