localrepo: make supported features manageable in each repositories individually...
FUJIWARA Katsunori
r19778:55ef7903 default
--- a/hgext/largefiles/uisetup.py
+++ b/hgext/largefiles/uisetup.py
@@ -1,178 +1,178 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 '''setup for largefiles extension: uisetup'''

 from mercurial import archival, cmdutil, commands, extensions, filemerge, hg, \
     httppeer, localrepo, merge, scmutil, sshpeer, wireproto, revset
 from mercurial.i18n import _
 from mercurial.hgweb import hgweb_mod, webcommands
 from mercurial.subrepo import hgsubrepo

 import overrides
 import proto

 def uisetup(ui):
     # Disable auto-status for some commands which assume that all
     # files in the result are under Mercurial's control

     entry = extensions.wrapcommand(commands.table, 'add',
                                    overrides.overrideadd)
     addopt = [('', 'large', None, _('add as largefile')),
               ('', 'normal', None, _('add as normal file')),
               ('', 'lfsize', '', _('add all files above this size '
                                    '(in megabytes) as largefiles '
                                    '(default: 10)'))]
     entry[1].extend(addopt)

     # The scmutil function is called both by the (trivial) addremove command,
     # and in the process of handling commit -A (issue3542)
     entry = extensions.wrapfunction(scmutil, 'addremove',
                                     overrides.scmutiladdremove)
     entry = extensions.wrapcommand(commands.table, 'remove',
                                    overrides.overrideremove)
     entry = extensions.wrapcommand(commands.table, 'forget',
                                    overrides.overrideforget)

     # Subrepos call status function
     entry = extensions.wrapcommand(commands.table, 'status',
                                    overrides.overridestatus)
     entry = extensions.wrapfunction(hgsubrepo, 'status',
                                     overrides.overridestatusfn)

     entry = extensions.wrapcommand(commands.table, 'log',
                                    overrides.overridelog)
     entry = extensions.wrapcommand(commands.table, 'rollback',
                                    overrides.overriderollback)
     entry = extensions.wrapcommand(commands.table, 'verify',
                                    overrides.overrideverify)

     verifyopt = [('', 'large', None,
                   _('verify that all largefiles in current revision exists')),
                  ('', 'lfa', None,
                   _('verify largefiles in all revisions, not just current')),
                  ('', 'lfc', None,
                   _('verify local largefile contents, not just existence'))]
     entry[1].extend(verifyopt)

     entry = extensions.wrapcommand(commands.table, 'debugstate',
                                    overrides.overridedebugstate)
     debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
     entry[1].extend(debugstateopt)

     entry = extensions.wrapcommand(commands.table, 'outgoing',
                                    overrides.overrideoutgoing)
     outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
     entry[1].extend(outgoingopt)
     entry = extensions.wrapcommand(commands.table, 'summary',
                                    overrides.overridesummary)
     summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
     entry[1].extend(summaryopt)

     entry = extensions.wrapcommand(commands.table, 'update',
                                    overrides.overrideupdate)
     entry = extensions.wrapcommand(commands.table, 'pull',
                                    overrides.overridepull)
     pullopt = [('', 'all-largefiles', None,
                 _('download all pulled versions of largefiles (DEPRECATED)')),
                ('', 'lfrev', [],
                 _('download largefiles for these revisions'), _('REV'))]
     entry[1].extend(pullopt)
     revset.symbols['pulled'] = overrides.pulledrevsetsymbol

     entry = extensions.wrapcommand(commands.table, 'clone',
                                    overrides.overrideclone)
     cloneopt = [('', 'all-largefiles', None,
                  _('download all versions of all largefiles'))]
     entry[1].extend(cloneopt)
     entry = extensions.wrapfunction(hg, 'clone', overrides.hgclone)

     entry = extensions.wrapcommand(commands.table, 'cat',
                                    overrides.overridecat)
     entry = extensions.wrapfunction(merge, '_checkunknownfile',
                                     overrides.overridecheckunknownfile)
     entry = extensions.wrapfunction(merge, 'manifestmerge',
                                     overrides.overridemanifestmerge)
     entry = extensions.wrapfunction(filemerge, 'filemerge',
                                     overrides.overridefilemerge)
     entry = extensions.wrapfunction(cmdutil, 'copy',
                                     overrides.overridecopy)

     # Summary calls dirty on the subrepos
     entry = extensions.wrapfunction(hgsubrepo, 'dirty',
                                     overrides.overridedirty)

     # Backout calls revert so we need to override both the command and the
     # function
     entry = extensions.wrapcommand(commands.table, 'revert',
                                    overrides.overriderevert)
     entry = extensions.wrapfunction(commands, 'revert',
                                     overrides.overriderevert)

     extensions.wrapfunction(hg, 'updaterepo', overrides.hgupdaterepo)
     extensions.wrapfunction(hg, 'merge', overrides.hgmerge)

     extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
     extensions.wrapfunction(hgsubrepo, 'archive', overrides.hgsubrepoarchive)
     extensions.wrapfunction(cmdutil, 'bailifchanged',
                             overrides.overridebailifchanged)

     # create the new wireproto commands ...
     wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
     wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
     wireproto.commands['statlfile'] = (proto.statlfile, 'sha')

     # ... and wrap some existing ones
     wireproto.commands['capabilities'] = (proto.capabilities, '')
     wireproto.commands['heads'] = (proto.heads, '')
     wireproto.commands['lheads'] = (wireproto.heads, '')

     # make putlfile behave the same as push and {get,stat}lfile behave
     # the same as pull w.r.t. permissions checks
     hgweb_mod.perms['putlfile'] = 'push'
     hgweb_mod.perms['getlfile'] = 'pull'
     hgweb_mod.perms['statlfile'] = 'pull'

     extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)

     # the hello wireproto command uses wireproto.capabilities, so it won't see
     # our largefiles capability unless we replace the actual function as well.
     proto.capabilitiesorig = wireproto.capabilities
     wireproto.capabilities = proto.capabilities

     # can't do this in reposetup because it needs to have happened before
     # wirerepo.__init__ is called
     proto.ssholdcallstream = sshpeer.sshpeer._callstream
     proto.httpoldcallstream = httppeer.httppeer._callstream
     sshpeer.sshpeer._callstream = proto.sshrepocallstream
     httppeer.httppeer._callstream = proto.httprepocallstream

     # don't die on seeing a repo with the largefiles requirement
-    localrepo.localrepository.supported |= set(['largefiles'])
+    localrepo.localrepository._basesupported |= set(['largefiles'])

     # override some extensions' stuff as well
     for name, module in extensions.extensions():
         if name == 'fetch':
             extensions.wrapcommand(getattr(module, 'cmdtable'), 'fetch',
                                    overrides.overridefetch)
         if name == 'purge':
             extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
                                    overrides.overridepurge)
         if name == 'rebase':
             extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
                                    overrides.overriderebase)
         if name == 'transplant':
             extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
                                    overrides.overridetransplant)
         if name == 'convert':
             convcmd = getattr(module, 'convcmd')
             hgsink = getattr(convcmd, 'mercurial_sink')
             extensions.wrapfunction(hgsink, 'before',
                                     overrides.mercurialsinkbefore)
             extensions.wrapfunction(hgsink, 'after',
                                     overrides.mercurialsinkafter)
@@ -1,2444 +1,2469 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding
11 import lock, transaction, store, encoding
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 import branchmap
18 import branchmap
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class repofilecache(filecache):
22 class repofilecache(filecache):
23 """All filecache usage on repo are done for logic that should be unfiltered
23 """All filecache usage on repo are done for logic that should be unfiltered
24 """
24 """
25
25
26 def __get__(self, repo, type=None):
26 def __get__(self, repo, type=None):
27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 def __set__(self, repo, value):
28 def __set__(self, repo, value):
29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 def __delete__(self, repo):
30 def __delete__(self, repo):
31 return super(repofilecache, self).__delete__(repo.unfiltered())
31 return super(repofilecache, self).__delete__(repo.unfiltered())
32
32
33 class storecache(repofilecache):
33 class storecache(repofilecache):
34 """filecache for files in the store"""
34 """filecache for files in the store"""
35 def join(self, obj, fname):
35 def join(self, obj, fname):
36 return obj.sjoin(fname)
36 return obj.sjoin(fname)
37
37
38 class unfilteredpropertycache(propertycache):
38 class unfilteredpropertycache(propertycache):
39 """propertycache that apply to unfiltered repo only"""
39 """propertycache that apply to unfiltered repo only"""
40
40
41 def __get__(self, repo, type=None):
41 def __get__(self, repo, type=None):
42 if hasunfilteredcache(repo, self.name):
42 if hasunfilteredcache(repo, self.name):
43 return getattr(repo.unfiltered(), self.name)
43 return getattr(repo.unfiltered(), self.name)
44 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
44 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
45
45
46 class filteredpropertycache(propertycache):
46 class filteredpropertycache(propertycache):
47 """propertycache that must take filtering in account"""
47 """propertycache that must take filtering in account"""
48
48
49 def cachevalue(self, obj, value):
49 def cachevalue(self, obj, value):
50 object.__setattr__(obj, self.name, value)
50 object.__setattr__(obj, self.name, value)
51
51
52
52
53 def hasunfilteredcache(repo, name):
53 def hasunfilteredcache(repo, name):
54 """check if a repo has an unfilteredpropertycache value for <name>"""
54 """check if a repo has an unfilteredpropertycache value for <name>"""
55 return name in vars(repo.unfiltered())
55 return name in vars(repo.unfiltered())
56
56
57 def unfilteredmethod(orig):
57 def unfilteredmethod(orig):
58 """decorate method that always need to be run on unfiltered version"""
58 """decorate method that always need to be run on unfiltered version"""
59 def wrapper(repo, *args, **kwargs):
59 def wrapper(repo, *args, **kwargs):
60 return orig(repo.unfiltered(), *args, **kwargs)
60 return orig(repo.unfiltered(), *args, **kwargs)
61 return wrapper
61 return wrapper
62
62
63 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
63 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
64 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
64 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
65
65
66 class localpeer(peer.peerrepository):
66 class localpeer(peer.peerrepository):
67 '''peer for a local repo; reflects only the most recent API'''
67 '''peer for a local repo; reflects only the most recent API'''
68
68
69 def __init__(self, repo, caps=MODERNCAPS):
69 def __init__(self, repo, caps=MODERNCAPS):
70 peer.peerrepository.__init__(self)
70 peer.peerrepository.__init__(self)
71 self._repo = repo.filtered('served')
71 self._repo = repo.filtered('served')
72 self.ui = repo.ui
72 self.ui = repo.ui
73 self._caps = repo._restrictcapabilities(caps)
73 self._caps = repo._restrictcapabilities(caps)
74 self.requirements = repo.requirements
74 self.requirements = repo.requirements
75 self.supportedformats = repo.supportedformats
75 self.supportedformats = repo.supportedformats
76
76
77 def close(self):
77 def close(self):
78 self._repo.close()
78 self._repo.close()
79
79
80 def _capabilities(self):
80 def _capabilities(self):
81 return self._caps
81 return self._caps
82
82
83 def local(self):
83 def local(self):
84 return self._repo
84 return self._repo
85
85
86 def canpush(self):
86 def canpush(self):
87 return True
87 return True
88
88
89 def url(self):
89 def url(self):
90 return self._repo.url()
90 return self._repo.url()
91
91
92 def lookup(self, key):
92 def lookup(self, key):
93 return self._repo.lookup(key)
93 return self._repo.lookup(key)
94
94
95 def branchmap(self):
95 def branchmap(self):
96 return self._repo.branchmap()
96 return self._repo.branchmap()
97
97
98 def heads(self):
98 def heads(self):
99 return self._repo.heads()
99 return self._repo.heads()
100
100
101 def known(self, nodes):
101 def known(self, nodes):
102 return self._repo.known(nodes)
102 return self._repo.known(nodes)
103
103
104 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
104 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
105 return self._repo.getbundle(source, heads=heads, common=common,
105 return self._repo.getbundle(source, heads=heads, common=common,
106 bundlecaps=None)
106 bundlecaps=None)
107
107
108 # TODO We might want to move the next two calls into legacypeer and add
108 # TODO We might want to move the next two calls into legacypeer and add
109 # unbundle instead.
109 # unbundle instead.
110
110
111 def lock(self):
111 def lock(self):
112 return self._repo.lock()
112 return self._repo.lock()
113
113
114 def addchangegroup(self, cg, source, url):
114 def addchangegroup(self, cg, source, url):
115 return self._repo.addchangegroup(cg, source, url)
115 return self._repo.addchangegroup(cg, source, url)
116
116
117 def pushkey(self, namespace, key, old, new):
117 def pushkey(self, namespace, key, old, new):
118 return self._repo.pushkey(namespace, key, old, new)
118 return self._repo.pushkey(namespace, key, old, new)
119
119
120 def listkeys(self, namespace):
120 def listkeys(self, namespace):
121 return self._repo.listkeys(namespace)
121 return self._repo.listkeys(namespace)
122
122
123 def debugwireargs(self, one, two, three=None, four=None, five=None):
123 def debugwireargs(self, one, two, three=None, four=None, five=None):
124 '''used to test argument passing over the wire'''
124 '''used to test argument passing over the wire'''
125 return "%s %s %s %s %s" % (one, two, three, four, five)
125 return "%s %s %s %s %s" % (one, two, three, four, five)
126
126
127 class locallegacypeer(localpeer):
127 class locallegacypeer(localpeer):
128 '''peer extension which implements legacy methods too; used for tests with
128 '''peer extension which implements legacy methods too; used for tests with
129 restricted capabilities'''
129 restricted capabilities'''
130
130
131 def __init__(self, repo):
131 def __init__(self, repo):
132 localpeer.__init__(self, repo, caps=LEGACYCAPS)
132 localpeer.__init__(self, repo, caps=LEGACYCAPS)
133
133
134 def branches(self, nodes):
134 def branches(self, nodes):
135 return self._repo.branches(nodes)
135 return self._repo.branches(nodes)
136
136
137 def between(self, pairs):
137 def between(self, pairs):
138 return self._repo.between(pairs)
138 return self._repo.between(pairs)
139
139
140 def changegroup(self, basenodes, source):
140 def changegroup(self, basenodes, source):
141 return self._repo.changegroup(basenodes, source)
141 return self._repo.changegroup(basenodes, source)
142
142
143 def changegroupsubset(self, bases, heads, source):
143 def changegroupsubset(self, bases, heads, source):
144 return self._repo.changegroupsubset(bases, heads, source)
144 return self._repo.changegroupsubset(bases, heads, source)
145
145
146 class localrepository(object):
146 class localrepository(object):
147
147
148 supportedformats = set(('revlogv1', 'generaldelta'))
148 supportedformats = set(('revlogv1', 'generaldelta'))
149 supported = supportedformats | set(('store', 'fncache', 'shared',
149 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
150 'dotencode'))
150 'dotencode'))
151 openerreqs = set(('revlogv1', 'generaldelta'))
151 openerreqs = set(('revlogv1', 'generaldelta'))
152 requirements = ['revlogv1']
152 requirements = ['revlogv1']
153 filtername = None
153 filtername = None
154
154
155 featuresetupfuncs = set()
156
155 def _baserequirements(self, create):
157 def _baserequirements(self, create):
156 return self.requirements[:]
158 return self.requirements[:]
157
159
158 def __init__(self, baseui, path=None, create=False):
160 def __init__(self, baseui, path=None, create=False):
159 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
161 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
160 self.wopener = self.wvfs
162 self.wopener = self.wvfs
161 self.root = self.wvfs.base
163 self.root = self.wvfs.base
162 self.path = self.wvfs.join(".hg")
164 self.path = self.wvfs.join(".hg")
163 self.origroot = path
165 self.origroot = path
164 self.auditor = scmutil.pathauditor(self.root, self._checknested)
166 self.auditor = scmutil.pathauditor(self.root, self._checknested)
165 self.vfs = scmutil.vfs(self.path)
167 self.vfs = scmutil.vfs(self.path)
166 self.opener = self.vfs
168 self.opener = self.vfs
167 self.baseui = baseui
169 self.baseui = baseui
168 self.ui = baseui.copy()
170 self.ui = baseui.copy()
169 # A list of callback to shape the phase if no data were found.
171 # A list of callback to shape the phase if no data were found.
170 # Callback are in the form: func(repo, roots) --> processed root.
172 # Callback are in the form: func(repo, roots) --> processed root.
171 # This list it to be filled by extension during repo setup
173 # This list it to be filled by extension during repo setup
172 self._phasedefaults = []
174 self._phasedefaults = []
173 try:
175 try:
174 self.ui.readconfig(self.join("hgrc"), self.root)
176 self.ui.readconfig(self.join("hgrc"), self.root)
175 extensions.loadall(self.ui)
177 extensions.loadall(self.ui)
176 except IOError:
178 except IOError:
177 pass
179 pass
178
180
181 if self.featuresetupfuncs:
182 self.supported = set(self._basesupported) # use private copy
183 for setupfunc in self.featuresetupfuncs:
184 setupfunc(self.ui, self.supported)
185 else:
186 self.supported = self._basesupported
187
179 if not self.vfs.isdir():
188 if not self.vfs.isdir():
180 if create:
189 if create:
181 if not self.wvfs.exists():
190 if not self.wvfs.exists():
182 self.wvfs.makedirs()
191 self.wvfs.makedirs()
183 self.vfs.makedir(notindexed=True)
192 self.vfs.makedir(notindexed=True)
184 requirements = self._baserequirements(create)
193 requirements = self._baserequirements(create)
185 if self.ui.configbool('format', 'usestore', True):
194 if self.ui.configbool('format', 'usestore', True):
186 self.vfs.mkdir("store")
195 self.vfs.mkdir("store")
187 requirements.append("store")
196 requirements.append("store")
188 if self.ui.configbool('format', 'usefncache', True):
197 if self.ui.configbool('format', 'usefncache', True):
189 requirements.append("fncache")
198 requirements.append("fncache")
190 if self.ui.configbool('format', 'dotencode', True):
199 if self.ui.configbool('format', 'dotencode', True):
191 requirements.append('dotencode')
200 requirements.append('dotencode')
192 # create an invalid changelog
201 # create an invalid changelog
193 self.vfs.append(
202 self.vfs.append(
194 "00changelog.i",
203 "00changelog.i",
195 '\0\0\0\2' # represents revlogv2
204 '\0\0\0\2' # represents revlogv2
196 ' dummy changelog to prevent using the old repo layout'
205 ' dummy changelog to prevent using the old repo layout'
197 )
206 )
198 if self.ui.configbool('format', 'generaldelta', False):
207 if self.ui.configbool('format', 'generaldelta', False):
199 requirements.append("generaldelta")
208 requirements.append("generaldelta")
200 requirements = set(requirements)
209 requirements = set(requirements)
201 else:
210 else:
202 raise error.RepoError(_("repository %s not found") % path)
211 raise error.RepoError(_("repository %s not found") % path)
203 elif create:
212 elif create:
204 raise error.RepoError(_("repository %s already exists") % path)
213 raise error.RepoError(_("repository %s already exists") % path)
205 else:
214 else:
206 try:
215 try:
207 requirements = scmutil.readrequires(self.vfs, self.supported)
216 requirements = scmutil.readrequires(self.vfs, self.supported)
208 except IOError, inst:
217 except IOError, inst:
209 if inst.errno != errno.ENOENT:
218 if inst.errno != errno.ENOENT:
210 raise
219 raise
211 requirements = set()
220 requirements = set()
212
221
213 self.sharedpath = self.path
222 self.sharedpath = self.path
214 try:
223 try:
215 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
224 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
216 realpath=True)
225 realpath=True)
217 s = vfs.base
226 s = vfs.base
218 if not vfs.exists():
227 if not vfs.exists():
219 raise error.RepoError(
228 raise error.RepoError(
220 _('.hg/sharedpath points to nonexistent directory %s') % s)
229 _('.hg/sharedpath points to nonexistent directory %s') % s)
221 self.sharedpath = s
230 self.sharedpath = s
222 except IOError, inst:
231 except IOError, inst:
223 if inst.errno != errno.ENOENT:
232 if inst.errno != errno.ENOENT:
224 raise
233 raise
225
234
226 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
235 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
227 self.spath = self.store.path
236 self.spath = self.store.path
228 self.svfs = self.store.vfs
237 self.svfs = self.store.vfs
229 self.sopener = self.svfs
238 self.sopener = self.svfs
230 self.sjoin = self.store.join
239 self.sjoin = self.store.join
231 self.vfs.createmode = self.store.createmode
240 self.vfs.createmode = self.store.createmode
232 self._applyrequirements(requirements)
241 self._applyrequirements(requirements)
233 if create:
242 if create:
234 self._writerequirements()
243 self._writerequirements()
235
244
236
245
237 self._branchcaches = {}
246 self._branchcaches = {}
238 self.filterpats = {}
247 self.filterpats = {}
239 self._datafilters = {}
248 self._datafilters = {}
240 self._transref = self._lockref = self._wlockref = None
249 self._transref = self._lockref = self._wlockref = None
241
250
242 # A cache for various files under .hg/ that tracks file changes,
251 # A cache for various files under .hg/ that tracks file changes,
243 # (used by the filecache decorator)
252 # (used by the filecache decorator)
244 #
253 #
245 # Maps a property name to its util.filecacheentry
254 # Maps a property name to its util.filecacheentry
246 self._filecache = {}
255 self._filecache = {}
247
256
248 # hold sets of revision to be filtered
257 # hold sets of revision to be filtered
249 # should be cleared when something might have changed the filter value:
258 # should be cleared when something might have changed the filter value:
250 # - new changesets,
259 # - new changesets,
251 # - phase change,
260 # - phase change,
252 # - new obsolescence marker,
261 # - new obsolescence marker,
253 # - working directory parent change,
262 # - working directory parent change,
254 # - bookmark changes
263 # - bookmark changes
255 self.filteredrevcache = {}
264 self.filteredrevcache = {}
256
265
257 def close(self):
266 def close(self):
258 pass
267 pass
259
268
260 def _restrictcapabilities(self, caps):
269 def _restrictcapabilities(self, caps):
261 return caps
270 return caps
262
271
263 def _applyrequirements(self, requirements):
272 def _applyrequirements(self, requirements):
264 self.requirements = requirements
273 self.requirements = requirements
265 self.sopener.options = dict((r, 1) for r in requirements
274 self.sopener.options = dict((r, 1) for r in requirements
266 if r in self.openerreqs)
275 if r in self.openerreqs)
267
276
268 def _writerequirements(self):
277 def _writerequirements(self):
269 reqfile = self.opener("requires", "w")
278 reqfile = self.opener("requires", "w")
270 for r in sorted(self.requirements):
279 for r in sorted(self.requirements):
271 reqfile.write("%s\n" % r)
280 reqfile.write("%s\n" % r)
272 reqfile.close()
281 reqfile.close()
273
282
274 def _checknested(self, path):
283 def _checknested(self, path):
275 """Determine if path is a legal nested repository."""
284 """Determine if path is a legal nested repository."""
276 if not path.startswith(self.root):
285 if not path.startswith(self.root):
277 return False
286 return False
278 subpath = path[len(self.root) + 1:]
287 subpath = path[len(self.root) + 1:]
279 normsubpath = util.pconvert(subpath)
288 normsubpath = util.pconvert(subpath)
280
289
281 # XXX: Checking against the current working copy is wrong in
290 # XXX: Checking against the current working copy is wrong in
282 # the sense that it can reject things like
291 # the sense that it can reject things like
283 #
292 #
284 # $ hg cat -r 10 sub/x.txt
293 # $ hg cat -r 10 sub/x.txt
285 #
294 #
286 # if sub/ is no longer a subrepository in the working copy
295 # if sub/ is no longer a subrepository in the working copy
287 # parent revision.
296 # parent revision.
288 #
297 #
289 # However, it can of course also allow things that would have
298 # However, it can of course also allow things that would have
290 # been rejected before, such as the above cat command if sub/
299 # been rejected before, such as the above cat command if sub/
291 # is a subrepository now, but was a normal directory before.
300 # is a subrepository now, but was a normal directory before.
292 # The old path auditor would have rejected by mistake since it
301 # The old path auditor would have rejected by mistake since it
293 # panics when it sees sub/.hg/.
302 # panics when it sees sub/.hg/.
294 #
303 #
295 # All in all, checking against the working copy seems sensible
304 # All in all, checking against the working copy seems sensible
296 # since we want to prevent access to nested repositories on
305 # since we want to prevent access to nested repositories on
297 # the filesystem *now*.
306 # the filesystem *now*.
298 ctx = self[None]
307 ctx = self[None]
299 parts = util.splitpath(subpath)
308 parts = util.splitpath(subpath)
300 while parts:
309 while parts:
301 prefix = '/'.join(parts)
310 prefix = '/'.join(parts)
302 if prefix in ctx.substate:
311 if prefix in ctx.substate:
303 if prefix == normsubpath:
312 if prefix == normsubpath:
304 return True
313 return True
305 else:
314 else:
306 sub = ctx.sub(prefix)
315 sub = ctx.sub(prefix)
307 return sub.checknested(subpath[len(prefix) + 1:])
316 return sub.checknested(subpath[len(prefix) + 1:])
308 else:
317 else:
309 parts.pop()
318 parts.pop()
310 return False
319 return False
311
320
312 def peer(self):
321 def peer(self):
313 return localpeer(self) # not cached to avoid reference cycle
322 return localpeer(self) # not cached to avoid reference cycle
314
323
315 def unfiltered(self):
324 def unfiltered(self):
316 """Return unfiltered version of the repository
325 """Return unfiltered version of the repository
317
326
318 Intended to be overwritten by filtered repo."""
327 Intended to be overwritten by filtered repo."""
319 return self
328 return self
320
329
321 def filtered(self, name):
330 def filtered(self, name):
322 """Return a filtered version of a repository"""
331 """Return a filtered version of a repository"""
323 # build a new class with the mixin and the current class
332 # build a new class with the mixin and the current class
324 # (possibly subclass of the repo)
333 # (possibly subclass of the repo)
325 class proxycls(repoview.repoview, self.unfiltered().__class__):
334 class proxycls(repoview.repoview, self.unfiltered().__class__):
326 pass
335 pass
327 return proxycls(self, name)
336 return proxycls(self, name)
328
337
329 @repofilecache('bookmarks')
338 @repofilecache('bookmarks')
330 def _bookmarks(self):
339 def _bookmarks(self):
331 return bookmarks.bmstore(self)
340 return bookmarks.bmstore(self)
332
341
333 @repofilecache('bookmarks.current')
342 @repofilecache('bookmarks.current')
334 def _bookmarkcurrent(self):
343 def _bookmarkcurrent(self):
335 return bookmarks.readcurrent(self)
344 return bookmarks.readcurrent(self)
336
345
337 def bookmarkheads(self, bookmark):
346 def bookmarkheads(self, bookmark):
338 name = bookmark.split('@', 1)[0]
347 name = bookmark.split('@', 1)[0]
339 heads = []
348 heads = []
340 for mark, n in self._bookmarks.iteritems():
349 for mark, n in self._bookmarks.iteritems():
341 if mark.split('@', 1)[0] == name:
350 if mark.split('@', 1)[0] == name:
342 heads.append(n)
351 heads.append(n)
343 return heads
352 return heads
344
353
345 @storecache('phaseroots')
354 @storecache('phaseroots')
346 def _phasecache(self):
355 def _phasecache(self):
347 return phases.phasecache(self, self._phasedefaults)
356 return phases.phasecache(self, self._phasedefaults)
348
357
349 @storecache('obsstore')
358 @storecache('obsstore')
350 def obsstore(self):
359 def obsstore(self):
351 store = obsolete.obsstore(self.sopener)
360 store = obsolete.obsstore(self.sopener)
352 if store and not obsolete._enabled:
361 if store and not obsolete._enabled:
353 # message is rare enough to not be translated
362 # message is rare enough to not be translated
354 msg = 'obsolete feature not enabled but %i markers found!\n'
363 msg = 'obsolete feature not enabled but %i markers found!\n'
355 self.ui.warn(msg % len(list(store)))
364 self.ui.warn(msg % len(list(store)))
356 return store
365 return store
357
366
358 @storecache('00changelog.i')
367 @storecache('00changelog.i')
359 def changelog(self):
368 def changelog(self):
360 c = changelog.changelog(self.sopener)
369 c = changelog.changelog(self.sopener)
361 if 'HG_PENDING' in os.environ:
370 if 'HG_PENDING' in os.environ:
362 p = os.environ['HG_PENDING']
371 p = os.environ['HG_PENDING']
363 if p.startswith(self.root):
372 if p.startswith(self.root):
364 c.readpending('00changelog.i.a')
373 c.readpending('00changelog.i.a')
365 return c
374 return c
366
375
367 @storecache('00manifest.i')
376 @storecache('00manifest.i')
368 def manifest(self):
377 def manifest(self):
369 return manifest.manifest(self.sopener)
378 return manifest.manifest(self.sopener)
370
379
371 @repofilecache('dirstate')
380 @repofilecache('dirstate')
372 def dirstate(self):
381 def dirstate(self):
373 warned = [0]
382 warned = [0]
374 def validate(node):
383 def validate(node):
375 try:
384 try:
376 self.changelog.rev(node)
385 self.changelog.rev(node)
377 return node
386 return node
378 except error.LookupError:
387 except error.LookupError:
379 if not warned[0]:
388 if not warned[0]:
380 warned[0] = True
389 warned[0] = True
381 self.ui.warn(_("warning: ignoring unknown"
390 self.ui.warn(_("warning: ignoring unknown"
382 " working parent %s!\n") % short(node))
391 " working parent %s!\n") % short(node))
383 return nullid
392 return nullid
384
393
385 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
394 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
386
395
387 def __getitem__(self, changeid):
396 def __getitem__(self, changeid):
388 if changeid is None:
397 if changeid is None:
389 return context.workingctx(self)
398 return context.workingctx(self)
390 return context.changectx(self, changeid)
399 return context.changectx(self, changeid)
391
400
392 def __contains__(self, changeid):
401 def __contains__(self, changeid):
393 try:
402 try:
394 return bool(self.lookup(changeid))
403 return bool(self.lookup(changeid))
395 except error.RepoLookupError:
404 except error.RepoLookupError:
396 return False
405 return False
397
406
398 def __nonzero__(self):
407 def __nonzero__(self):
399 return True
408 return True
400
409
401 def __len__(self):
410 def __len__(self):
402 return len(self.changelog)
411 return len(self.changelog)
403
412
404 def __iter__(self):
413 def __iter__(self):
405 return iter(self.changelog)
414 return iter(self.changelog)
406
415
407 def revs(self, expr, *args):
416 def revs(self, expr, *args):
408 '''Return a list of revisions matching the given revset'''
417 '''Return a list of revisions matching the given revset'''
409 expr = revset.formatspec(expr, *args)
418 expr = revset.formatspec(expr, *args)
410 m = revset.match(None, expr)
419 m = revset.match(None, expr)
411 return [r for r in m(self, list(self))]
420 return [r for r in m(self, list(self))]
412
421
413 def set(self, expr, *args):
422 def set(self, expr, *args):
414 '''
423 '''
415 Yield a context for each matching revision, after doing arg
424 Yield a context for each matching revision, after doing arg
416 replacement via revset.formatspec
425 replacement via revset.formatspec
417 '''
426 '''
418 for r in self.revs(expr, *args):
427 for r in self.revs(expr, *args):
419 yield self[r]
428 yield self[r]
420
429
421 def url(self):
430 def url(self):
422 return 'file:' + self.root
431 return 'file:' + self.root
423
432
424 def hook(self, name, throw=False, **args):
433 def hook(self, name, throw=False, **args):
425 return hook.hook(self.ui, self, name, throw, **args)
434 return hook.hook(self.ui, self, name, throw, **args)
426
435
427 @unfilteredmethod
436 @unfilteredmethod
428 def _tag(self, names, node, message, local, user, date, extra={}):
437 def _tag(self, names, node, message, local, user, date, extra={}):
429 if isinstance(names, str):
438 if isinstance(names, str):
430 names = (names,)
439 names = (names,)
431
440
432 branches = self.branchmap()
441 branches = self.branchmap()
433 for name in names:
442 for name in names:
434 self.hook('pretag', throw=True, node=hex(node), tag=name,
443 self.hook('pretag', throw=True, node=hex(node), tag=name,
435 local=local)
444 local=local)
436 if name in branches:
445 if name in branches:
437 self.ui.warn(_("warning: tag %s conflicts with existing"
446 self.ui.warn(_("warning: tag %s conflicts with existing"
438 " branch name\n") % name)
447 " branch name\n") % name)
439
448
440 def writetags(fp, names, munge, prevtags):
449 def writetags(fp, names, munge, prevtags):
441 fp.seek(0, 2)
450 fp.seek(0, 2)
442 if prevtags and prevtags[-1] != '\n':
451 if prevtags and prevtags[-1] != '\n':
443 fp.write('\n')
452 fp.write('\n')
444 for name in names:
453 for name in names:
445 m = munge and munge(name) or name
454 m = munge and munge(name) or name
446 if (self._tagscache.tagtypes and
455 if (self._tagscache.tagtypes and
447 name in self._tagscache.tagtypes):
456 name in self._tagscache.tagtypes):
448 old = self.tags().get(name, nullid)
457 old = self.tags().get(name, nullid)
449 fp.write('%s %s\n' % (hex(old), m))
458 fp.write('%s %s\n' % (hex(old), m))
450 fp.write('%s %s\n' % (hex(node), m))
459 fp.write('%s %s\n' % (hex(node), m))
451 fp.close()
460 fp.close()
452
461
453 prevtags = ''
462 prevtags = ''
454 if local:
463 if local:
455 try:
464 try:
456 fp = self.opener('localtags', 'r+')
465 fp = self.opener('localtags', 'r+')
457 except IOError:
466 except IOError:
458 fp = self.opener('localtags', 'a')
467 fp = self.opener('localtags', 'a')
459 else:
468 else:
460 prevtags = fp.read()
469 prevtags = fp.read()
461
470
462 # local tags are stored in the current charset
471 # local tags are stored in the current charset
463 writetags(fp, names, None, prevtags)
472 writetags(fp, names, None, prevtags)
464 for name in names:
473 for name in names:
465 self.hook('tag', node=hex(node), tag=name, local=local)
474 self.hook('tag', node=hex(node), tag=name, local=local)
466 return
475 return
467
476
468 try:
477 try:
469 fp = self.wfile('.hgtags', 'rb+')
478 fp = self.wfile('.hgtags', 'rb+')
470 except IOError, e:
479 except IOError, e:
471 if e.errno != errno.ENOENT:
480 if e.errno != errno.ENOENT:
472 raise
481 raise
473 fp = self.wfile('.hgtags', 'ab')
482 fp = self.wfile('.hgtags', 'ab')
474 else:
483 else:
475 prevtags = fp.read()
484 prevtags = fp.read()
476
485
477 # committed tags are stored in UTF-8
486 # committed tags are stored in UTF-8
478 writetags(fp, names, encoding.fromlocal, prevtags)
487 writetags(fp, names, encoding.fromlocal, prevtags)
479
488
480 fp.close()
489 fp.close()
481
490
482 self.invalidatecaches()
491 self.invalidatecaches()
483
492
484 if '.hgtags' not in self.dirstate:
493 if '.hgtags' not in self.dirstate:
485 self[None].add(['.hgtags'])
494 self[None].add(['.hgtags'])
486
495
487 m = matchmod.exact(self.root, '', ['.hgtags'])
496 m = matchmod.exact(self.root, '', ['.hgtags'])
488 tagnode = self.commit(message, user, date, extra=extra, match=m)
497 tagnode = self.commit(message, user, date, extra=extra, match=m)
489
498
490 for name in names:
499 for name in names:
491 self.hook('tag', node=hex(node), tag=name, local=local)
500 self.hook('tag', node=hex(node), tag=name, local=local)
492
501
493 return tagnode
502 return tagnode
494
503
495 def tag(self, names, node, message, local, user, date):
504 def tag(self, names, node, message, local, user, date):
496 '''tag a revision with one or more symbolic names.
505 '''tag a revision with one or more symbolic names.
497
506
498 names is a list of strings or, when adding a single tag, names may be a
507 names is a list of strings or, when adding a single tag, names may be a
499 string.
508 string.
500
509
501 if local is True, the tags are stored in a per-repository file.
510 if local is True, the tags are stored in a per-repository file.
502 otherwise, they are stored in the .hgtags file, and a new
511 otherwise, they are stored in the .hgtags file, and a new
503 changeset is committed with the change.
512 changeset is committed with the change.
504
513
505 keyword arguments:
514 keyword arguments:
506
515
507 local: whether to store tags in non-version-controlled file
516 local: whether to store tags in non-version-controlled file
508 (default False)
517 (default False)
509
518
510 message: commit message to use if committing
519 message: commit message to use if committing
511
520
512 user: name of user to use if committing
521 user: name of user to use if committing
513
522
514 date: date tuple to use if committing'''
523 date: date tuple to use if committing'''
515
524
516 if not local:
525 if not local:
517 for x in self.status()[:5]:
526 for x in self.status()[:5]:
518 if '.hgtags' in x:
527 if '.hgtags' in x:
519 raise util.Abort(_('working copy of .hgtags is changed '
528 raise util.Abort(_('working copy of .hgtags is changed '
520 '(please commit .hgtags manually)'))
529 '(please commit .hgtags manually)'))
521
530
522 self.tags() # instantiate the cache
531 self.tags() # instantiate the cache
523 self._tag(names, node, message, local, user, date)
532 self._tag(names, node, message, local, user, date)
524
533
525 @filteredpropertycache
534 @filteredpropertycache
526 def _tagscache(self):
535 def _tagscache(self):
527 '''Returns a tagscache object that contains various tags related
536 '''Returns a tagscache object that contains various tags related
528 caches.'''
537 caches.'''
529
538
530 # This simplifies its cache management by having one decorated
539 # This simplifies its cache management by having one decorated
531 # function (this one) and the rest simply fetch things from it.
540 # function (this one) and the rest simply fetch things from it.
532 class tagscache(object):
541 class tagscache(object):
533 def __init__(self):
542 def __init__(self):
534 # These two define the set of tags for this repository. tags
543 # These two define the set of tags for this repository. tags
535 # maps tag name to node; tagtypes maps tag name to 'global' or
544 # maps tag name to node; tagtypes maps tag name to 'global' or
536 # 'local'. (Global tags are defined by .hgtags across all
545 # 'local'. (Global tags are defined by .hgtags across all
537 # heads, and local tags are defined in .hg/localtags.)
546 # heads, and local tags are defined in .hg/localtags.)
538 # They constitute the in-memory cache of tags.
547 # They constitute the in-memory cache of tags.
539 self.tags = self.tagtypes = None
548 self.tags = self.tagtypes = None
540
549
541 self.nodetagscache = self.tagslist = None
550 self.nodetagscache = self.tagslist = None
542
551
543 cache = tagscache()
552 cache = tagscache()
544 cache.tags, cache.tagtypes = self._findtags()
553 cache.tags, cache.tagtypes = self._findtags()
545
554
546 return cache
555 return cache
547
556
548 def tags(self):
557 def tags(self):
549 '''return a mapping of tag to node'''
558 '''return a mapping of tag to node'''
550 t = {}
559 t = {}
551 if self.changelog.filteredrevs:
560 if self.changelog.filteredrevs:
552 tags, tt = self._findtags()
561 tags, tt = self._findtags()
553 else:
562 else:
554 tags = self._tagscache.tags
563 tags = self._tagscache.tags
555 for k, v in tags.iteritems():
564 for k, v in tags.iteritems():
556 try:
565 try:
557 # ignore tags to unknown nodes
566 # ignore tags to unknown nodes
558 self.changelog.rev(v)
567 self.changelog.rev(v)
559 t[k] = v
568 t[k] = v
560 except (error.LookupError, ValueError):
569 except (error.LookupError, ValueError):
561 pass
570 pass
562 return t
571 return t
563
572
564 def _findtags(self):
573 def _findtags(self):
565 '''Do the hard work of finding tags. Return a pair of dicts
574 '''Do the hard work of finding tags. Return a pair of dicts
566 (tags, tagtypes) where tags maps tag name to node, and tagtypes
575 (tags, tagtypes) where tags maps tag name to node, and tagtypes
567 maps tag name to a string like \'global\' or \'local\'.
576 maps tag name to a string like \'global\' or \'local\'.
568 Subclasses or extensions are free to add their own tags, but
577 Subclasses or extensions are free to add their own tags, but
569 should be aware that the returned dicts will be retained for the
578 should be aware that the returned dicts will be retained for the
570 duration of the localrepo object.'''
579 duration of the localrepo object.'''
571
580
572 # XXX what tagtype should subclasses/extensions use? Currently
581 # XXX what tagtype should subclasses/extensions use? Currently
573 # mq and bookmarks add tags, but do not set the tagtype at all.
582 # mq and bookmarks add tags, but do not set the tagtype at all.
574 # Should each extension invent its own tag type? Should there
583 # Should each extension invent its own tag type? Should there
575 # be one tagtype for all such "virtual" tags? Or is the status
584 # be one tagtype for all such "virtual" tags? Or is the status
576 # quo fine?
585 # quo fine?
577
586
578 alltags = {} # map tag name to (node, hist)
587 alltags = {} # map tag name to (node, hist)
579 tagtypes = {}
588 tagtypes = {}
580
589
581 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
590 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
582 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
591 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
583
592
584 # Build the return dicts. Have to re-encode tag names because
593 # Build the return dicts. Have to re-encode tag names because
585 # the tags module always uses UTF-8 (in order not to lose info
594 # the tags module always uses UTF-8 (in order not to lose info
586 # writing to the cache), but the rest of Mercurial wants them in
595 # writing to the cache), but the rest of Mercurial wants them in
587 # local encoding.
596 # local encoding.
588 tags = {}
597 tags = {}
589 for (name, (node, hist)) in alltags.iteritems():
598 for (name, (node, hist)) in alltags.iteritems():
590 if node != nullid:
599 if node != nullid:
591 tags[encoding.tolocal(name)] = node
600 tags[encoding.tolocal(name)] = node
592 tags['tip'] = self.changelog.tip()
601 tags['tip'] = self.changelog.tip()
593 tagtypes = dict([(encoding.tolocal(name), value)
602 tagtypes = dict([(encoding.tolocal(name), value)
594 for (name, value) in tagtypes.iteritems()])
603 for (name, value) in tagtypes.iteritems()])
595 return (tags, tagtypes)
604 return (tags, tagtypes)
596
605
597 def tagtype(self, tagname):
606 def tagtype(self, tagname):
598 '''
607 '''
599 return the type of the given tag. result can be:
608 return the type of the given tag. result can be:
600
609
601 'local' : a local tag
610 'local' : a local tag
602 'global' : a global tag
611 'global' : a global tag
603 None : tag does not exist
612 None : tag does not exist
604 '''
613 '''
605
614
606 return self._tagscache.tagtypes.get(tagname)
615 return self._tagscache.tagtypes.get(tagname)
607
616
608 def tagslist(self):
617 def tagslist(self):
609 '''return a list of tags ordered by revision'''
618 '''return a list of tags ordered by revision'''
610 if not self._tagscache.tagslist:
619 if not self._tagscache.tagslist:
611 l = []
620 l = []
612 for t, n in self.tags().iteritems():
621 for t, n in self.tags().iteritems():
613 r = self.changelog.rev(n)
622 r = self.changelog.rev(n)
614 l.append((r, t, n))
623 l.append((r, t, n))
615 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
624 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
616
625
617 return self._tagscache.tagslist
626 return self._tagscache.tagslist
618
627
619 def nodetags(self, node):
628 def nodetags(self, node):
620 '''return the tags associated with a node'''
629 '''return the tags associated with a node'''
621 if not self._tagscache.nodetagscache:
630 if not self._tagscache.nodetagscache:
622 nodetagscache = {}
631 nodetagscache = {}
623 for t, n in self._tagscache.tags.iteritems():
632 for t, n in self._tagscache.tags.iteritems():
624 nodetagscache.setdefault(n, []).append(t)
633 nodetagscache.setdefault(n, []).append(t)
625 for tags in nodetagscache.itervalues():
634 for tags in nodetagscache.itervalues():
626 tags.sort()
635 tags.sort()
627 self._tagscache.nodetagscache = nodetagscache
636 self._tagscache.nodetagscache = nodetagscache
628 return self._tagscache.nodetagscache.get(node, [])
637 return self._tagscache.nodetagscache.get(node, [])
629
638
630 def nodebookmarks(self, node):
639 def nodebookmarks(self, node):
631 marks = []
640 marks = []
632 for bookmark, n in self._bookmarks.iteritems():
641 for bookmark, n in self._bookmarks.iteritems():
633 if n == node:
642 if n == node:
634 marks.append(bookmark)
643 marks.append(bookmark)
635 return sorted(marks)
644 return sorted(marks)
636
645
637 def branchmap(self):
646 def branchmap(self):
638 '''returns a dictionary {branch: [branchheads]}'''
647 '''returns a dictionary {branch: [branchheads]}'''
639 branchmap.updatecache(self)
648 branchmap.updatecache(self)
640 return self._branchcaches[self.filtername]
649 return self._branchcaches[self.filtername]
641
650
642
651
643 def _branchtip(self, heads):
652 def _branchtip(self, heads):
644 '''return the tipmost branch head in heads'''
653 '''return the tipmost branch head in heads'''
645 tip = heads[-1]
654 tip = heads[-1]
646 for h in reversed(heads):
655 for h in reversed(heads):
647 if not self[h].closesbranch():
656 if not self[h].closesbranch():
648 tip = h
657 tip = h
649 break
658 break
650 return tip
659 return tip
651
660
652 def branchtip(self, branch):
661 def branchtip(self, branch):
653 '''return the tip node for a given branch'''
662 '''return the tip node for a given branch'''
654 if branch not in self.branchmap():
663 if branch not in self.branchmap():
655 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
664 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
656 return self._branchtip(self.branchmap()[branch])
665 return self._branchtip(self.branchmap()[branch])
657
666
658 def branchtags(self):
667 def branchtags(self):
659 '''return a dict where branch names map to the tipmost head of
668 '''return a dict where branch names map to the tipmost head of
660 the branch, open heads come before closed'''
669 the branch, open heads come before closed'''
661 bt = {}
670 bt = {}
662 for bn, heads in self.branchmap().iteritems():
671 for bn, heads in self.branchmap().iteritems():
663 bt[bn] = self._branchtip(heads)
672 bt[bn] = self._branchtip(heads)
664 return bt
673 return bt

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        # if the peer is not a local repository we cannot index into it,
        # so resolve the key against ourselves instead
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
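
    # known() backs the "known" wire-protocol query used during discovery:
    # one boolean per node, with secret changesets deliberately reported as
    # unknown so they are never exchanged. E.g. (illustrative):
    #
    #   repo.known([public_node, bogus_node])  ->  [True, False]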

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
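
    # Illustrative hgrc input for _loadfilter (a sketch, not part of this
    # file); patterns come from ui.configitems('encode'/'decode'):
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    # A command of '!' disables the pattern, and a command starting with a
    # name registered via adddatafilter() dispatches to that Python filter
    # function instead of a shell command.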

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
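
    # Flag semantics above, matching Mercurial's manifest flags: 'l' marks
    # a symlink whose target travels as the file data, and 'x' marks an
    # executable file; both are applied after the decode filters run.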

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
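
    # Typical caller pattern (an illustrative sketch; commitctx below
    # follows it): take the store lock, open a transaction, and rely on
    # tr.close() to make the writes permanent.
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           # ... append to revlogs ...
    #           tr.close()
    #       finally:
    #           tr.release()
    #   finally:
    #       lock.release()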

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
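
    # On tr.close(), aftertrans(renames) moves each journal.* file to its
    # undo.* counterpart (see undoname and _journalfiles above); rollback()
    # consumes the undo.* files, while an interrupted transaction leaves
    # journal.* behind for recover().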

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
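
    # Note that only one generation of undo.* files is kept, so rollback is
    # effectively single-shot: the next transaction overwrites the undo data
    # and there is nothing further to roll back to.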

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly reread the dirstate (i.e. restore it to a previously
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
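
    # The blocking retry above honors the user's lock timeout, e.g. in an
    # hgrc (illustrative):
    #
    #   [ui]
    #   timeout = 30
    #
    # The first attempt never blocks (timeout 0), so the "waiting for lock"
    # warning is printed only when another process actually holds the lock.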

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
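
    # Lock-ordering convention (as in rollback() above): when both locks are
    # needed, acquire wlock before lock and release in the opposite order,
    # so concurrent callers cannot deadlock against each other.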

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
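
    # _filecommit returns the filenode to record in the new manifest: the
    # result of flog.add() when content, copy metadata, or a second parent
    # is involved, otherwise the first parent's node (after noting a pure
    # flag change in changelist when applicable).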

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
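
    # Illustrative use from an extension or script (a sketch): commit all
    # outstanding changes and obtain the new changeset node, or None when
    # nothing changed.
    #
    #   node = repo.commit(text='automated update', user='bot@example.com')
    #   if node is None:
    #       repo.ui.status('nothing to commit\n')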

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
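
    # The write order above is deliberate: filelog revisions first, then the
    # manifest, then the changelog entry, so the 'pretxncommit' hook can
    # still veto everything before tr.close() makes the revision permanent.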

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
1442
1451
1443 def status(self, node1='.', node2=None, match=None,
1452 def status(self, node1='.', node2=None, match=None,
1444 ignored=False, clean=False, unknown=False,
1453 ignored=False, clean=False, unknown=False,
1445 listsubrepos=False):
1454 listsubrepos=False):
1446 """return status of files between two nodes or node and working
1455 """return status of files between two nodes or node and working
1447 directory.
1456 directory.
1448
1457
1449 If node1 is None, use the first dirstate parent instead.
1458 If node1 is None, use the first dirstate parent instead.
1450 If node2 is None, compare node1 with working directory.
1459 If node2 is None, compare node1 with working directory.
1451 """
1460 """
1452
1461
1453 def mfmatches(ctx):
1462 def mfmatches(ctx):
1454 mf = ctx.manifest().copy()
1463 mf = ctx.manifest().copy()
1455 if match.always():
1464 if match.always():
1456 return mf
1465 return mf
1457 for fn in mf.keys():
1466 for fn in mf.keys():
1458 if not match(fn):
1467 if not match(fn):
1459 del mf[fn]
1468 del mf[fn]
1460 return mf
1469 return mf
1461
1470
1462 ctx1 = self[node1]
1471 ctx1 = self[node1]
1463 ctx2 = self[node2]
1472 ctx2 = self[node2]
1464
1473
1465 working = ctx2.rev() is None
1474 working = ctx2.rev() is None
1466 parentworking = working and ctx1 == self['.']
1475 parentworking = working and ctx1 == self['.']
1467 match = match or matchmod.always(self.root, self.getcwd())
1476 match = match or matchmod.always(self.root, self.getcwd())
1468 listignored, listclean, listunknown = ignored, clean, unknown
1477 listignored, listclean, listunknown = ignored, clean, unknown
1469
1478
1470 # load earliest manifest first for caching reasons
1479 # load earliest manifest first for caching reasons
1471 if not working and ctx2.rev() < ctx1.rev():
1480 if not working and ctx2.rev() < ctx1.rev():
1472 ctx2.manifest()
1481 ctx2.manifest()
1473
1482
1474 if not parentworking:
1483 if not parentworking:
1475 def bad(f, msg):
1484 def bad(f, msg):
1476 # 'f' may be a directory pattern from 'match.files()',
1485 # 'f' may be a directory pattern from 'match.files()',
1477 # so 'f not in ctx1' is not enough
1486 # so 'f not in ctx1' is not enough
1478 if f not in ctx1 and f not in ctx1.dirs():
1487 if f not in ctx1 and f not in ctx1.dirs():
1479 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1488 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1480 match.bad = bad
1489 match.bad = bad
1481
1490
1482 if working: # we need to scan the working dir
1491 if working: # we need to scan the working dir
1483 subrepos = []
1492 subrepos = []
1484 if '.hgsub' in self.dirstate:
1493 if '.hgsub' in self.dirstate:
1485 subrepos = sorted(ctx2.substate)
1494 subrepos = sorted(ctx2.substate)
1486 s = self.dirstate.status(match, subrepos, listignored,
1495 s = self.dirstate.status(match, subrepos, listignored,
1487 listclean, listunknown)
1496 listclean, listunknown)
1488 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1497 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1489
1498
1490 # check for any possibly clean files
1499 # check for any possibly clean files
1491 if parentworking and cmp:
1500 if parentworking and cmp:
1492 fixup = []
1501 fixup = []
1493 # do a full compare of any files that might have changed
1502 # do a full compare of any files that might have changed
1494 for f in sorted(cmp):
1503 for f in sorted(cmp):
1495 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean
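        # A hedged usage sketch: the seven lists come back in this fixed
        # order, so callers unpack the result positionally, e.g.
        #   modified, added, removed, deleted, unknown, ignored, clean = \
        #       repo.status(clean=True)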

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads
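    # A hedged usage sketch of branchheads():
    #   repo.branchheads()              # heads of the current dirstate branch
    #   repo.branchheads('stable')      # heads of 'stable', newest first
    #   repo.branchheads('stable', closed=True)   # include closed heads
    # An unknown branch name yields [] rather than raising an error.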

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
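    # Each entry produced by branches() is a 4-tuple
    # (start-node, stop-node, parent1, parent2): first parents are followed
    # from the start node until a merge or a root is hit, so each tuple
    # describes one linear run of history. Illustrative sketch:
    #   for t, n, p0, p1 in repo.branches([somenode]):
    #       ...  # t == somenode, n == first merge or root reached from it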

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
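    # between() samples each top..bottom chain at exponentially growing
    # first-parent distances (1, 2, 4, 8, ...), which keeps the reply small
    # for the old "between" discovery protocol. For a linear chain
    # bottom <- ... <- c2 <- c1 <- top, the list appended for that pair is
    # [c1, c2, c4, c8, ...], i.e. the nodes 1, 2, 4, 8, ... steps below top.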

    def pull(self, remote, heads=None, force=False):
        if remote.local():
            missing = set(remote.requirements) - self.supported
            if missing:
                msg = _("required features are not"
                        " supported in the destination:"
                        " %s") % (', '.join(sorted(missing)))
                raise util.Abort(msg)

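        # Illustrative sketch of the guard above: pulling over the
        # filesystem from a repository whose .hg/requires lists a feature
        # this Mercurial cannot handle (say, a hypothetical 'fancystore')
        # aborts with
        #   abort: required features are not supported in the destination:
        #   fancystore
        # True remote peers (ssh/http) are unaffected: remote.local() is
        # None for them, so the check is skipped.
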
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    # TODO: get bundlecaps from remote
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # we use the unfiltered changelog here because hidden
                # revisions must be taken into account for phase
                # synchronization; they may become public and visible again.
                cl = self.unfiltered().changelog
                clstart = len(cl)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(cl)
                added = [cl.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            def gettransaction():
                if tr is None:
                    return self.transaction(trname)
                return tr

            obstr = obsolete.syncpull(self, remote, gettransaction)
            if obstr is not None:
                tr = obstr

            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        if remote.local():
            missing = set(self.requirements) - remote.local().supported
            if missing:
                msg = _("required features are not"
                        " supported in the destination:"
                        " %s") % (', '.join(sorted(missing)))
                raise util.Abort(msg)

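        # This mirrors the guard in pull(): here the *local* requirements
        # are checked against what the destination supports, so pushing
        # (for example) a generaldelta repository into an installation that
        # lacks the feature aborts before any data is transferred.
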
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        def localphasemove(nodes, phase=phases.public):
            """move <nodes> to <phase> in the local source repo"""
            if locallock is not None:
                phases.advanceboundary(self, phase, nodes)
            else:
                # repo is not locked, do not change any phases!
                # Inform the user that phases should have been moved when
                # applicable.
                actualmoves = [n for n in nodes if phase < self[n].phase()]
                phasestr = phases.phasenames[phase]
                if actualmoves:
                    self.ui.status(_('cannot lock source repo, skipping local'
                                     ' %s phase update\n') % phasestr)
        # get local lock as we might write phase data
        locallock = None
        try:
            locallock = self.lock()
        except IOError, err:
            if err.errno != errno.EACCES:
                raise
            # source repo cannot be locked.
            # We do not abort the push, but just disable the local phase
            # synchronisation.
            msg = 'cannot lock source repository: %s\n' % err
            self.ui.debug(msg)
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if unfi.obsstore:
                            # these messages are assigned here to stay
                            # within the 80-char limit below
                            mso = _("push includes obsolete changeset: %s!")
                            mst = "push includes %s changeset: %s!"
                            # plain versions for the i18n tool to detect them
                            _("push includes unstable changeset: %s!")
                            _("push includes bumped changeset: %s!")
                            _("push includes divergent changeset: %s!")
                            # If we are to push and there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missing heads will be obsolete
                            # or unstable. So checking heads only is ok.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.troubled():
                                    raise util.Abort(_(mst)
                                                     % (ctx.troubles()[0],
                                                        ctx))
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # TODO: get bundlecaps from remote
                    bundlecaps = None
                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        bundler = changegroup.bundle10(self, bundlecaps)
                        cg = self._changegroupsubset(outgoing,
                                                     bundler,
                                                     'push',
                                                     fastpath=True)
                    else:
                        cg = self.getlocalbundle('push', outgoing, bundlecaps)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if (self.ui.configbool('ui', '_usedassubrepo', False)
                    and remotephases    # server supports phases
                    and ret is None # nothing was pushed
                    and remotephases.get('publishing', False)):
                    # When:
                    # - this is a subrepo push
                    # - and remote supports phases
                    # - and no changeset was pushed
                    # - and remote is publishing
                    # We may be in the issue 3871 case!
                    # We drop the possible phase synchronisation done by
                    # courtesy to publish changesets possibly locally draft
                    # on the remote.
                    remotephases = {'publishing': 'True'}
                if not remotephases: # old server or public only repo
                    localphasemove(cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        localphasemove(cheads)
                    else: # publish = False
                        localphasemove(pheads)
                        localphasemove(cheads, phases.draft)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a set of roots; we may want to ensure it
                    # XXX is, but that is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                obsolete.syncpush(self, remote)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            if locallock is not None:
                locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        revnums = map(unfi.changelog.rev, revs or [])
        ancestors = [
            a for a in unfi.changelog.ancestors(revnums, inclusive=True)]
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        if ancestors and cl.rev() not in ancestors:
                            continue
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        # TODO: remove call to nodesbetween.
        csets, bases, heads = cl.nodesbetween(bases, heads)
        bases = [p for n in bases for p in cl.parents(n) if p != nullid]
        outgoing = discovery.outgoing(cl, bases, heads)
        bundler = changegroup.bundle10(self)
        return self._changegroupsubset(outgoing, bundler, source)

    def getlocalbundle(self, source, outgoing, bundlecaps=None):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        bundler = changegroup.bundle10(self, bundlecaps)
        return self._changegroupsubset(outgoing, bundler, source)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads),
                                   bundlecaps=bundlecaps)
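    # Hedged usage sketch: common/heads define the set
    # "ancestors(heads) - ancestors(common)", so for binary nodes h and c
    #   cg = repo.getbundle('serve', heads=[h], common=[c])
    # bundles every changeset that is an ancestor of h but not of c.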

    @unfilteredmethod
    def _changegroupsubset(self, outgoing, bundler, source,
                           fastpath=False):
        commonrevs = outgoing.common
        csets = outgoing.missing
        heads = outgoing.missingheads
        # We go through the fast path if we get told to, or if all
        # (unfiltered) heads have been requested (since we then know all
        # linkrevs will be pulled by the client).
        heads.sort()
        fastpathlinkrev = fastpath or (
            self.filtername is None and heads == sorted(self.heads()))

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)
        gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
        return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
                                                         pr, needfiles)
            revisions += newrevs
            files += newfiles

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but the coming
                    # call to `destroyed` will repair it.
                    # In the other cases we can safely update the cache on
                    # disk.
                    branchmap.updatecache(self.filtered('served'))
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)

                    newheads = [h for h in self.heads() if h not in oldheads]
                    self.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
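    # Worked example of the return encoding above: with one pre-existing
    # head, a changegroup adding two extra heads gives dh == 2 and returns
    # 3; one that merges two heads into one gives dh == -1 and returns -2;
    # an unchanged head count returns 1. The +/-1 shift keeps 0 free to
    # mean "nothing changed or no source".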

    def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
        revisions = 0
        files = 0
        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                break
            f = chunkdata["filename"]
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                    else:
                        raise util.Abort(
                            _("received spurious file revlog entry"))
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        return revisions, files

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible:
                    # filters above 'served' are unlikely to be fetched
                    # from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
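    # The framing consumed above, sketched for reference: a status line
    # ('0' ok, '1' forbidden, '2' lock failed), one '<filecount> <bytecount>'
    # line, then per file a '<name>\0<size>' header followed by exactly
    # <size> bytes of raw store data.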

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
2395
2420
2396 def pushkey(self, namespace, key, old, new):
2421 def pushkey(self, namespace, key, old, new):
2397 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2422 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2398 old=old, new=new)
2423 old=old, new=new)
2399 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2424 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2400 ret = pushkey.push(self, namespace, key, old, new)
2425 ret = pushkey.push(self, namespace, key, old, new)
2401 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2426 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2402 ret=ret)
2427 ret=ret)
2403 return ret
2428 return ret
2404
2429
2405 def listkeys(self, namespace):
2430 def listkeys(self, namespace):
2406 self.hook('prelistkeys', throw=True, namespace=namespace)
2431 self.hook('prelistkeys', throw=True, namespace=namespace)
2407 self.ui.debug('listing keys for "%s"\n' % namespace)
2432 self.ui.debug('listing keys for "%s"\n' % namespace)
2408 values = pushkey.list(self, namespace)
2433 values = pushkey.list(self, namespace)
2409 self.hook('listkeys', namespace=namespace, values=values)
2434 self.hook('listkeys', namespace=namespace, values=values)
2410 return values
2435 return values
2411
2436
2412 def debugwireargs(self, one, two, three=None, four=None, five=None):
2437 def debugwireargs(self, one, two, three=None, four=None, five=None):
2413 '''used to test argument passing over the wire'''
2438 '''used to test argument passing over the wire'''
2414 return "%s %s %s %s %s" % (one, two, three, four, five)
2439 return "%s %s %s %s %s" % (one, two, three, four, five)
2415
2440
2416 def savecommitmessage(self, text):
2441 def savecommitmessage(self, text):
2417 fp = self.opener('last-message.txt', 'wb')
2442 fp = self.opener('last-message.txt', 'wb')
2418 try:
2443 try:
2419 fp.write(text)
2444 fp.write(text)
2420 finally:
2445 finally:
2421 fp.close()
2446 fp.close()
2422 return self.pathto(fp.name[len(self.root) + 1:])
2447 return self.pathto(fp.name[len(self.root) + 1:])
2423
2448
2424 # used to avoid circular references so destructors work
2449 # used to avoid circular references so destructors work
2425 def aftertrans(files):
2450 def aftertrans(files):
2426 renamefiles = [tuple(t) for t in files]
2451 renamefiles = [tuple(t) for t in files]
2427 def a():
2452 def a():
2428 for vfs, src, dest in renamefiles:
2453 for vfs, src, dest in renamefiles:
2429 try:
2454 try:
2430 vfs.rename(src, dest)
2455 vfs.rename(src, dest)
2431 except OSError: # journal file does not yet exist
2456 except OSError: # journal file does not yet exist
2432 pass
2457 pass
2433 return a
2458 return a
2434
2459
2435 def undoname(fn):
2460 def undoname(fn):
2436 base, name = os.path.split(fn)
2461 base, name = os.path.split(fn)
2437 assert name.startswith('journal')
2462 assert name.startswith('journal')
2438 return os.path.join(base, name.replace('journal', 'undo', 1))
2463 return os.path.join(base, name.replace('journal', 'undo', 1))
2439
2464
2440 def instance(ui, path, create):
2465 def instance(ui, path, create):
2441 return localrepository(ui, util.urllocalpath(path), create)
2466 return localrepository(ui, util.urllocalpath(path), create)
2442
2467
2443 def islocal(path):
2468 def islocal(path):
2444 return True
2469 return True
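
The stream-versus-pull negotiation in clone() above is compact, so here is a minimal standalone sketch of the same decision procedure. The Remote stub and the choose_clone_mode helper are hypothetical illustrations, not Mercurial APIs; only the capability names ('stream-preferred', 'stream', 'streamreqs') come from the code above.

    # Simplified model of the stream-vs-pull decision in clone().
    # 'Remote' is a hypothetical stub; real peers answer capable() over the wire.
    class Remote(object):
        def __init__(self, caps):
            self.caps = caps
        def capable(self, name):
            # mirrors peer.capable(): False if absent, else the advertised value
            return self.caps.get(name, False)

    def choose_clone_mode(remote, supportedformats, heads=None, stream=False):
        if not stream:
            # server may explicitly prefer streaming (fast LANs)
            stream = remote.capable('stream-preferred')
        if stream and not heads:
            if remote.capable('stream'):
                # bare 'stream' implies a revlogv1-only remote
                return 'stream', set(['revlogv1'])
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # stream only if we support every remote format
                if not streamreqs - supportedformats:
                    return 'stream', streamreqs
        return 'pull', None

    # a server whose formats we fully support gets streamed
    mode, reqs = choose_clone_mode(
        Remote({'streamreqs': 'revlogv1,generaldelta'}),
        supportedformats=set(['revlogv1', 'generaldelta', 'dotencode']),
        stream=True)
    assert mode == 'stream' and reqs == set(['revlogv1', 'generaldelta'])

    # a server requiring an unknown format falls back to pull
    mode, reqs = choose_clone_mode(
        Remote({'streamreqs': 'revlogv1,exoticformat'}),
        supportedformats=set(['revlogv1']), stream=True)
    assert mode == 'pull'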
@@ -1,159 +1,161 b''
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import changelog, byterange, url, error
import localrepo, manifest, util, scmutil, store
import urllib, urllib2, errno, os

class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url
    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urllib2.Request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            # Python 2.6+ defines a getcode() function, and 2.4 and
            # 2.5 appear to always have an undocumented code attribute
            # set. If we can't read either of those, fall back to 206
            # and hope for the best.
            code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))()
        except urllib2.HTTPError, inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urllib2.URLError, inst:
            raise IOError(None, inst.reason[1])

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data
    def __iter__(self):
        return iter(self.read().splitlines(1))
    def close(self):
        pass

def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(byterange.HTTPRangeHandler())

    class statichttpvfs(scmutil.abstractvfs):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode="r", atomictemp=None):
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urllib.quote(path)))
            return httprangereader(f, urlopener)

        def join(self, path):
            if path:
                return os.path.join(self.base, path)
            else:
                return self.base

    return statichttpvfs

class statichttppeer(localrepo.localpeer):
    def local(self):
        return None
    def canpush(self):
        return False

class statichttprepository(localrepo.localrepository):
+    supported = localrepo.localrepository._basesupported
+
    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        opener = build_opener(ui, authinfo)
        self.opener = opener(self.path)
        self.vfs = self.opener
        self._phasedefaults = []

        try:
            requirements = scmutil.readrequires(self.opener, self.supported)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

        # check if it is a non-empty old-style repository
        try:
            fp = self.opener("00changelog.i")
            fp.read(1)
            fp.close()
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            # we do not care about empty old-style repositories here
            msg = _("'%s' does not appear to be an hg repository") % path
            raise error.RepoError(msg)

        # setup store
        self.store = store.store(requirements, self.path, opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        self.manifest = manifest.manifest(self.sopener)
        self.changelog = changelog.changelog(self.sopener)
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = {}
        self.encodepats = None
        self.decodepats = None

    def _restrictcapabilities(self, caps):
        return caps.difference(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def lock(self, wait=True):
        raise util.Abort(_('cannot lock static-http repository'))

def instance(ui, path, create):
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
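
The one functional change in this file is the added class-level supported attribute, which pins static-http repositories to the base feature set so that features registered locally through featuresetupfuncs are not silently accepted for read-only static-http access. As a rough sketch of the check this attribute feeds into (modeled loosely on scmutil.readrequires; simplified, not the exact implementation):

    # Rough model of the requirements check driven by self.supported.
    # 'RequirementError' and 'readrequires' here are simplified stand-ins.
    class RequirementError(Exception):
        pass

    def readrequires(lines, supported):
        # parse one requirement per line, then reject anything unsupported
        requirements = set(line.strip() for line in lines if line.strip())
        missing = requirements - supported
        if missing:
            raise RequirementError(
                "unknown repository format: requires features '%s' "
                "(upgrade Mercurial)" % "', '".join(sorted(missing)))
        return requirements

    base = set(['revlogv1', 'store', 'fncache', 'dotencode'])
    # a repo only requiring base formats opens fine
    readrequires(['revlogv1\n', 'store\n'], base)
    # one requiring a locally-registered feature does not
    try:
        readrequires(['revlogv1\n', 'featuresetup-test\n'], base)
    except RequirementError, inst:
        print inst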
@@ -1,19 +1,69 b''
  $ hg init t
  $ cd t
  $ echo a > a
  $ hg add a
  $ hg commit -m test
  $ rm .hg/requires
  $ hg tip
  abort: index 00changelog.i unknown format 2!
  [255]
  $ echo indoor-pool > .hg/requires
  $ hg tip
  abort: unknown repository format: requires features 'indoor-pool' (upgrade Mercurial)!
  [255]
  $ echo outdoor-pool >> .hg/requires
  $ hg tip
  abort: unknown repository format: requires features 'indoor-pool', 'outdoor-pool' (upgrade Mercurial)!
  [255]
+  $ cd ..
+
+Test the check between features supported locally and ones required by
+another repository, for push/pull/clone via localhost:
+
+  $ mkdir supported-locally
+  $ cd supported-locally
+
+  $ hg init supported
+  $ echo a > supported/a
+  $ hg -R supported commit -Am '#0 at supported'
+  adding a
+
+  $ echo 'featuresetup-test' >> supported/.hg/requires
+  $ cat > $TESTTMP/supported-locally/supportlocally.py <<EOF
+  > from mercurial import localrepo, extensions
+  > def featuresetup(ui, supported):
+  >     for name, module in extensions.extensions(ui):
+  >         if __name__ == module.__name__:
+  >             # support specific feature locally
+  >             supported |= set(['featuresetup-test'])
+  >             return
+  > def uisetup(ui):
+  >     localrepo.localrepository.featuresetupfuncs.add(featuresetup)
+  > EOF
+  $ cat > supported/.hg/hgrc <<EOF
+  > [extensions]
+  > # enable extension locally
+  > supportlocally = $TESTTMP/supported-locally/supportlocally.py
+  > EOF
+  $ hg -R supported status
+
+  $ hg init push-dst
+  $ hg -R supported push push-dst
+  pushing to push-dst
+  abort: required features are not supported in the destination: featuresetup-test
+  [255]
+
+  $ hg init pull-src
+  $ hg -R pull-src pull supported
+  pulling from supported
+  abort: required features are not supported in the destination: featuresetup-test
+  [255]
+
+  $ hg clone supported clone-dst
+  abort: unknown repository format: requires features 'featuresetup-test' (upgrade Mercurial)!
+  [255]
+  $ hg clone --pull supported clone-dst
+  abort: required features are not supported in the destination: featuresetup-test
+  [255]

  $ cd ..
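
The push/pull failures in the test above come from a destination-side requirements comparison. A rough model of that check, with hypothetical names (the real check lives in Mercurial's push/pull code paths):

    # Hypothetical model of the destination-side feature check the test hits.
    class Abort(Exception):
        pass

    def checkrequirements(sourcerequirements, destsupported):
        # anything the source requires but the destination cannot support
        missing = sourcerequirements - destsupported
        if missing:
            raise Abort("required features are not supported in the "
                        "destination: %s" % ', '.join(sorted(missing)))

    src = set(['revlogv1', 'store', 'featuresetup-test'])
    dst = set(['revlogv1', 'store', 'fncache', 'dotencode'])
    try:
        checkrequirements(src, dst)
    except Abort, inst:
        # required features are not supported in the destination: featuresetup-test
        print inst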