localrepo: getting a value from the unfiltered caches should check that the attribute exists....
Wei, Elson
r19635:b9b7dc26 default
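The two added lines below make unfilteredpropertycache.__get__ short-circuit: if the unfiltered repository already holds the cached attribute, it is returned via getattr instead of being recomputed through the parent propertycache. A minimal sketch of the observable effect, assuming a Mercurial checkout on the import path and a repository in the current directory (both assumptions, not part of the change):

    # Sketch only: filtered views share one cached value with the
    # unfiltered repo rather than recomputing it per view.
    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')  # unfiltered localrepository
    view = repo.filtered('visible')        # repoview proxy over the same repo

    view._encodefilterpats                 # computed once, cached unfiltered
    assert '_encodefilterpats' in vars(repo.unfiltered())
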
@@ -1,2442 +1,2444 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import hex, nullid, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
import branchmap
propertycache = util.propertycache
filecache = scmutil.filecache

class repofilecache(filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
+       if hasunfilteredcache(repo, self.name):
+           return getattr(repo.unfiltered(), self.name)
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())

class filteredpropertycache(propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

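propertycache works by planting the computed value in the instance __dict__ so the descriptor is bypassed on later lookups; hasunfilteredcache simply probes that __dict__ on the unfiltered repo, which is why the new __get__ check above is cheap. A self-contained sketch of the storage pattern (propcache and repo here are illustrative names, not Mercurial's):

    class propcache(object):
        """Minimal stand-in for util.propertycache."""
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            # compute once, then plant the value on the instance so the
            # descriptor is never consulted again for this object
            value = self.func(obj)
            obj.__dict__[self.name] = value
            return value

    class repo(object):
        @propcache
        def heavy(self):
            print('computing...')
            return 42

    r = repo()
    r.heavy                    # prints 'computing...', caches 42
    r.heavy                    # served straight from r.__dict__
    assert 'heavy' in vars(r)
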
def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        return self._repo.getbundle(source, heads=heads, common=common,
                                    bundlecaps=bundlecaps)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

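localpeer exposes a local repository through the same interface remote peers implement; note that it deliberately serves the 'served' filtered view rather than the raw repo. A hedged sketch of obtaining one through the public entry point (the '.' path is an assumption):

    from mercurial import hg, ui as uimod

    myui = uimod.ui()
    p = hg.peer(myui, {}, '.')       # localpeer for a local path
    print(p.url())                   # 'file:' + repository root
    print(sorted(p.capabilities()))  # the restricted MODERNCAPS set
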
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

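On create, the requirements set is derived from the format.* config knobs and written to .hg/requires; on open, it is read back and checked against self.supported. A hedged sketch of what a fresh repository records (the path is illustrative):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/tmp/demo-repo', create=True)
    print(repo.opener.read('requires').splitlines())
    # with default config, e.g.: ['dotencode', 'fncache', 'revlogv1', 'store']
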
    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

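filtered() builds the proxy class at call time so the repoview mixin sits in front of whatever concrete repo class is actually in use (extensions may have subclassed localrepository). A standalone sketch of the same pattern, with illustrative Mixin/Base names:

    class Mixin(object):
        def who(self):
            return 'mixin, then ' + super(Mixin, self).who()

    class Base(object):
        def who(self):
            return 'base'

    def make_proxy(obj):
        # derive from obj's *actual* class so subclasses of Base keep
        # their behaviour underneath the mixin
        class proxycls(Mixin, obj.__class__):
            pass
        return proxycls

    print(make_proxy(Base())().who())   # 'mixin, then base'
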
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

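Together these dunder methods let a repo act as a container of changesets: indexing yields a changectx (or a workingctx for None), len() and iteration walk the changelog, and `in` resolves through lookup(). A hedged usage sketch, reusing the repo object from the earlier sketches:

    tip = repo['tip']          # changectx by tag, rev number or node
    wctx = repo[None]          # working directory context
    print(len(repo))           # number of revisions in the changelog
    print('deadbeef' in repo)  # False unless that resolves to a changeset
    for rev in repo:           # iterates changelog revision numbers
        pass
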
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

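revs() and set() are the programmatic revset entry points; revset.formatspec interpolates arguments (%s, %d, %ld, ...) with proper quoting so callers never splice revset strings by hand. A hedged sketch, again reusing repo:

    for rev in repo.revs('heads(all())'):
        print(rev)

    # %s is quoted by formatspec, so values cannot inject revset syntax
    for ctx in repo.set('ancestors(%s) and user(%s)', 'tip', 'mpm'):
        print('%s %s' % (ctx.hex()[:12], ctx.description().splitlines()[0]))
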
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

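tag() is the public wrapper: it refuses to run when .hgtags is locally modified, primes the tags cache, then delegates to _tag above. A hedged sketch of tagging programmatically (the tag name and user string are illustrative):

    from mercurial import util

    node = repo['tip'].node()
    try:
        repo.tag('v1.0-demo', node, 'Added tag v1.0-demo', False,
                 'Demo User <demo@example.com>', None)
    except util.Abort as inst:
        print('refused: %s' % inst)   # e.g. dirty .hgtags working copy
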
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

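branchmap() is filter-aware: branchmap.updatecache keys the computed {branch: [heads]} mapping by self.filtername, so each repoview keeps its own cache entry. A hedged sketch, reusing repo from above:

    from mercurial import node as nodemod

    for branch, heads in repo.branchmap().iteritems():
        print('%s: %s' % (branch, [nodemod.short(h) for h in heads]))

    print(nodemod.short(repo.branchtip('default')))  # tipmost open head
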
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

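_loadfilter compiles the [encode]/[decode] hgrc sections into (matcher, function, params) triples, and _filter runs the first matching one over file data; wread/wwrite below wire them to working-directory I/O. A hedged sketch (assumes a tracked README and an [encode] rule matching it, neither of which is part of this file):

    data = repo.wread('README')        # bytes after the encode filter
    raw = repo.wopener.read('README')  # raw working-directory bytes
    print(data == raw)                 # False when a filter rewrote it
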
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

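transaction() nests if a transaction is already running, and otherwise journals every file about to be touched so an interrupted write can be rolled back; on success the journal is renamed to the undo.* files via aftertrans. A hedged sketch of the canonical calling pattern (the 'demo' description is illustrative):

    lock = repo.lock()
    try:
        tr = repo.transaction('demo')
        try:
            # ... append to revlogs through repo.sopener here ...
            tr.close()     # commit: the journal becomes the undo files
        finally:
            tr.release()   # aborts if close() was never reached
    finally:
        lock.release()
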
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

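    # Editor's sketch of the 'undo.desc' layout assumed by the parsing above
    # (one field per line; the third line is optional):
    #
    #   1523          <- changelog length before the transaction (oldlen)
    #   commit        <- transaction description (desc)
    #   more detail   <- optional detail (detail)
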
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

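    # Editor's note (an addition): after invalidate(), the next attribute
    # access goes back through the filecache machinery and rereads from disk
    # if the underlying file changed, e.g.
    #
    #   repo.invalidate()
    #   repo.changelog    # reloaded on demand, not eagerly
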
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

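    # Editor's example (standard hgrc syntax; an addition, not original
    # source): the "ui.timeout" value read above caps how long _lock()
    # waits for a contended lock, e.g.
    #
    #   [ui]
    #   timeout = 30
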
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

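    # A minimal editor's sketch (an addition): the callback fires when the
    # outermost store lock is released, or immediately if no lock is held.
    #
    #   repo._afterlock(lambda: repo.ui.status('store lock released\n'))
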
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

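    # Editor's note (sketch of the locking convention used throughout this
    # class): when both locks are needed, wlock() is acquired before lock(),
    # and both are handed to release() in reverse order, e.g.
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify the store ...
    #   finally:
    #       release(lock, wlock)
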
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

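    # Editor's sketch (an addition, hypothetical values): the rename metadata
    # attached above travels with the new filelog revision, e.g.
    #
    #   meta = {"copy": "foo", "copyrev": "<40-digit hex filenode of foo>"}
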
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

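    # A hedged usage sketch (editor's addition): committing all working
    # directory changes with a fixed message; commit() returns None when
    # there is nothing to commit.
    #
    #   node = repo.commit(text='fix build', user='alice <alice@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
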
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in the proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent changeset;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

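    # Editor's example (an addition; standard hgrc syntax): the phase chosen
    # by phases.newcommitphase() above is driven by configuration, e.g.
    #
    #   [phases]
    #   new-commit = secret    # new commits start out secret instead of draft
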
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes.
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

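    # Editor's usage sketch (an addition; the pattern argument form is the
    # usual matchmod convention): walking tracked Python files in the
    # working directory.
    #
    #   m = matchmod.match(repo.root, '', ['glob:**.py'])
    #   for f in repo.walk(m):
    #       repo.ui.write(f + '\n')
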
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

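    # Editor's usage sketch (an addition): the seven lists unpack in the
    # order 'r' is assembled above; unknown/ignored/clean stay empty unless
    # the corresponding flags are passed.
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True, clean=True)
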
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

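    # A small editor's sketch (an addition): listing the open heads of the
    # 'default' branch, newest first, using short() from mercurial.node.
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write(short(h) + '\n')
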
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

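    # Editor's note (an addition, reasoning only): the loop above samples the
    # first-parent chain at exponentially growing gaps -- for a long enough
    # chain, l holds the nodes at distances 1, 2, 4, 8, ... below 'top'.
    # The legacy discovery protocol uses these samples to binary-search for
    # the common ancestors shared with a remote repository.
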
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    # TODO: get bundlecaps from remote
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # we use the unfiltered changelog here because hidden revisions
                # must be taken into account for phase synchronization; they
                # may become public and visible again.
                cl = self.unfiltered().changelog
                clstart = len(cl)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(cl)
                added = [cl.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            def gettransaction():
                if tr is None:
                    return self.transaction(trname)
                return tr

            obstr = obsolete.syncpull(self, remote, gettransaction)
            if obstr is not None:
                tr = obstr

            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

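    # A hedged usage sketch (editor's addition; obtaining 'other' via
    # hg.peer() is an assumption about the caller, not shown in this module):
    #
    #   other = hg.peer(repo.ui, {}, 'https://example.com/repo')
    #   result = repo.pull(other)
    #   if result == 0:
    #       repo.ui.status('nothing new\n')   # the "no changes found" case
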
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

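    # Editor's sketch of what the docstring above allows (hypothetical
    # extension code; 'myext'/'blockpush' are invented names): an extension
    # may wrap checkpush() to veto a push before any data is sent.
    #
    #   def mycheckpush(orig, repo, force, revs):
    #       if not force and repo.ui.configbool('myext', 'blockpush'):
    #           raise util.Abort('pushing disabled by myext')
    #       return orig(repo, force, revs)
    #
    #   extensions.wrapfunction(localrepository, 'checkpush', mycheckpush)
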
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        def localphasemove(nodes, phase=phases.public):
            """move <nodes> to <phase> in the local source repo"""
            if locallock is not None:
                phases.advanceboundary(self, phase, nodes)
            else:
                # repo is not locked, do not change any phases!
                # Informs the user that phases should have been moved when
                # applicable.
                actualmoves = [n for n in nodes if phase < self[n].phase()]
                phasestr = phases.phasenames[phase]
                if actualmoves:
                    self.ui.status(_('cannot lock source repo, skipping local'
                                     ' %s phase update\n') % phasestr)
        # get local lock as we might write phase data
        locallock = None
        try:
            locallock = self.lock()
        except IOError, err:
            if err.errno != errno.EACCES:
                raise
            # source repo cannot be locked.
            # We do not abort the push, but just disable the local phase
            # synchronisation.
            msg = 'cannot lock source repository: %s\n' % err
            self.ui.debug(msg)
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if unfi.obsstore:
                            # these messages are defined here to keep the
                            # source within the 80-character limit
                            mso = _("push includes obsolete changeset: %s!")
                            mst = "push includes %s changeset: %s!"
                            # plain versions for the i18n tool to detect them
                            _("push includes unstable changeset: %s!")
                            _("push includes bumped changeset: %s!")
                            _("push includes divergent changeset: %s!")
                            # If we are pushing and there is at least one
                            # obsolete or unstable changeset in missing, then
                            # at least one of the missing heads will be
                            # obsolete or unstable, so checking heads only
                            # is enough.
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.troubled():
                                    raise util.Abort(_(mst)
                                                     % (ctx.troubles()[0],
                                                        ctx))
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # TODO: get bundlecaps from remote
                    bundlecaps = None
                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
1842 # use the fast path, no race possible on push
1841 bundler = changegroup.bundle10(self, bundlecaps)
1843 bundler = changegroup.bundle10(self, bundlecaps)
1842 cg = self._changegroupsubset(outgoing,
1844 cg = self._changegroupsubset(outgoing,
1843 bundler,
1845 bundler,
1844 'push',
1846 'push',
1845 fastpath=True)
1847 fastpath=True)
1846 else:
1848 else:
1847 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1849 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1848
1850
1849 # apply changegroup to remote
1851 # apply changegroup to remote
1850 if unbundle:
1852 if unbundle:
1851 # local repo finds heads on server, finds out what
1853 # local repo finds heads on server, finds out what
1852 # revs it must push. once revs transferred, if server
1854 # revs it must push. once revs transferred, if server
1853 # finds it has different heads (someone else won
1855 # finds it has different heads (someone else won
1854 # commit/push race), server aborts.
1856 # commit/push race), server aborts.
1855 if force:
1857 if force:
1856 remoteheads = ['force']
1858 remoteheads = ['force']
1857 # ssh: return remote's addchangegroup()
1859 # ssh: return remote's addchangegroup()
1858 # http: return remote's addchangegroup() or 0 for error
1860 # http: return remote's addchangegroup() or 0 for error
1859 ret = remote.unbundle(cg, remoteheads, 'push')
1861 ret = remote.unbundle(cg, remoteheads, 'push')
1860 else:
1862 else:
1861 # we return an integer indicating remote head count
1863 # we return an integer indicating remote head count
1862 # change
1864 # change
1863 ret = remote.addchangegroup(cg, 'push', self.url())
1865 ret = remote.addchangegroup(cg, 'push', self.url())
1864
1866
1865 if ret:
1867 if ret:
1866 # push succeed, synchronize target of the push
1868 # push succeed, synchronize target of the push
1867 cheads = outgoing.missingheads
1869 cheads = outgoing.missingheads
1868 elif revs is None:
1870 elif revs is None:
1869 # All out push fails. synchronize all common
1871 # All out push fails. synchronize all common
1870 cheads = outgoing.commonheads
1872 cheads = outgoing.commonheads
1871 else:
1873 else:
1872 # I want cheads = heads(::missingheads and ::commonheads)
1874 # I want cheads = heads(::missingheads and ::commonheads)
1873 # (missingheads is revs with secret changeset filtered out)
1875 # (missingheads is revs with secret changeset filtered out)
1874 #
1876 #
1875 # This can be expressed as:
1877 # This can be expressed as:
1876 # cheads = ( (missingheads and ::commonheads)
1878 # cheads = ( (missingheads and ::commonheads)
1877 # + (commonheads and ::missingheads))"
1879 # + (commonheads and ::missingheads))"
1878 # )
1880 # )
1879 #
1881 #
1880 # while trying to push we already computed the following:
1882 # while trying to push we already computed the following:
1881 # common = (::commonheads)
1883 # common = (::commonheads)
1882 # missing = ((commonheads::missingheads) - commonheads)
1884 # missing = ((commonheads::missingheads) - commonheads)
1883 #
1885 #
1884 # We can pick:
1886 # We can pick:
1885 # * missingheads part of common (::commonheads)
1887 # * missingheads part of common (::commonheads)
1886 common = set(outgoing.common)
1888 common = set(outgoing.common)
1887 cheads = [node for node in revs if node in common]
1889 cheads = [node for node in revs if node in common]
1888 # and
1890 # and
1889 # * commonheads parents on missing
1891 # * commonheads parents on missing
1890 revset = unfi.set('%ln and parents(roots(%ln))',
1892 revset = unfi.set('%ln and parents(roots(%ln))',
1891 outgoing.commonheads,
1893 outgoing.commonheads,
1892 outgoing.missing)
1894 outgoing.missing)
1893 cheads.extend(c.node() for c in revset)
1895 cheads.extend(c.node() for c in revset)
1894 # even when we don't push, exchanging phase data is useful
1896 # even when we don't push, exchanging phase data is useful
1895 remotephases = remote.listkeys('phases')
1897 remotephases = remote.listkeys('phases')
1896 if (self.ui.configbool('ui', '_usedassubrepo', False)
1898 if (self.ui.configbool('ui', '_usedassubrepo', False)
1897 and remotephases # server supports phases
1899 and remotephases # server supports phases
1898 and ret is None # nothing was pushed
1900 and ret is None # nothing was pushed
1899 and remotephases.get('publishing', False)):
1901 and remotephases.get('publishing', False)):
1900 # When:
1902 # When:
1901 # - this is a subrepo push
1903 # - this is a subrepo push
1902 # - and remote support phase
1904 # - and remote support phase
1903 # - and no changeset was pushed
1905 # - and no changeset was pushed
1904 # - and remote is publishing
1906 # - and remote is publishing
1905 # We may be in issue 3871 case!
1907 # We may be in issue 3871 case!
1906 # We drop the possible phase synchronisation done by
1908 # We drop the possible phase synchronisation done by
1907 # courtesy to publish changesets possibly locally draft
1909 # courtesy to publish changesets possibly locally draft
1908 # on the remote.
1910 # on the remote.
1909 remotephases = {'publishing': 'True'}
1911 remotephases = {'publishing': 'True'}
1910 if not remotephases: # old server or public only repo
1912 if not remotephases: # old server or public only repo
1911 localphasemove(cheads)
1913 localphasemove(cheads)
1912 # don't push any phase data as there is nothing to push
1914 # don't push any phase data as there is nothing to push
1913 else:
1915 else:
1914 ana = phases.analyzeremotephases(self, cheads, remotephases)
1916 ana = phases.analyzeremotephases(self, cheads, remotephases)
1915 pheads, droots = ana
1917 pheads, droots = ana
1916 ### Apply remote phase on local
1918 ### Apply remote phase on local
1917 if remotephases.get('publishing', False):
1919 if remotephases.get('publishing', False):
1918 localphasemove(cheads)
1920 localphasemove(cheads)
1919 else: # publish = False
1921 else: # publish = False
1920 localphasemove(pheads)
1922 localphasemove(pheads)
1921 localphasemove(cheads, phases.draft)
1923 localphasemove(cheads, phases.draft)
1922 ### Apply local phase on remote
1924 ### Apply local phase on remote
1923
1925
1924 # Get the list of all revs draft on remote by public here.
1926 # Get the list of all revs draft on remote by public here.
1925 # XXX Beware that revset break if droots is not strictly
1927 # XXX Beware that revset break if droots is not strictly
1926 # XXX root we may want to ensure it is but it is costly
1928 # XXX root we may want to ensure it is but it is costly
1927 outdated = unfi.set('heads((%ln::%ln) and public())',
1929 outdated = unfi.set('heads((%ln::%ln) and public())',
1928 droots, cheads)
1930 droots, cheads)
1929 for newremotehead in outdated:
1931 for newremotehead in outdated:
1930 r = remote.pushkey('phases',
1932 r = remote.pushkey('phases',
1931 newremotehead.hex(),
1933 newremotehead.hex(),
1932 str(phases.draft),
1934 str(phases.draft),
1933 str(phases.public))
1935 str(phases.public))
1934 if not r:
1936 if not r:
1935 self.ui.warn(_('updating %s to public failed!\n')
1937 self.ui.warn(_('updating %s to public failed!\n')
1936 % newremotehead)
1938 % newremotehead)
1937 self.ui.debug('try to push obsolete markers to remote\n')
1939 self.ui.debug('try to push obsolete markers to remote\n')
1938 obsolete.syncpush(self, remote)
1940 obsolete.syncpush(self, remote)
1939 finally:
1941 finally:
1940 if lock is not None:
1942 if lock is not None:
1941 lock.release()
1943 lock.release()
1942 finally:
1944 finally:
1943 if locallock is not None:
1945 if locallock is not None:
1944 locallock.release()
1946 locallock.release()
1945
1947
1946 self.ui.debug("checking for updated bookmarks\n")
1948 self.ui.debug("checking for updated bookmarks\n")
1947 rb = remote.listkeys('bookmarks')
1949 rb = remote.listkeys('bookmarks')
1948 revnums = map(unfi.changelog.rev, revs or [])
1950 revnums = map(unfi.changelog.rev, revs or [])
1949 ancestors = [
1951 ancestors = [
1950 a for a in unfi.changelog.ancestors(revnums, inclusive=True)]
1952 a for a in unfi.changelog.ancestors(revnums, inclusive=True)]
1951 for k in rb.keys():
1953 for k in rb.keys():
1952 if k in unfi._bookmarks:
1954 if k in unfi._bookmarks:
1953 nr, nl = rb[k], hex(self._bookmarks[k])
1955 nr, nl = rb[k], hex(self._bookmarks[k])
1954 if nr in unfi:
1956 if nr in unfi:
1955 cr = unfi[nr]
1957 cr = unfi[nr]
1956 cl = unfi[nl]
1958 cl = unfi[nl]
1957 if bookmarks.validdest(unfi, cr, cl):
1959 if bookmarks.validdest(unfi, cr, cl):
1958 if ancestors and cl.rev() not in ancestors:
1960 if ancestors and cl.rev() not in ancestors:
1959 continue
1961 continue
1960 r = remote.pushkey('bookmarks', k, nr, nl)
1962 r = remote.pushkey('bookmarks', k, nr, nl)
1961 if r:
1963 if r:
1962 self.ui.status(_("updating bookmark %s\n") % k)
1964 self.ui.status(_("updating bookmark %s\n") % k)
1963 else:
1965 else:
1964 self.ui.warn(_('updating bookmark %s'
1966 self.ui.warn(_('updating bookmark %s'
1965 ' failed!\n') % k)
1967 ' failed!\n') % k)
1966
1968
1967 return ret
1969 return ret
1968
1970
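    # A minimal usage sketch of the return contract documented above. The
    # names `repo` and `other` are hypothetical stand-ins for a
    # localrepository and a peer; kept as comments so the module itself is
    # unaffected.
    #
    #     ret = repo.push(other, force=False, revs=None)
    #     if ret is None:
    #         repo.ui.status('nothing to push\n')
    #     elif ret == 0:
    #         repo.ui.warn('push failed (HTTP error)\n')
    #     elif ret == 1:
    #         repo.ui.status('pushed, remote head count unchanged\n')
    #     else:
    #         repo.ui.status('pushed, head count changed (%r)\n' % ret)
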
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        # TODO: remove call to nodesbetween.
        csets, bases, heads = cl.nodesbetween(bases, heads)
        bases = [p for n in bases for p in cl.parents(n) if p != nullid]
        outgoing = discovery.outgoing(cl, bases, heads)
        bundler = changegroup.bundle10(self)
        return self._changegroupsubset(outgoing, bundler, source)

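    # A minimal usage sketch, assuming `base` and `head` are binary node ids
    # already known to the repository (hypothetical values):
    #
    #     cg = repo.changegroupsubset([base], [head], 'bundle')
    #     data = cg.read()    # successive changegroup chunks, per the
    #                         # docstring above
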
    def getlocalbundle(self, source, outgoing, bundlecaps=None):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        bundler = changegroup.bundle10(self, bundlecaps)
        return self._changegroupsubset(outgoing, bundler, source)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads),
                                   bundlecaps=bundlecaps)

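    # A minimal usage sketch of the pull-style difference computed above,
    # with hypothetical `clientwants`/`clienthas` node lists:
    #
    #     cg = repo.getbundle('pull', heads=clientwants, common=clienthas)
    #     if cg is None:
    #         pass    # client already has everything it asked for
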
    @unfilteredmethod
    def _changegroupsubset(self, outgoing, bundler, source,
                           fastpath=False):
        commonrevs = outgoing.common
        csets = outgoing.missing
        heads = outgoing.missingheads
        # We go through the fast path if we get told to, or if all (unfiltered)
        # heads have been requested (since we then know that all linkrevs will
        # be pulled by the client).
        heads.sort()
        fastpathlinkrev = fastpath or (
                self.filtername is None and heads == sorted(self.heads()))

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)
        gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
        return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
                                                         pr, needfiles)
            revisions += newrevs
            files += newfiles

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but the coming call
                    # to `destroyed` will repair it.
                    # In other cases we can safely update the cache on disk.
                    branchmap.updatecache(self.filtered('served'))
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)

                    newheads = [h for h in self.heads() if h not in oldheads]
                    self.ui.log("incoming",
                                "%s incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

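    # A sketch decoding the head-count return value documented above
    # (hypothetical caller):
    #
    #     ret = repo.addchangegroup(cg, 'pull', remote.url())
    #     if ret == 0:
    #         pass                    # nothing changed or no source
    #     elif ret > 1:
    #         addedheads = ret - 1    # head count grew
    #     elif ret < 0:
    #         lostheads = -ret - 1    # head count shrank
    #     # ret == 1: applied, head count unchanged
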
    def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
        revisions = 0
        files = 0
        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                break
            f = chunkdata["filename"]
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if not fl.addgroup(source, revmap, trp):
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                    else:
                        raise util.Abort(
                            _("received spurious file revlog entry"))
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        return revisions, files

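    # For illustration, the shape of needfiles when server.validate is set
    # (hypothetical data): a map from filename to the filenode ids announced
    # by the incoming manifests,
    #
    #     needfiles = {'a.txt': set([node1]), 'b.txt': set([node2, node3])}
    #
    # every received file revision must account for one entry; anything left
    # afterwards is reported above as missing file data.
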
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible;
                    # filters above served are unlikely to be fetched from a
                    # clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

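    # The wire format parsed above, reconstructed from this method (a sketch,
    # not a protocol specification):
    #
    #     <resp>\n                        # 0 ok, 1 forbidden, 2 lock failed
    #     <total_files> <total_bytes>\n
    #     <name>\0<size>\n                # repeated total_files times, each
    #     <size raw bytes of file data>   # followed by the file contents
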
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

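    # The capability negotiation above, condensed (a sketch with a
    # hypothetical `remote` peer):
    #
    #     remote.capable('stream-preferred')  # server asks us to stream
    #     remote.capable('stream')            # plain revlogv1 streaming clone
    #     remote.capable('streamreqs')        # stream only if we support the
    #                                         # advertised formats
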
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

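    # A minimal usage sketch with hypothetical values; 'bookmarks' and
    # 'phases' are the namespaces exercised by push() above:
    #
    #     ok = repo.pushkey('bookmarks', 'featureX', '', hex(newnode))
    #     if not ok:
    #         repo.ui.warn('bookmark update refused\n')
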
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

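    # A minimal usage sketch (hypothetical message): the text lands under the
    # repository's .hg directory and the returned path is relative to the
    # working directory,
    #
    #     path = repo.savecommitmessage('WIP: draft message')
    #     repo.ui.status('message saved to %s\n' % path)
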
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

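# A minimal usage sketch (hypothetical arguments): the returned closure can
# run after a transaction finishes, renaming journal files to their undo
# names without keeping a reference to the repo,
#
#     onclose = aftertrans([(vfs, 'journal', 'undo')])
#     onclose()    # renames journal -> undo, ignoring files not yet created
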
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

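# For example (derived from the implementation above):
#
#     undoname('.hg/journal.dirstate')    # -> '.hg/undo.dirstate'
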
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True