en-us: initialization
timeless@mozdev.org, changeset r17532:e4b2f0eb (default branch)

# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

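# Illustrative note (added for clarity, not part of the original file):
# properties decorated with @filecache('name') are computed once and then
# re-read only when the backing file under .hg/ changes; storecache applies
# the same idea to files under .hg/store/ by joining via sjoin. A minimal
# sketch of the pattern used later in this module:
#
#     class example(object):
#         @storecache('00changelog.i')
#         def changelog(self):
#             # recomputed only when .hg/store/00changelog.i changes
#             return changelog.changelog(self.sopener)
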
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

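# Illustrative note (added, not in the original file): callers normally do
# not construct localpeer directly; they obtain one via repo.peer() (defined
# below), which wraps the repository in the capability-restricted peer API
# that push/pull code programs against. A hedged sketch:
#
#     peer = repo.peer()
#     if peer.canpush():
#         node = peer.lookup('tip')
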
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wopener = scmutil.opener(path, expand=True)
        self.wvfs = self.wopener
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.vfs = self.opener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

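        # Illustrative note (added, not in the original file): with the
        # default configuration the block above leaves a newly created
        # repository with a .hg/requires file listing, one entry per line
        # and in no guaranteed order:
        #
        #     revlogv1
        #     store
        #     fncache
        #     dotencode
        #
        # (_writerequirements below is what actually serializes
        # self.requirements to that file.)
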
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

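    # Illustrative note (added, not in the original file): _checknested
    # walks path components from the longest prefix down, consulting the
    # working context's subrepo state. Hypothetical behaviour for a repo at
    # /repo with a subrepository registered as 'sub':
    #
    #     repo._checknested('/repo/sub')        -> True (exact subrepo match)
    #     repo._checknested('/repo/sub/file')   -> delegated to
    #                                              sub.checknested('file')
    #     repo._checknested('/repo/other/file') -> False (no subrepo prefix)
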
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it will probably move back to the changelog for efficiency
        and consistency reasons.

        Note that hiddenrevs needs invalidation when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly a new extinct changeset)
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
            hidden.update(self.revs(hiddenquery))
        return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

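    # Illustrative note (added, not in the original file): the dunder
    # methods above give the repository its container behaviour. Hedged
    # examples:
    #
    #     repo[None]         # workingctx for the uncommitted working dir
    #     repo['tip']        # changectx for the tip changeset
    #     len(repo)          # number of revisions in the changelog
    #     'tip' in repo      # True, via lookup()
    #     [r for r in repo]  # revision numbers 0 .. len(repo) - 1
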
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

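    # Illustrative note (added, not in the original file): formatspec does
    # printf-style argument substitution, so callers can build revset
    # queries without quoting problems. Hypothetical usage:
    #
    #     repo.revs('heads(%ld)', [0, 1, 2])   # %ld expands a list of revs
    #     for ctx in repo.set('branch(%s)', 'default'):
    #         pass  # each ctx is a changectx
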
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

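    # Illustrative note (added, not in the original file): a hedged example
    # of tagging the current tip; with local=False this commits a new
    # changeset touching only .hgtags, as implemented in _tag above (names,
    # message, and user here are hypothetical):
    #
    #     node = repo['tip'].node()
    #     repo.tag(['v1.0'], node, 'Added tag v1.0', False,
    #              'Alice <alice@example.com>', None)
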
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

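    # Illustrative note (added, not in the original file): branchmap() maps
    # each branch name to the list of its head nodes, e.g. (hypothetical
    # values):
    #
    #     {'default': [node1, node2], 'stable': [node3]}
    #
    # branchtip()/branchtags() below reduce those lists to a single tipmost
    # head, preferring heads that do not close the branch.
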
    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

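    # Illustrative note (added, not in the original file): a worked example
    # of the pruning above using the comment's own graph,
    # 1 (branch a) -> 2 (branch b) -> 3 (branch a). If the cache lists rev 1
    # as a head of branch 'a' and rev 3 arrives as a candidate, bheadrevs
    # becomes [1, 3]; rev 1 is an ancestor of rev 3, so it is pruned and
    # branch 'a' ends up with the single head 3. Checking only parents would
    # have missed this, since 3's parent (rev 2) is on branch 'b'.
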
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

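    # Illustrative note (added, not in the original file): known() answers
    # the wire-protocol "which of these nodes do you have?" query, treating
    # secret changesets as unknown so they are never exchanged. Hedged
    # example (node values are hypothetical):
    #
    #     repo.known([presentnode, missingnode])  # -> [True, False]
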
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

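    # Illustrative note (added, not in the original file): _loadfilter reads
    # patterns from the [encode] and [decode] hgrc sections; each maps a
    # file pattern either to a registered data filter name or to a shell
    # command run via util.filter. A hypothetical configuration:
    #
    #     [encode]
    #     *.txt = tr '[:lower:]' '[:upper:]'
    #     [decode]
    #     *.txt = tr '[:upper:]' '[:lower:]'
    #
    # ('!' as the command disables filtering for that pattern, per the code
    # above.)
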
864 @propertycache
864 @propertycache
865 def _encodefilterpats(self):
865 def _encodefilterpats(self):
866 return self._loadfilter('encode')
866 return self._loadfilter('encode')
867
867
868 @propertycache
868 @propertycache
869 def _decodefilterpats(self):
869 def _decodefilterpats(self):
870 return self._loadfilter('decode')
870 return self._loadfilter('decode')
871
871
872 def adddatafilter(self, name, filter):
872 def adddatafilter(self, name, filter):
873 self._datafilters[name] = filter
873 self._datafilters[name] = filter
874
874
875 def wread(self, filename):
875 def wread(self, filename):
876 if self._link(filename):
876 if self._link(filename):
877 data = os.readlink(self.wjoin(filename))
877 data = os.readlink(self.wjoin(filename))
878 else:
878 else:
879 data = self.wopener.read(filename)
879 data = self.wopener.read(filename)
880 return self._filter(self._encodefilterpats, filename, data)
880 return self._filter(self._encodefilterpats, filename, data)
881
881
882 def wwrite(self, filename, data, flags):
882 def wwrite(self, filename, data, flags):
883 data = self._filter(self._decodefilterpats, filename, data)
883 data = self._filter(self._decodefilterpats, filename, data)
884 if 'l' in flags:
884 if 'l' in flags:
885 self.wopener.symlink(data, filename)
885 self.wopener.symlink(data, filename)
886 else:
886 else:
887 self.wopener.write(filename, data)
887 self.wopener.write(filename, data)
888 if 'x' in flags:
888 if 'x' in flags:
889 util.setflags(self.wjoin(filename), False, True)
889 util.setflags(self.wjoin(filename), False, True)
890
890
891 def wwritedata(self, filename, data):
891 def wwritedata(self, filename, data):
892 return self._filter(self._decodefilterpats, filename, data)
892 return self._filter(self._decodefilterpats, filename, data)
893
893
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

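    # A minimal usage sketch (assuming "repo" is a localrepository; the
    # transaction description is hypothetical). This mirrors how
    # commitctx() below drives the API:
    #
    #     tr = repo.transaction('add-changesets')
    #     try:
    #         ...                  # write to revlogs through tr
    #         tr.close()           # commit: journal becomes the undo files
    #     finally:
    #         tr.release()         # rolls the journal back if not closed
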
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

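    # Sketch of the return-value contract (values taken from the code
    # above and from _rollback() below): rollback() returns 1 when there
    # is nothing to undo, otherwise whatever _rollback() returns (0 on
    # success, including dry runs).
    #
    #     if repo.rollback(dryrun=True) == 0:   # something can be undone
    #         repo.rollback(force=True)         # skip the data-loss guard
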
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

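    # The retry above honours the ui.timeout config knob (600 seconds is
    # the default used by the code above); e.g. in hgrc:
    #
    #     [ui]
    #     timeout = 60
    #
    # A waiting caller then blocks up to that many seconds for the current
    # holder to finish before error.LockHeld propagates.
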
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

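    # Usage sketch: callers needing both locks take wlock() before lock(),
    # as rollback() above does; acquiring them in the other order can
    # deadlock against a process doing the same thing.
    #
    #     wlock = lck = None
    #     try:
    #         wlock = repo.wlock()
    #         lck = repo.lock()
    #         ...                    # mutate store and working state
    #     finally:
    #         release(lck, wlock)    # release in reverse acquisition order
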
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

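    # Sketch of the copy metadata produced above (values hypothetical):
    # after "hg mv foo bar" plus an edit, the committed filelog entry for
    # bar would carry
    #
    #     meta = {"copy": "foo", "copyrev": "<hex filenode of foo>"}
    #
    # with fparent1 == nullid, telling readers to follow the copy data
    # instead of the usual first parent.
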
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

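    # A minimal caller sketch (user string hypothetical); commit() returns
    # the new changeset node, or None when there is nothing to commit:
    #
    #     node = repo.commit(text='fix encoding bug',
    #                        user='alice <alice@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
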
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changeset. if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branch head cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

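    # Caller sketch: the result is a 7-tuple of sorted lists in this fixed
    # order (ignored/clean/unknown are only populated when requested):
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, ignored=True, clean=True)
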
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

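    # How the sampling above behaves: between() walks first parents from
    # top towards bottom and records a node whenever the step count i
    # reaches f, doubling f each time, so for a pair (tip, node) it returns
    # nodes at first-parent distances 1, 2, 4, 8, ... from tip, i.e. an
    # exponentially spaced sample of the range.
    #
    #     samples = repo.between([(tip, nullid)])[0]
    #     # samples[k] lies 2**k first-parent steps below tip
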
1731 def pull(self, remote, heads=None, force=False):
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible,
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset,
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

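    # Illustrative usage sketch (not part of this module): pulling from a
    # remote peer into a local repository. The paths and URL below are
    # hypothetical placeholders.
    #
    #     from mercurial import ui as uimod, hg
    #     u = uimod.ui()
    #     repo = hg.repository(u, '/path/to/local/repo')
    #     other = hg.peer(u, {}, 'http://example.com/hg/repo')
    #     result = repo.pull(other)                    # pull everything
    #     result = repo.pull(other, heads=[somenode])  # pull a subset
    #
    # The return value is 0 when nothing was fetched, otherwise the integer
    # reported by addchangegroup() below.
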
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

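    # Sketch of how an extension might override checkpush(), following the
    # usual reposetup subclassing pattern (the extension and its policy are
    # hypothetical):
    #
    #     def reposetup(ui, repo):
    #         class vetorepo(repo.__class__):
    #             def checkpush(self, force, revs):
    #                 super(vetorepo, self).checkpush(force, revs)
    #                 if not force and revs and len(revs) > 1:
    #                     raise util.Abort('push one head at a time')
    #         repo.__class__ = vetorepo
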
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes the local user can lock the remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes the local user cannot lock the remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete markers,
                        # so we can skip the iteration
                        if self.obsstore:
                            # these messages are defined here to respect the
                            # 80-character line limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If we are going to push and there is at least
                            # one obsolete or unstable changeset in missing,
                            # then at least one of the missing heads will be
                            # obsolete or unstable. So checking heads only
                            # is ok.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed; synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

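    # Illustrative usage sketch (hypothetical peer URL):
    #
    #     other = hg.peer(u, {}, 'ssh://hg@example.com/repo')
    #     ret = repo.push(other, newbranch=True)
    #     if ret is None:
    #         pass  # nothing to push
    #     elif ret == 0:
    #         pass  # HTTP error reported by the remote
    #     # any other value follows the addchangegroup() convention below
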
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

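    # Sketch of the selection semantics (the nodes are hypothetical): the
    # changegroup contains exactly the changesets that are descendants of a
    # base and ancestors of a head, i.e. roughly the revset "bases::heads".
    #
    #     cg = repo.changegroupsubset([basenode], [headnode], 'pull')
    #     data = cg.read()  # stream of changegroup chunks
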
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

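    # In revset terms the selection is roughly
    # "ancestors(heads) - ancestors(common)" (hypothetical nodes shown):
    #
    #     cg = repo.getbundle('serve', heads=[h], common=[c])
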
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

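    # Both bundling paths honor the bundle.reorder setting read above:
    # 'auto' (the default) leaves reordering to the revlog layer, while an
    # explicit boolean forces it on or off. E.g. in an hgrc:
    #
    #     [bundle]
    #     reorder = auto
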
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

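    # Decoding the return value (a sketch consistent with the docstring
    # above; the helper name is hypothetical):
    #
    #     def describeheadchange(ret):
    #         if ret == 0:
    #             return 'nothing changed'
    #         if ret == 1:
    #             return 'head count unchanged'
    #         if ret > 1:
    #             return '%d new heads' % (ret - 1)
    #         return '%d heads removed' % (-ret - 1)
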
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

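    # The stream consumed above has the following shape (as parsed by this
    # method):
    #
    #     <resp>\n                       # 0 ok, 1 forbidden, 2 lock failed
    #     <total_files> <total_bytes>\n
    #     # then, repeated total_files times:
    #     <name>\0<size>\n
    #     <size bytes of raw revlog data>
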
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

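    # Illustrative usage sketch (hypothetical paths): hg.clone() is the usual
    # entry point and ends up calling this method on the new repository.
    #
    #     u = uimod.ui()
    #     repo = hg.repository(u, '/tmp/newrepo', create=True)
    #     other = hg.peer(u, {}, 'http://example.com/hg/repo')
    #     repo.clone(other, stream=True)   # falls back to pull() if needed
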
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

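    # Illustrative sketch (hypothetical bookmark name): creating a bookmark
    # through the pushkey protocol; old and new values are hex node strings,
    # with '' meaning "does not exist".
    #
    #     ok = repo.pushkey('bookmarks', 'feature', '', hex(newnode))
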
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

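    # Illustrative sketch: listing the supported pushkey namespaces, then
    # the bookmarks namespace.
    #
    #     repo.listkeys('namespaces')   # names of available namespaces
    #     repo.listkeys('bookmarks')    # {name: hex node, ...}
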
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

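    # Illustrative sketch: the saved text ends up in .hg/last-message.txt
    # and the returned value is that path relative to the current directory.
    #
    #     backup = repo.savecommitmessage('WIP: draft commit message')
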
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

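# Sketch of how aftertrans() is used (simplified from the transaction setup
# elsewhere in this class): the returned callback renames the journal files
# into undo files once the transaction completes, enabling 'hg rollback'.
#
#     renames = [('journal', 'undo'), ('journal.dirstate', 'undo.dirstate')]
#     callback = aftertrans(renames)
#     callback()  # performs the renames, ignoring missing journal files
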
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

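# For example, undoname('store/journal.phaseroots') returns
# 'store/undo.phaseroots'.
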
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True