obsolete: mark unreachable extinct changesets as hidden...
Pierre-Yves.David@ens-lyon.org
r17208:8018f234 default
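The hunks below replace the eager `self.hiddenrevs = set()` attribute set in `localrepository.__init__` with a lazily computed `propertycache`: when the repository has an obsstore, the property hides every extinct changeset that is not an ancestor of the working directory parent, a bookmark, or a tag, using the revset `'extinct() - ::(. + bookmark() + tagged())'`. A minimal sketch of that lazy-caching pattern follows, assuming a hypothetical `Repo` class with a `revs()` stub standing in for `localrepository` and the real revset engine:

# Illustrative sketch only -- not part of the patch.
class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        # cache on the instance so later lookups bypass the descriptor
        obj.__dict__[self.name] = result
        return result

class Repo(object):
    def revs(self, expr):
        print('evaluating revset: %s' % expr)  # stand-in for the revset engine
        return []

    @propertycache
    def hiddenrevs(self):
        # same query the patch uses: extinct changesets unreachable from
        # the working parent, any bookmark, and any tag
        return set(self.revs('extinct() - ::(. + bookmark() + tagged())'))

r = Repo()
r.hiddenrevs  # first access: the revset is evaluated and cached
r.hiddenrevs  # second access: served from r.__dict__, no re-evaluation

Because `propertycache` is a non-data descriptor, storing the result in the instance `__dict__` makes later lookups bypass `__get__` entirely; this is also why the new docstring notes that the cached set needs invalidation when new changesets or new obsolete markers appear.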
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2573 +1,2586 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import peer, changegroup, subrepo, discovery, pushkey, obsolete
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding, base85
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
 class storecache(filecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
 
 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''
 
     def __init__(self, repo, caps=MODERNCAPS):
         peer.peerrepository.__init__(self)
         self._repo = repo
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats
 
     def close(self):
         self._repo.close()
 
     def _capabilities(self):
         return self._caps
 
     def local(self):
         return self._repo
 
     def canpush(self):
         return True
 
     def url(self):
         return self._repo.url()
 
     def lookup(self, key):
         return self._repo.lookup(key)
 
     def branchmap(self):
         return discovery.visiblebranchmap(self._repo)
 
     def heads(self):
         return discovery.visibleheads(self._repo)
 
     def known(self, nodes):
         return self._repo.known(nodes)
 
     def getbundle(self, source, heads=None, common=None):
         return self._repo.getbundle(source, heads=heads, common=common)
 
     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.
 
     def lock(self):
         return self._repo.lock()
 
     def addchangegroup(self, cg, source, url):
         return self._repo.addchangegroup(cg, source, url)
 
     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)
 
     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)
 
     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)
 
 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''
 
     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=LEGACYCAPS)
 
     def branches(self, nodes):
         return self._repo.branches(nodes)
 
     def between(self, pairs):
         return self._repo.between(pairs)
 
     def changegroup(self, basenodes, source):
         return self._repo.changegroup(basenodes, source)
 
     def changegroupsubset(self, bases, heads, source):
         return self._repo.changegroupsubset(bases, heads, source)
 
 class localrepository(object):
 
     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta'))
     requirements = ['revlogv1']
 
     def _baserequirements(self, create):
         return self.requirements[:]
 
     def __init__(self, baseui, path=None, create=False):
         self.wopener = scmutil.opener(path, expand=True)
         self.wvfs = self.wopener
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.opener = scmutil.opener(self.path)
         self.vfs = self.opener
         self.baseui = baseui
         self.ui = baseui.copy()
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
-        # hiddenrevs: revs that should be hidden by command and tools
-        #
-        # This set is carried on the repo to ease initialisation and lazy
-        # loading it'll probably move back to changelog for efficienty and
-        # consistency reason
-        self.hiddenrevs = set()
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         if not self.vfs.isdir():
             if create:
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
                 requirements = self._baserequirements(create)
                 if self.ui.configbool('format', 'usestore', True):
                     self.vfs.mkdir("store")
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                 # create an invalid changelog
                 self.vfs.append(
                     "00changelog.i",
                     '\0\0\0\2' # represents revlogv2
                     ' dummy changelog to prevent using the old repo layout'
                 )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.vfs, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()
 
         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         self.store = store.store(requirements, self.sharedpath, scmutil.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.svfs = self.sopener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()
 
 
         self._branchcache = None
         self._branchcachetip = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
     def close(self):
         pass
 
     def _restrictcapabilities(self, caps):
         return caps
 
     def _applyrequirements(self, requirements):
         self.requirements = requirements
         self.sopener.options = dict((r, 1) for r in requirements
                                     if r in self.openerreqs)
 
     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)
 
         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         # $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False
 
     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle
 
     @filecache('bookmarks')
     def _bookmarks(self):
         return bookmarks.read(self)
 
     @filecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)
 
     def _writebookmarks(self, marks):
         bookmarks.write(self)
 
     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
         for mark, n in self._bookmarks.iteritems():
             if mark.split('@', 1)[0] == name:
                 heads.append(n)
         return heads
 
     @storecache('phaseroots')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)
 
     @storecache('obsstore')
     def obsstore(self):
         store = obsolete.obsstore(self.sopener)
         return store
 
+    @propertycache
+    def hiddenrevs(self):
+        """hiddenrevs: revs that should be hidden by command and tools
+
+        This set is carried on the repo to ease initialisation and lazy
+        loading it'll probably move back to changelog for efficienty and
+        consistency reason
+
+        Note that the hiddenrevs will needs invalidations when
+        - a new changesets is added (possible unstable above extinct)
+        - a new obsolete marker is added (possible new extinct changeset)
+        """
+        hidden = set()
+        if self.obsstore:
+            ### hide extinct changeset that are not accessible by any mean
+            hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
+            hidden.update(self.revs(hiddenquery))
+        return hidden
+
300 @storecache('00changelog.i')
313 @storecache('00changelog.i')
301 def changelog(self):
314 def changelog(self):
302 c = changelog.changelog(self.sopener)
315 c = changelog.changelog(self.sopener)
303 if 'HG_PENDING' in os.environ:
316 if 'HG_PENDING' in os.environ:
304 p = os.environ['HG_PENDING']
317 p = os.environ['HG_PENDING']
305 if p.startswith(self.root):
318 if p.startswith(self.root):
306 c.readpending('00changelog.i.a')
319 c.readpending('00changelog.i.a')
307 return c
320 return c
308
321
309 @storecache('00manifest.i')
322 @storecache('00manifest.i')
310 def manifest(self):
323 def manifest(self):
311 return manifest.manifest(self.sopener)
324 return manifest.manifest(self.sopener)
312
325
313 @filecache('dirstate')
326 @filecache('dirstate')
314 def dirstate(self):
327 def dirstate(self):
315 warned = [0]
328 warned = [0]
316 def validate(node):
329 def validate(node):
317 try:
330 try:
318 self.changelog.rev(node)
331 self.changelog.rev(node)
319 return node
332 return node
320 except error.LookupError:
333 except error.LookupError:
321 if not warned[0]:
334 if not warned[0]:
322 warned[0] = True
335 warned[0] = True
323 self.ui.warn(_("warning: ignoring unknown"
336 self.ui.warn(_("warning: ignoring unknown"
324 " working parent %s!\n") % short(node))
337 " working parent %s!\n") % short(node))
325 return nullid
338 return nullid
326
339
327 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
340 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
328
341
329 def __getitem__(self, changeid):
342 def __getitem__(self, changeid):
330 if changeid is None:
343 if changeid is None:
331 return context.workingctx(self)
344 return context.workingctx(self)
332 return context.changectx(self, changeid)
345 return context.changectx(self, changeid)
333
346
334 def __contains__(self, changeid):
347 def __contains__(self, changeid):
335 try:
348 try:
336 return bool(self.lookup(changeid))
349 return bool(self.lookup(changeid))
337 except error.RepoLookupError:
350 except error.RepoLookupError:
338 return False
351 return False
339
352
340 def __nonzero__(self):
353 def __nonzero__(self):
341 return True
354 return True
342
355
343 def __len__(self):
356 def __len__(self):
344 return len(self.changelog)
357 return len(self.changelog)
345
358
346 def __iter__(self):
359 def __iter__(self):
347 for i in xrange(len(self)):
360 for i in xrange(len(self)):
348 yield i
361 yield i
349
362
350 def revs(self, expr, *args):
363 def revs(self, expr, *args):
351 '''Return a list of revisions matching the given revset'''
364 '''Return a list of revisions matching the given revset'''
352 expr = revset.formatspec(expr, *args)
365 expr = revset.formatspec(expr, *args)
353 m = revset.match(None, expr)
366 m = revset.match(None, expr)
354 return [r for r in m(self, range(len(self)))]
367 return [r for r in m(self, range(len(self)))]
355
368
356 def set(self, expr, *args):
369 def set(self, expr, *args):
357 '''
370 '''
358 Yield a context for each matching revision, after doing arg
371 Yield a context for each matching revision, after doing arg
359 replacement via revset.formatspec
372 replacement via revset.formatspec
360 '''
373 '''
361 for r in self.revs(expr, *args):
374 for r in self.revs(expr, *args):
362 yield self[r]
375 yield self[r]
363
376
364 def url(self):
377 def url(self):
365 return 'file:' + self.root
378 return 'file:' + self.root
366
379
367 def hook(self, name, throw=False, **args):
380 def hook(self, name, throw=False, **args):
368 return hook.hook(self.ui, self, name, throw, **args)
381 return hook.hook(self.ui, self, name, throw, **args)
369
382
370 tag_disallowed = ':\r\n'
383 tag_disallowed = ':\r\n'
371
384
372 def _tag(self, names, node, message, local, user, date, extra={}):
385 def _tag(self, names, node, message, local, user, date, extra={}):
373 if isinstance(names, str):
386 if isinstance(names, str):
374 allchars = names
387 allchars = names
375 names = (names,)
388 names = (names,)
376 else:
389 else:
377 allchars = ''.join(names)
390 allchars = ''.join(names)
378 for c in self.tag_disallowed:
391 for c in self.tag_disallowed:
379 if c in allchars:
392 if c in allchars:
380 raise util.Abort(_('%r cannot be used in a tag name') % c)
393 raise util.Abort(_('%r cannot be used in a tag name') % c)
381
394
382 branches = self.branchmap()
395 branches = self.branchmap()
383 for name in names:
396 for name in names:
384 self.hook('pretag', throw=True, node=hex(node), tag=name,
397 self.hook('pretag', throw=True, node=hex(node), tag=name,
385 local=local)
398 local=local)
386 if name in branches:
399 if name in branches:
387 self.ui.warn(_("warning: tag %s conflicts with existing"
400 self.ui.warn(_("warning: tag %s conflicts with existing"
388 " branch name\n") % name)
401 " branch name\n") % name)
389
402
390 def writetags(fp, names, munge, prevtags):
403 def writetags(fp, names, munge, prevtags):
391 fp.seek(0, 2)
404 fp.seek(0, 2)
392 if prevtags and prevtags[-1] != '\n':
405 if prevtags and prevtags[-1] != '\n':
393 fp.write('\n')
406 fp.write('\n')
394 for name in names:
407 for name in names:
395 m = munge and munge(name) or name
408 m = munge and munge(name) or name
396 if (self._tagscache.tagtypes and
409 if (self._tagscache.tagtypes and
397 name in self._tagscache.tagtypes):
410 name in self._tagscache.tagtypes):
398 old = self.tags().get(name, nullid)
411 old = self.tags().get(name, nullid)
399 fp.write('%s %s\n' % (hex(old), m))
412 fp.write('%s %s\n' % (hex(old), m))
400 fp.write('%s %s\n' % (hex(node), m))
413 fp.write('%s %s\n' % (hex(node), m))
401 fp.close()
414 fp.close()
402
415
403 prevtags = ''
416 prevtags = ''
404 if local:
417 if local:
405 try:
418 try:
406 fp = self.opener('localtags', 'r+')
419 fp = self.opener('localtags', 'r+')
407 except IOError:
420 except IOError:
408 fp = self.opener('localtags', 'a')
421 fp = self.opener('localtags', 'a')
409 else:
422 else:
410 prevtags = fp.read()
423 prevtags = fp.read()
411
424
412 # local tags are stored in the current charset
425 # local tags are stored in the current charset
413 writetags(fp, names, None, prevtags)
426 writetags(fp, names, None, prevtags)
414 for name in names:
427 for name in names:
415 self.hook('tag', node=hex(node), tag=name, local=local)
428 self.hook('tag', node=hex(node), tag=name, local=local)
416 return
429 return
417
430
418 try:
431 try:
419 fp = self.wfile('.hgtags', 'rb+')
432 fp = self.wfile('.hgtags', 'rb+')
420 except IOError, e:
433 except IOError, e:
421 if e.errno != errno.ENOENT:
434 if e.errno != errno.ENOENT:
422 raise
435 raise
423 fp = self.wfile('.hgtags', 'ab')
436 fp = self.wfile('.hgtags', 'ab')
424 else:
437 else:
425 prevtags = fp.read()
438 prevtags = fp.read()
426
439
427 # committed tags are stored in UTF-8
440 # committed tags are stored in UTF-8
428 writetags(fp, names, encoding.fromlocal, prevtags)
441 writetags(fp, names, encoding.fromlocal, prevtags)
429
442
430 fp.close()
443 fp.close()
431
444
432 self.invalidatecaches()
445 self.invalidatecaches()
433
446
434 if '.hgtags' not in self.dirstate:
447 if '.hgtags' not in self.dirstate:
435 self[None].add(['.hgtags'])
448 self[None].add(['.hgtags'])
436
449
437 m = matchmod.exact(self.root, '', ['.hgtags'])
450 m = matchmod.exact(self.root, '', ['.hgtags'])
438 tagnode = self.commit(message, user, date, extra=extra, match=m)
451 tagnode = self.commit(message, user, date, extra=extra, match=m)
439
452
440 for name in names:
453 for name in names:
441 self.hook('tag', node=hex(node), tag=name, local=local)
454 self.hook('tag', node=hex(node), tag=name, local=local)
442
455
443 return tagnode
456 return tagnode
444
457
445 def tag(self, names, node, message, local, user, date):
458 def tag(self, names, node, message, local, user, date):
446 '''tag a revision with one or more symbolic names.
459 '''tag a revision with one or more symbolic names.
447
460
448 names is a list of strings or, when adding a single tag, names may be a
461 names is a list of strings or, when adding a single tag, names may be a
449 string.
462 string.
450
463
451 if local is True, the tags are stored in a per-repository file.
464 if local is True, the tags are stored in a per-repository file.
452 otherwise, they are stored in the .hgtags file, and a new
465 otherwise, they are stored in the .hgtags file, and a new
453 changeset is committed with the change.
466 changeset is committed with the change.
454
467
455 keyword arguments:
468 keyword arguments:
456
469
457 local: whether to store tags in non-version-controlled file
470 local: whether to store tags in non-version-controlled file
458 (default False)
471 (default False)
459
472
460 message: commit message to use if committing
473 message: commit message to use if committing
461
474
462 user: name of user to use if committing
475 user: name of user to use if committing
463
476
464 date: date tuple to use if committing'''
477 date: date tuple to use if committing'''
465
478
466 if not local:
479 if not local:
467 for x in self.status()[:5]:
480 for x in self.status()[:5]:
468 if '.hgtags' in x:
481 if '.hgtags' in x:
469 raise util.Abort(_('working copy of .hgtags is changed '
482 raise util.Abort(_('working copy of .hgtags is changed '
470 '(please commit .hgtags manually)'))
483 '(please commit .hgtags manually)'))
471
484
472 self.tags() # instantiate the cache
485 self.tags() # instantiate the cache
473 self._tag(names, node, message, local, user, date)
486 self._tag(names, node, message, local, user, date)
474
487
475 @propertycache
488 @propertycache
476 def _tagscache(self):
489 def _tagscache(self):
477 '''Returns a tagscache object that contains various tags related
490 '''Returns a tagscache object that contains various tags related
478 caches.'''
491 caches.'''
479
492
480 # This simplifies its cache management by having one decorated
493 # This simplifies its cache management by having one decorated
481 # function (this one) and the rest simply fetch things from it.
494 # function (this one) and the rest simply fetch things from it.
482 class tagscache(object):
495 class tagscache(object):
483 def __init__(self):
496 def __init__(self):
484 # These two define the set of tags for this repository. tags
497 # These two define the set of tags for this repository. tags
485 # maps tag name to node; tagtypes maps tag name to 'global' or
498 # maps tag name to node; tagtypes maps tag name to 'global' or
486 # 'local'. (Global tags are defined by .hgtags across all
499 # 'local'. (Global tags are defined by .hgtags across all
487 # heads, and local tags are defined in .hg/localtags.)
500 # heads, and local tags are defined in .hg/localtags.)
488 # They constitute the in-memory cache of tags.
501 # They constitute the in-memory cache of tags.
489 self.tags = self.tagtypes = None
502 self.tags = self.tagtypes = None
490
503
491 self.nodetagscache = self.tagslist = None
504 self.nodetagscache = self.tagslist = None
492
505
493 cache = tagscache()
506 cache = tagscache()
494 cache.tags, cache.tagtypes = self._findtags()
507 cache.tags, cache.tagtypes = self._findtags()
495
508
496 return cache
509 return cache
497
510
498 def tags(self):
511 def tags(self):
499 '''return a mapping of tag to node'''
512 '''return a mapping of tag to node'''
500 t = {}
513 t = {}
501 for k, v in self._tagscache.tags.iteritems():
514 for k, v in self._tagscache.tags.iteritems():
502 try:
515 try:
503 # ignore tags to unknown nodes
516 # ignore tags to unknown nodes
504 self.changelog.rev(v)
517 self.changelog.rev(v)
505 t[k] = v
518 t[k] = v
506 except (error.LookupError, ValueError):
519 except (error.LookupError, ValueError):
507 pass
520 pass
508 return t
521 return t
509
522
510 def _findtags(self):
523 def _findtags(self):
511 '''Do the hard work of finding tags. Return a pair of dicts
524 '''Do the hard work of finding tags. Return a pair of dicts
512 (tags, tagtypes) where tags maps tag name to node, and tagtypes
525 (tags, tagtypes) where tags maps tag name to node, and tagtypes
513 maps tag name to a string like \'global\' or \'local\'.
526 maps tag name to a string like \'global\' or \'local\'.
514 Subclasses or extensions are free to add their own tags, but
527 Subclasses or extensions are free to add their own tags, but
515 should be aware that the returned dicts will be retained for the
528 should be aware that the returned dicts will be retained for the
516 duration of the localrepo object.'''
529 duration of the localrepo object.'''
517
530
518 # XXX what tagtype should subclasses/extensions use? Currently
531 # XXX what tagtype should subclasses/extensions use? Currently
519 # mq and bookmarks add tags, but do not set the tagtype at all.
532 # mq and bookmarks add tags, but do not set the tagtype at all.
520 # Should each extension invent its own tag type? Should there
533 # Should each extension invent its own tag type? Should there
521 # be one tagtype for all such "virtual" tags? Or is the status
534 # be one tagtype for all such "virtual" tags? Or is the status
522 # quo fine?
535 # quo fine?
523
536
524 alltags = {} # map tag name to (node, hist)
537 alltags = {} # map tag name to (node, hist)
525 tagtypes = {}
538 tagtypes = {}
526
539
527 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
540 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
528 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
541 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
529
542
530 # Build the return dicts. Have to re-encode tag names because
543 # Build the return dicts. Have to re-encode tag names because
531 # the tags module always uses UTF-8 (in order not to lose info
544 # the tags module always uses UTF-8 (in order not to lose info
532 # writing to the cache), but the rest of Mercurial wants them in
545 # writing to the cache), but the rest of Mercurial wants them in
533 # local encoding.
546 # local encoding.
534 tags = {}
547 tags = {}
535 for (name, (node, hist)) in alltags.iteritems():
548 for (name, (node, hist)) in alltags.iteritems():
536 if node != nullid:
549 if node != nullid:
537 tags[encoding.tolocal(name)] = node
550 tags[encoding.tolocal(name)] = node
538 tags['tip'] = self.changelog.tip()
551 tags['tip'] = self.changelog.tip()
539 tagtypes = dict([(encoding.tolocal(name), value)
552 tagtypes = dict([(encoding.tolocal(name), value)
540 for (name, value) in tagtypes.iteritems()])
553 for (name, value) in tagtypes.iteritems()])
541 return (tags, tagtypes)
554 return (tags, tagtypes)
542
555
543 def tagtype(self, tagname):
556 def tagtype(self, tagname):
544 '''
557 '''
545 return the type of the given tag. result can be:
558 return the type of the given tag. result can be:
546
559
547 'local' : a local tag
560 'local' : a local tag
548 'global' : a global tag
561 'global' : a global tag
549 None : tag does not exist
562 None : tag does not exist
550 '''
563 '''
551
564
552 return self._tagscache.tagtypes.get(tagname)
565 return self._tagscache.tagtypes.get(tagname)
553
566
554 def tagslist(self):
567 def tagslist(self):
555 '''return a list of tags ordered by revision'''
568 '''return a list of tags ordered by revision'''
556 if not self._tagscache.tagslist:
569 if not self._tagscache.tagslist:
557 l = []
570 l = []
558 for t, n in self.tags().iteritems():
571 for t, n in self.tags().iteritems():
559 r = self.changelog.rev(n)
572 r = self.changelog.rev(n)
560 l.append((r, t, n))
573 l.append((r, t, n))
561 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
574 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
562
575
563 return self._tagscache.tagslist
576 return self._tagscache.tagslist
564
577
565 def nodetags(self, node):
578 def nodetags(self, node):
566 '''return the tags associated with a node'''
579 '''return the tags associated with a node'''
567 if not self._tagscache.nodetagscache:
580 if not self._tagscache.nodetagscache:
568 nodetagscache = {}
581 nodetagscache = {}
569 for t, n in self._tagscache.tags.iteritems():
582 for t, n in self._tagscache.tags.iteritems():
570 nodetagscache.setdefault(n, []).append(t)
583 nodetagscache.setdefault(n, []).append(t)
571 for tags in nodetagscache.itervalues():
584 for tags in nodetagscache.itervalues():
572 tags.sort()
585 tags.sort()
573 self._tagscache.nodetagscache = nodetagscache
586 self._tagscache.nodetagscache = nodetagscache
574 return self._tagscache.nodetagscache.get(node, [])
587 return self._tagscache.nodetagscache.get(node, [])
575
588
576 def nodebookmarks(self, node):
589 def nodebookmarks(self, node):
577 marks = []
590 marks = []
578 for bookmark, n in self._bookmarks.iteritems():
591 for bookmark, n in self._bookmarks.iteritems():
579 if n == node:
592 if n == node:
580 marks.append(bookmark)
593 marks.append(bookmark)
581 return sorted(marks)
594 return sorted(marks)
582
595
583 def _branchtags(self, partial, lrev):
596 def _branchtags(self, partial, lrev):
584 # TODO: rename this function?
597 # TODO: rename this function?
585 tiprev = len(self) - 1
598 tiprev = len(self) - 1
586 if lrev != tiprev:
599 if lrev != tiprev:
587 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
600 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
588 self._updatebranchcache(partial, ctxgen)
601 self._updatebranchcache(partial, ctxgen)
589 self._writebranchcache(partial, self.changelog.tip(), tiprev)
602 self._writebranchcache(partial, self.changelog.tip(), tiprev)
590
603
591 return partial
604 return partial
592
605
593 def updatebranchcache(self):
606 def updatebranchcache(self):
594 tip = self.changelog.tip()
607 tip = self.changelog.tip()
595 if self._branchcache is not None and self._branchcachetip == tip:
608 if self._branchcache is not None and self._branchcachetip == tip:
596 return
609 return
597
610
598 oldtip = self._branchcachetip
611 oldtip = self._branchcachetip
599 self._branchcachetip = tip
612 self._branchcachetip = tip
600 if oldtip is None or oldtip not in self.changelog.nodemap:
613 if oldtip is None or oldtip not in self.changelog.nodemap:
601 partial, last, lrev = self._readbranchcache()
614 partial, last, lrev = self._readbranchcache()
602 else:
615 else:
603 lrev = self.changelog.rev(oldtip)
616 lrev = self.changelog.rev(oldtip)
604 partial = self._branchcache
617 partial = self._branchcache
605
618
606 self._branchtags(partial, lrev)
619 self._branchtags(partial, lrev)
607 # this private cache holds all heads (not just the branch tips)
620 # this private cache holds all heads (not just the branch tips)
608 self._branchcache = partial
621 self._branchcache = partial
609
622
610 def branchmap(self):
623 def branchmap(self):
611 '''returns a dictionary {branch: [branchheads]}'''
624 '''returns a dictionary {branch: [branchheads]}'''
612 self.updatebranchcache()
625 self.updatebranchcache()
613 return self._branchcache
626 return self._branchcache
614
627
615 def _branchtip(self, heads):
628 def _branchtip(self, heads):
616 '''return the tipmost branch head in heads'''
629 '''return the tipmost branch head in heads'''
617 tip = heads[-1]
630 tip = heads[-1]
618 for h in reversed(heads):
631 for h in reversed(heads):
619 if not self[h].closesbranch():
632 if not self[h].closesbranch():
620 tip = h
633 tip = h
621 break
634 break
622 return tip
635 return tip
623
636
624 def branchtip(self, branch):
637 def branchtip(self, branch):
625 '''return the tip node for a given branch'''
638 '''return the tip node for a given branch'''
626 if branch not in self.branchmap():
639 if branch not in self.branchmap():
627 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
640 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
628 return self._branchtip(self.branchmap()[branch])
641 return self._branchtip(self.branchmap()[branch])
629
642
630 def branchtags(self):
643 def branchtags(self):
631 '''return a dict where branch names map to the tipmost head of
644 '''return a dict where branch names map to the tipmost head of
632 the branch, open heads come before closed'''
645 the branch, open heads come before closed'''
633 bt = {}
646 bt = {}
634 for bn, heads in self.branchmap().iteritems():
647 for bn, heads in self.branchmap().iteritems():
635 bt[bn] = self._branchtip(heads)
648 bt[bn] = self._branchtip(heads)
636 return bt
649 return bt
637
650
638 def _readbranchcache(self):
651 def _readbranchcache(self):
639 partial = {}
652 partial = {}
640 try:
653 try:
641 f = self.opener("cache/branchheads")
654 f = self.opener("cache/branchheads")
642 lines = f.read().split('\n')
655 lines = f.read().split('\n')
643 f.close()
656 f.close()
644 except (IOError, OSError):
657 except (IOError, OSError):
645 return {}, nullid, nullrev
658 return {}, nullid, nullrev
646
659
647 try:
660 try:
648 last, lrev = lines.pop(0).split(" ", 1)
661 last, lrev = lines.pop(0).split(" ", 1)
649 last, lrev = bin(last), int(lrev)
662 last, lrev = bin(last), int(lrev)
650 if lrev >= len(self) or self[lrev].node() != last:
663 if lrev >= len(self) or self[lrev].node() != last:
651 # invalidate the cache
664 # invalidate the cache
652 raise ValueError('invalidating branch cache (tip differs)')
665 raise ValueError('invalidating branch cache (tip differs)')
653 for l in lines:
666 for l in lines:
654 if not l:
667 if not l:
655 continue
668 continue
656 node, label = l.split(" ", 1)
669 node, label = l.split(" ", 1)
657 label = encoding.tolocal(label.strip())
670 label = encoding.tolocal(label.strip())
658 if not node in self:
671 if not node in self:
659 raise ValueError('invalidating branch cache because node '+
672 raise ValueError('invalidating branch cache because node '+
660 '%s does not exist' % node)
673 '%s does not exist' % node)
661 partial.setdefault(label, []).append(bin(node))
674 partial.setdefault(label, []).append(bin(node))
662 except KeyboardInterrupt:
675 except KeyboardInterrupt:
663 raise
676 raise
664 except Exception, inst:
677 except Exception, inst:
665 if self.ui.debugflag:
678 if self.ui.debugflag:
666 self.ui.warn(str(inst), '\n')
679 self.ui.warn(str(inst), '\n')
667 partial, last, lrev = {}, nullid, nullrev
680 partial, last, lrev = {}, nullid, nullrev
668 return partial, last, lrev
681 return partial, last, lrev
669
682
670 def _writebranchcache(self, branches, tip, tiprev):
683 def _writebranchcache(self, branches, tip, tiprev):
671 try:
684 try:
672 f = self.opener("cache/branchheads", "w", atomictemp=True)
685 f = self.opener("cache/branchheads", "w", atomictemp=True)
673 f.write("%s %s\n" % (hex(tip), tiprev))
686 f.write("%s %s\n" % (hex(tip), tiprev))
674 for label, nodes in branches.iteritems():
687 for label, nodes in branches.iteritems():
675 for node in nodes:
688 for node in nodes:
676 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
689 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
677 f.close()
690 f.close()
678 except (IOError, OSError):
691 except (IOError, OSError):
679 pass
692 pass
680
693
681 def _updatebranchcache(self, partial, ctxgen):
694 def _updatebranchcache(self, partial, ctxgen):
682 """Given a branchhead cache, partial, that may have extra nodes or be
695 """Given a branchhead cache, partial, that may have extra nodes or be
683 missing heads, and a generator of nodes that are at least a superset of
696 missing heads, and a generator of nodes that are at least a superset of
684 heads missing, this function updates partial to be correct.
697 heads missing, this function updates partial to be correct.
685 """
698 """
686 # collect new branch entries
699 # collect new branch entries
687 newbranches = {}
700 newbranches = {}
688 for c in ctxgen:
701 for c in ctxgen:
689 newbranches.setdefault(c.branch(), []).append(c.node())
702 newbranches.setdefault(c.branch(), []).append(c.node())
690 # if older branchheads are reachable from new ones, they aren't
703 # if older branchheads are reachable from new ones, they aren't
691 # really branchheads. Note checking parents is insufficient:
704 # really branchheads. Note checking parents is insufficient:
692 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
705 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
693 for branch, newnodes in newbranches.iteritems():
706 for branch, newnodes in newbranches.iteritems():
694 bheads = partial.setdefault(branch, [])
707 bheads = partial.setdefault(branch, [])
695 # Remove candidate heads that no longer are in the repo (e.g., as
708 # Remove candidate heads that no longer are in the repo (e.g., as
696 # the result of a strip that just happened). Avoid using 'node in
709 # the result of a strip that just happened). Avoid using 'node in
697 # self' here because that dives down into branchcache code somewhat
710 # self' here because that dives down into branchcache code somewhat
698 # recrusively.
711 # recrusively.
699 bheadrevs = [self.changelog.rev(node) for node in bheads
712 bheadrevs = [self.changelog.rev(node) for node in bheads
700 if self.changelog.hasnode(node)]
713 if self.changelog.hasnode(node)]
701 newheadrevs = [self.changelog.rev(node) for node in newnodes
714 newheadrevs = [self.changelog.rev(node) for node in newnodes
702 if self.changelog.hasnode(node)]
715 if self.changelog.hasnode(node)]
703 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
716 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
704 # Remove duplicates - nodes that are in newheadrevs and are already
717 # Remove duplicates - nodes that are in newheadrevs and are already
705 # in bheadrevs. This can happen if you strip a node whose parent
718 # in bheadrevs. This can happen if you strip a node whose parent
706 # was already a head (because they're on different branches).
719 # was already a head (because they're on different branches).
707 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
720 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
708
721
709 # Starting from tip means fewer passes over reachable. If we know
722 # Starting from tip means fewer passes over reachable. If we know
710 # the new candidates are not ancestors of existing heads, we don't
723 # the new candidates are not ancestors of existing heads, we don't
711 # have to examine ancestors of existing heads
724 # have to examine ancestors of existing heads
712 if ctxisnew:
725 if ctxisnew:
713 iterrevs = sorted(newheadrevs)
726 iterrevs = sorted(newheadrevs)
714 else:
727 else:
715 iterrevs = list(bheadrevs)
728 iterrevs = list(bheadrevs)
716
729
717 # This loop prunes out two kinds of heads - heads that are
730 # This loop prunes out two kinds of heads - heads that are
718 # superceded by a head in newheadrevs, and newheadrevs that are not
731 # superceded by a head in newheadrevs, and newheadrevs that are not
719 # heads because an existing head is their descendant.
732 # heads because an existing head is their descendant.
720 while iterrevs:
733 while iterrevs:
721 latest = iterrevs.pop()
734 latest = iterrevs.pop()
722 if latest not in bheadrevs:
735 if latest not in bheadrevs:
723 continue
736 continue
724 ancestors = set(self.changelog.ancestors([latest],
737 ancestors = set(self.changelog.ancestors([latest],
725 bheadrevs[0]))
738 bheadrevs[0]))
726 if ancestors:
739 if ancestors:
727 bheadrevs = [b for b in bheadrevs if b not in ancestors]
740 bheadrevs = [b for b in bheadrevs if b not in ancestors]
728 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
741 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
729
742
730 # There may be branches that cease to exist when the last commit in the
743 # There may be branches that cease to exist when the last commit in the
731 # branch was stripped. This code filters them out. Note that the
744 # branch was stripped. This code filters them out. Note that the
732 # branch that ceased to exist may not be in newbranches because
745 # branch that ceased to exist may not be in newbranches because
733 # newbranches is the set of candidate heads, which when you strip the
746 # newbranches is the set of candidate heads, which when you strip the
734 # last commit in a branch will be the parent branch.
747 # last commit in a branch will be the parent branch.
735 for branch in partial:
748 for branch in partial:
736 nodes = [head for head in partial[branch]
749 nodes = [head for head in partial[branch]
737 if self.changelog.hasnode(head)]
750 if self.changelog.hasnode(head)]
738 if not nodes:
751 if not nodes:
739 del partial[branch]
752 del partial[branch]
740
753
741 def lookup(self, key):
754 def lookup(self, key):
742 return self[key].node()
755 return self[key].node()
743
756
744 def lookupbranch(self, key, remote=None):
757 def lookupbranch(self, key, remote=None):
745 repo = remote or self
758 repo = remote or self
746 if key in repo.branchmap():
759 if key in repo.branchmap():
747 return key
760 return key
748
761
749 repo = (remote and remote.local()) and remote or self
762 repo = (remote and remote.local()) and remote or self
750 return repo[key].branch()
763 return repo[key].branch()
751
764
752 def known(self, nodes):
765 def known(self, nodes):
753 nm = self.changelog.nodemap
766 nm = self.changelog.nodemap
754 pc = self._phasecache
767 pc = self._phasecache
755 result = []
768 result = []
756 for n in nodes:
769 for n in nodes:
757 r = nm.get(n)
770 r = nm.get(n)
758 resp = not (r is None or pc.phase(self, r) >= phases.secret)
771 resp = not (r is None or pc.phase(self, r) >= phases.secret)
759 result.append(resp)
772 result.append(resp)
760 return result
773 return result
761
774
762 def local(self):
775 def local(self):
763 return self
776 return self
764
777
765 def cancopy(self):
778 def cancopy(self):
766 return self.local() # so statichttprepo's override of local() works
779 return self.local() # so statichttprepo's override of local() works
767
780
768 def join(self, f):
781 def join(self, f):
769 return os.path.join(self.path, f)
782 return os.path.join(self.path, f)
770
783
771 def wjoin(self, f):
784 def wjoin(self, f):
772 return os.path.join(self.root, f)
785 return os.path.join(self.root, f)
773
786
774 def file(self, f):
787 def file(self, f):
775 if f[0] == '/':
788 if f[0] == '/':
776 f = f[1:]
789 f = f[1:]
777 return filelog.filelog(self.sopener, f)
790 return filelog.filelog(self.sopener, f)
778
791
779 def changectx(self, changeid):
792 def changectx(self, changeid):
780 return self[changeid]
793 return self[changeid]
781
794
782 def parents(self, changeid=None):
795 def parents(self, changeid=None):
783 '''get list of changectxs for parents of changeid'''
796 '''get list of changectxs for parents of changeid'''
784 return self[changeid].parents()
797 return self[changeid].parents()
785
798
786 def setparents(self, p1, p2=nullid):
799 def setparents(self, p1, p2=nullid):
787 copies = self.dirstate.setparents(p1, p2)
800 copies = self.dirstate.setparents(p1, p2)
788 if copies:
801 if copies:
789 # Adjust copy records, the dirstate cannot do it, it
802 # Adjust copy records, the dirstate cannot do it, it
790 # requires access to parents manifests. Preserve them
803 # requires access to parents manifests. Preserve them
791 # only for entries added to first parent.
804 # only for entries added to first parent.
792 pctx = self[p1]
805 pctx = self[p1]
793 for f in copies:
806 for f in copies:
794 if f not in pctx and copies[f] in pctx:
807 if f not in pctx and copies[f] in pctx:
795 self.dirstate.copy(copies[f], f)
808 self.dirstate.copy(copies[f], f)
796
809
797 def filectx(self, path, changeid=None, fileid=None):
810 def filectx(self, path, changeid=None, fileid=None):
798 """changeid can be a changeset revision, node, or tag.
811 """changeid can be a changeset revision, node, or tag.
799 fileid can be a file revision or node."""
812 fileid can be a file revision or node."""
800 return context.filectx(self, path, changeid, fileid)
813 return context.filectx(self, path, changeid, fileid)
801
814
802 def getcwd(self):
815 def getcwd(self):
803 return self.dirstate.getcwd()
816 return self.dirstate.getcwd()
804
817
805 def pathto(self, f, cwd=None):
818 def pathto(self, f, cwd=None):
806 return self.dirstate.pathto(f, cwd)
819 return self.dirstate.pathto(f, cwd)
807
820
808 def wfile(self, f, mode='r'):
821 def wfile(self, f, mode='r'):
809 return self.wopener(f, mode)
822 return self.wopener(f, mode)
810
823
811 def _link(self, f):
824 def _link(self, f):
812 return os.path.islink(self.wjoin(f))
825 return os.path.islink(self.wjoin(f))
813
826
814 def _loadfilter(self, filter):
827 def _loadfilter(self, filter):
815 if filter not in self.filterpats:
828 if filter not in self.filterpats:
816 l = []
829 l = []
817 for pat, cmd in self.ui.configitems(filter):
830 for pat, cmd in self.ui.configitems(filter):
818 if cmd == '!':
831 if cmd == '!':
819 continue
832 continue
820 mf = matchmod.match(self.root, '', [pat])
833 mf = matchmod.match(self.root, '', [pat])
821 fn = None
834 fn = None
822 params = cmd
835 params = cmd
823 for name, filterfn in self._datafilters.iteritems():
836 for name, filterfn in self._datafilters.iteritems():
824 if cmd.startswith(name):
837 if cmd.startswith(name):
825 fn = filterfn
838 fn = filterfn
826 params = cmd[len(name):].lstrip()
839 params = cmd[len(name):].lstrip()
827 break
840 break
828 if not fn:
841 if not fn:
829 fn = lambda s, c, **kwargs: util.filter(s, c)
842 fn = lambda s, c, **kwargs: util.filter(s, c)
830 # Wrap old filters not supporting keyword arguments
843 # Wrap old filters not supporting keyword arguments
831 if not inspect.getargspec(fn)[2]:
844 if not inspect.getargspec(fn)[2]:
832 oldfn = fn
845 oldfn = fn
833 fn = lambda s, c, **kwargs: oldfn(s, c)
846 fn = lambda s, c, **kwargs: oldfn(s, c)
834 l.append((mf, fn, params))
847 l.append((mf, fn, params))
835 self.filterpats[filter] = l
848 self.filterpats[filter] = l
836 return self.filterpats[filter]
849 return self.filterpats[filter]
837
850
838 def _filter(self, filterpats, filename, data):
851 def _filter(self, filterpats, filename, data):
839 for mf, fn, cmd in filterpats:
852 for mf, fn, cmd in filterpats:
840 if mf(filename):
853 if mf(filename):
841 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
854 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
842 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
855 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
843 break
856 break
844
857
845 return data
858 return data
846
859
847 @propertycache
860 @propertycache
848 def _encodefilterpats(self):
861 def _encodefilterpats(self):
849 return self._loadfilter('encode')
862 return self._loadfilter('encode')
850
863
851 @propertycache
864 @propertycache
852 def _decodefilterpats(self):
865 def _decodefilterpats(self):
853 return self._loadfilter('decode')
866 return self._loadfilter('decode')
854
867
855 def adddatafilter(self, name, filter):
868 def adddatafilter(self, name, filter):
856 self._datafilters[name] = filter
869 self._datafilters[name] = filter
857
870
858 def wread(self, filename):
871 def wread(self, filename):
859 if self._link(filename):
872 if self._link(filename):
860 data = os.readlink(self.wjoin(filename))
873 data = os.readlink(self.wjoin(filename))
861 else:
874 else:
862 data = self.wopener.read(filename)
875 data = self.wopener.read(filename)
863 return self._filter(self._encodefilterpats, filename, data)
876 return self._filter(self._encodefilterpats, filename, data)
864
877
865 def wwrite(self, filename, data, flags):
878 def wwrite(self, filename, data, flags):
866 data = self._filter(self._decodefilterpats, filename, data)
879 data = self._filter(self._decodefilterpats, filename, data)
867 if 'l' in flags:
880 if 'l' in flags:
868 self.wopener.symlink(data, filename)
881 self.wopener.symlink(data, filename)
869 else:
882 else:
870 self.wopener.write(filename, data)
883 self.wopener.write(filename, data)
871 if 'x' in flags:
884 if 'x' in flags:
872 util.setflags(self.wjoin(filename), False, True)
885 util.setflags(self.wjoin(filename), False, True)
873
886
874 def wwritedata(self, filename, data):
887 def wwritedata(self, filename, data):
875 return self._filter(self._decodefilterpats, filename, data)
888 return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
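
    # Illustrative caller sketch (not part of this module): store writes
    # happen under the store lock, inside a transaction that is closed on
    # success and released in all cases, mirroring commitctx() below:
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...  # append to revlogs through tr
    #           tr.close()
    #       finally:
    #           tr.release()
    #   finally:
    #       lock.release()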

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
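
    # When a transaction completes, aftertrans() renames each journal.*
    # file to its undo.* counterpart (journal.dirstate -> undo.dirstate,
    # and so on), which is what rollback() later consumes. The journal.desc
    # payload is two lines, the changelog length and the transaction
    # description; for example, a commit in a 42-revision repository would
    # write:
    #
    #   42
    #   commit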

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()
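
    # The delattr() pattern above relies on how propertycache works: the
    # decorated method runs once and stores its result in the instance
    # __dict__, so deleting the attribute drops the cached value and the
    # next access recomputes it. A minimal standalone sketch (hypothetical
    # class, not part of this module):
    #
    #   class cached(object):
    #       @util.propertycache
    #       def expensive(self):
    #           return compute()    # runs once, cached on the instance
    #
    #   c = cached()
    #   c.expensive                 # computed and cached
    #   delattr(c, 'expensive')     # invalidate
    #   c.expensive                 # recomputed on next access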

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return the lock; a
        weak reference to it is kept internally. Use this before modifying
        the store (e.g. committing or stripping). If you are opening a
        transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue

                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return the lock; a weak reference to it
        is kept internally. Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
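
    # Callers needing both locks take wlock before lock, as commit() below
    # does (wlock here, store lock inside commitctx()); acquiring them in
    # the opposite order risks deadlocking against another process. An
    # illustrative caller sketch:
    #
    #   wlock = repo.wlock()
    #   try:
    #       lock = repo.lock()
    #       try:
    #           ...  # touch both the working copy and the store
    #       finally:
    #           lock.release()
    #   finally:
    #       wlock.release()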

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
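
    # When copy metadata is recorded above, the filelog revision carries a
    # small dict alongside the text; for a file copied from 'foo' it would
    # look like this (the node value is illustrative only):
    #
    #   meta = {'copy': 'foo',
    #           'copyrev': 'b80de5d138758541c5f05265ad144ab9fa86d1db'}
    #
    # with fparent1 set to nullid so readers know to consult the copy data.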

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
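
    # A minimal caller sketch (the message and user string are illustrative
    # only); the returned node is None when there was nothing to commit:
    #
    #   node = repo.commit(text='fix parser bug',
    #                      user='alice <alice@example.org>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')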

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
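
    # Callers unpack the seven sorted lists positionally; a sketch:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True, clean=True)
    #
    # Note that unknown, ignored and clean stay empty unless the matching
    # keyword argument asks for them.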

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
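
    # between() samples the first-parent chain at exponentially growing
    # distances from top: the loop appends n whenever i == f and then
    # doubles f, so the kept nodes sit 1, 2, 4, 8, ... steps below top
    # (top and bottom themselves are excluded). For a linear chain ten
    # changesets deep, this returns the nodes at distances 1, 2, 4 and 8.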
1710
1723
1711 def pull(self, remote, heads=None, force=False):
1724 def pull(self, remote, heads=None, force=False):
1712 # don't open transaction for nothing or you break future useful
1725 # don't open transaction for nothing or you break future useful
1713 # rollback call
1726 # rollback call
1714 tr = None
1727 tr = None
1715 trname = 'pull\n' + util.hidepassword(remote.url())
1728 trname = 'pull\n' + util.hidepassword(remote.url())
1716 lock = self.lock()
1729 lock = self.lock()
1717 try:
1730 try:
1718 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1731 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1719 force=force)
1732 force=force)
1720 common, fetch, rheads = tmp
1733 common, fetch, rheads = tmp
1721 if not fetch:
1734 if not fetch:
1722 self.ui.status(_("no changes found\n"))
1735 self.ui.status(_("no changes found\n"))
1723 added = []
1736 added = []
1724 result = 0
1737 result = 0
1725 else:
1738 else:
1726 tr = self.transaction(trname)
1739 tr = self.transaction(trname)
1727 if heads is None and list(common) == [nullid]:
1740 if heads is None and list(common) == [nullid]:
1728 self.ui.status(_("requesting all changes\n"))
1741 self.ui.status(_("requesting all changes\n"))
1729 elif heads is None and remote.capable('changegroupsubset'):
1742 elif heads is None and remote.capable('changegroupsubset'):
1730 # issue1320, avoid a race if remote changed after discovery
1743 # issue1320, avoid a race if remote changed after discovery
1731 heads = rheads
1744 heads = rheads
1732
1745
1733 if remote.capable('getbundle'):
1746 if remote.capable('getbundle'):
1734 cg = remote.getbundle('pull', common=common,
1747 cg = remote.getbundle('pull', common=common,
1735 heads=heads or rheads)
1748 heads=heads or rheads)
1736 elif heads is None:
1749 elif heads is None:
1737 cg = remote.changegroup(fetch, 'pull')
1750 cg = remote.changegroup(fetch, 'pull')
1738 elif not remote.capable('changegroupsubset'):
1751 elif not remote.capable('changegroupsubset'):
1739 raise util.Abort(_("partial pull cannot be done because "
1752 raise util.Abort(_("partial pull cannot be done because "
1740 "other repository doesn't support "
1753 "other repository doesn't support "
1741 "changegroupsubset."))
1754 "changegroupsubset."))
1742 else:
1755 else:
1743 cg = remote.changegroupsubset(fetch, heads, 'pull')
1756 cg = remote.changegroupsubset(fetch, heads, 'pull')
1744 clstart = len(self.changelog)
1757 clstart = len(self.changelog)
1745 result = self.addchangegroup(cg, 'pull', remote.url())
1758 result = self.addchangegroup(cg, 'pull', remote.url())
1746 clend = len(self.changelog)
1759 clend = len(self.changelog)
1747 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1760 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1748
1761
1749 # compute target subset
1762 # compute target subset
1750 if heads is None:
1763 if heads is None:
1751 # We pulled every thing possible
1764 # We pulled every thing possible
1752 # sync on everything common
1765 # sync on everything common
1753 subset = common + added
1766 subset = common + added
1754 else:
1767 else:
1755 # We pulled a specific subset
1768 # We pulled a specific subset
1756 # sync on this subset
1769 # sync on this subset
1757 subset = heads
1770 subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing: all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                if tr is None:
                    tr = self.transaction(trname)
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(tr, data)
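            # The 'dump' value is the remote's obsstore content, base85-encoded
            # so it survives the pushkey/listkeys transport; decoding and
            # merging it is what propagates obsolescence markers on pull.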
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
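        # A sketch of how a caller might interpret this return value
        # (illustrative only; `other` is assumed to be a peer repository):
        #
        #     ret = repo.push(other)
        #     if ret is None:
        #         ui.status("no changes to push\n")
        #     elif ret == 0:
        #         ui.warn("push failed (HTTP error)\n")
        #     else:
        #         ui.status("pushed (head-count delta encoded in %d)\n" % ret)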
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty there are no obsolete
                        # markers, so we can skip the iteration entirely
                        if self.obsstore:
                            # these messages are pulled out here to stay
                            # within the 80-character limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If we are to push and there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missingheads will be obsolete
                            # or unstable, so checking heads only is ok.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
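                            # An equivalent way to express this guard (an
                            # illustration, not the code hg itself uses)
                            # would be a single revset query such as:
                            #   self.set('heads(%ln) and (obsolete() or '
                            #            'unstable())', outgoing.missing)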
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                    if ret:
                        # push succeeded, synchronize the target of the push
                        cheads = outgoing.missingheads
                    elif revs is None:
                        # All-out push failed; synchronize on all common
                        cheads = outgoing.commonheads
                    else:
                        # I want cheads = heads(::missingheads and ::commonheads)
                        # (missingheads is revs with secret changesets filtered
                        # out)
                        #
                        # This can be expressed as:
                        #     cheads = (missingheads and ::commonheads)
                        #              + (commonheads and ::missingheads)
                        #
                        # while trying to push we already computed the following:
                        #     common = (::commonheads)
                        #     missing = ((commonheads::missingheads) - commonheads)
                        #
                        # We can pick:
                        # * missingheads that are part of common (::commonheads)
                        common = set(outgoing.common)
                        cheads = [node for node in revs if node in common]
                        # and
                        # * commonheads that are parents of roots of missing
                        revset = self.set('%ln and parents(roots(%ln))',
                                          outgoing.commonheads,
                                          outgoing.missing)
                        cheads.extend(c.node() for c in revset)
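                        # Worked example: with common history ..A..B and
                        # outgoing changesets B..C..D where only C was pushed
                        # (revs=[C]), cheads collects any requested node the
                        # remote already had plus B, the common parent of the
                        # pushed root C.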
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly made of roots; we may want to ensure it is,
                    # XXX but that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
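                # Each pushkey call above acts like a compare-and-swap: the
                # remote moves newremotehead from draft to public only if its
                # current phase still matches the old value, and returns a
                # false value when the update is refused.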
                if ('obsolete' in remote.listkeys('namespaces')
                    and self.obsstore):
                    data = self.listkeys('obsolete')['dump']
                    r = remote.pushkey('obsolete', 'dump', '', data)
                    if not r:
                        self.ui.warn(_('failed to push obsolete markers!\n'))
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)
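        # For instance, changegroupsubset([nullid], self.heads(), 'pull')
        # would bundle the entire repository, since every node descends
        # from nullid.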

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))
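        # Example semantics: common=[nullid] yields a full bundle, while
        # passing the remote's actual heads as common yields only the
        # incremental changesets the remote is missing.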

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter out any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]
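        # prune() keeps only nodes whose linkrev falls outside the common
        # set: a filelog node introduced by an already-common changeset is
        # dropped even if it was listed as missing.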

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]
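        # In every branch, lookup() answers the question "which changelog
        # node does this revlog entry belong to?"; that is the value the
        # bundler emits so the receiving side can rebuild linkrevs.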

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
          - nothing changed or no source: 0
          - more heads than before: 1+added heads (2..n)
          - fewer heads than before: -1-removed heads (-2..-n)
          - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
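        # For example, a server might advertise something like
        # 'streamreqs=revlogv1,generaldelta' (illustrative value): the
        # stream path above is taken only when every listed format is
        # supported locally; otherwise we fall back to a regular pull.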
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,388 +1,359 @@
  $ cat >> $HGRCPATH << EOF
  > [extensions]
  > graphlog=
  > [phases]
  > # public changesets are not obsolete
  > publish=false
  > EOF
  $ mkcommit() {
  >    echo "$1" > "$1"
  >    hg add "$1"
  >    hg ci -m "add $1"
  > }
  $ getid() {
  >    hg id --debug -ir "desc('$1')"
  > }
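
(mkcommit creates and commits a file named after its argument; getid
resolves a changeset description to its full node hash.)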

  $ hg init tmpa
  $ cd tmpa

Killing a single changeset without replacement

  $ mkcommit kill_me
  $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
  $ hg debugobsolete
  97b7c2d76b1845ed3eb988cd612611e72406cef0 0 {'date': '0 0', 'user': 'babar'}
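
Each marker line shows the obsoleted node, its successors (none here), a
flags field, and metadata such as the date and user.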
27 $ cd ..
27 $ cd ..
28
28
29 Killing a single changeset with replacement
29 Killing a single changeset with replacement
30
30
31 $ hg init tmpb
31 $ hg init tmpb
32 $ cd tmpb
32 $ cd tmpb
33 $ mkcommit a
33 $ mkcommit a
34 $ mkcommit b
34 $ mkcommit b
35 $ mkcommit original_c
35 $ mkcommit original_c
36 $ hg up "desc('b')"
36 $ hg up "desc('b')"
37 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
37 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
38 $ mkcommit new_c
38 $ mkcommit new_c
39 created new head
39 created new head
40 $ hg debugobsolete `getid original_c` `getid new_c` -d '56 12'
40 $ hg debugobsolete `getid original_c` `getid new_c` -d '56 12'
41 $ hg debugobsolete
41 $ hg debugobsolete
42 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
42 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
43
43
44 do it again (it read the obsstore before adding new changeset)
44 do it again (it read the obsstore before adding new changeset)
45
45
46 $ hg up '.^'
46 $ hg up '.^'
47 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
47 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
48 $ mkcommit new_2_c
48 $ mkcommit new_2_c
49 created new head
49 created new head
50 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
50 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
51 $ hg debugobsolete
51 $ hg debugobsolete
52 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
52 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
53 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
53 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
54
54
Register two markers with a missing node

$ hg up '.^'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ mkcommit new_3_c
created new head
$ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
$ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

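The obsstore happily records markers that lead through the node 1337...1337, which no repository ever contained; chains are followed by value, so the final successor is still reachable. A sketch of that transitive walk over the index sketched above:

    def latest_successors(node, index):
        """Follow successor markers transitively. Unknown intermediate
        nodes (such as 1337...1337 above) are traversed like any other."""
        seen, final, stack = set(), set(), [node]
        while stack:
            n = stack.pop()
            if n in seen:
                continue
            seen.add(n)
            if index.get(n):
                stack.extend(index[n])   # rewritten again: keep following
            elif n != node:
                final.add(n)             # no further rewrite: a live successor
        return final

    # latest_successors('ca819180edb9...', index) yields new_3_c even though
    # the intermediate 1337... node never existed locally.
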
Check that graphlog detects that a changeset is obsolete:

$ hg glog
@ changeset: 5:5601fb93a350
| tag: tip
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a

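Note that the rewritten changesets (original_c, new_c, new_2_c) no longer appear at all: they are extinct (obsolete, with no non-obsolete descendants), and this commit marks extinct changesets as hidden. A toy model of that computation, assuming a simple children mapping rather than Mercurial's revlog API:

    def descendants(node, children):
        """All transitive descendants of node in a {node: [children]} DAG."""
        out, stack = set(), [node]
        while stack:
            for c in children.get(stack.pop(), ()):
                if c not in out:
                    out.add(c)
                    stack.append(c)
        return out

    def extinct(nodes, children, obsolete):
        """Obsolete changesets whose descendants are all obsolete too."""
        return {n for n in nodes
                if n in obsolete and descendants(n, children) <= obsolete}

    # original_c, new_c and new_2_c are obsolete and have no live
    # descendants, so they are extinct -- and therefore hidden from glog.
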
Check that public changesets are not accounted as obsolete:

$ hg phase --public 2
$ hg --config 'extensions.graphlog=' glog
@ changeset: 5:5601fb93a350
| tag: tip
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
| o changeset: 2:245bde4270cd
|/ user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a

$ cd ..

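Phases act as a safety valve here: a public changeset is never treated as obsolete even if a marker targets it, which is why original_c resurfaces (as 'o', not 'x') after `hg phase --public 2`. A sketch of that filter, in the spirit of the toy model above:

    def obsolete_set(nodes, marked_precursors, phases):
        """Only mutable (draft/secret) changesets can be obsolete;
        public changesets are immune, whatever the markers say."""
        return {n for n in nodes
                if n in marked_precursors
                and phases.get(n, 'draft') != 'public'}

    # With phases={'original_c': 'public'}, original_c drops out of the
    # obsolete set, so it can no longer be extinct or hidden either.
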
Exchange Test
============================

Destination repo does not have any data
---------------------------------------

Try to pull markers
(extinct changesets are excluded, but their markers are still exchanged)

$ hg init tmpc
$ cd tmpc
$ hg pull ../tmpb
pulling from ../tmpb
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

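Only four of the six changesets travel (the extinct ones are left out of the changegroup), yet every marker does: marker exchange is additive, effectively a union of the two obsstores. A minimal sketch of that semantics, assuming markers compare equal field by field:

    def exchange_markers(local, remote):
        """Additive, never destructive: the result is the union of both
        obsstores; duplicates collapse because markers compare by value."""
        merged = list(local)
        merged.extend(m for m in remote if m not in merged)
        return merged

    # tmpc starts empty, so after the pull its obsstore equals tmpb's,
    # even though the extinct changesets themselves were not transferred.
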
Rollback/transaction support

$ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 {'date': '1340 0', 'user': 'test'}
$ hg rollback -n
repository tip rolled back to revision 3 (undo debugobsolete)
$ hg rollback
repository tip rolled back to revision 3 (undo debugobsolete)
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

$ cd ..

Try to push markers

$ hg init tmpd
$ hg -R tmpb push tmpd
pushing to tmpd
searching for changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
$ hg -R tmpd debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

Destination repo has existing data
---------------------------------------

On pull

$ hg init tmpe
$ cd tmpe
$ hg debugobsolete -d '1339 0' 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339
$ hg pull ../tmpb
pulling from ../tmpb
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg debugobsolete
2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

On push

$ hg push ../tmpc
pushing to ../tmpc
searching for changes
no changes found
[1]
$ hg -R ../tmpc debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}

detect outgoing obsolete and unstable
---------------------------------------

$ hg glog
o changeset: 3:5601fb93a350
| tag: tip
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
| o changeset: 2:245bde4270cd
|/ user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a

$ hg up 'desc("new_3_c")'
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ mkcommit original_d
$ mkcommit original_e
$ hg debugobsolete `getid original_d` -d '0 0'
$ hg log -r 'obsolete()'
changeset: 4:7c694bff0650
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add original_d

$ hg glog -r '::unstable()'
@ changeset: 5:6e572121998e
| tag: tip
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_e
|
x changeset: 4:7c694bff0650
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_d
|
o changeset: 3:5601fb93a350
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a

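original_e is unstable: not obsolete itself, but built on top of the obsolete original_d (shown as 'x' above). A toy check, reusing the descendants() helper sketched earlier:

    def unstable(nodes, children, obsolete):
        """Non-obsolete changesets with at least one obsolete ancestor."""
        tainted = set()
        for o in obsolete:
            tainted |= descendants(o, children)
        return tainted - obsolete

    # unstable(..., obsolete={'original_d'}) == {'original_e'}, exactly
    # what the '::unstable()' revset selects above.
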
refuse to push an obsolete changeset

$ hg push ../tmpc/ -r 'desc("original_d")'
pushing to ../tmpc/
searching for changes
abort: push includes an obsolete changeset: 7c694bff0650!
[255]

refuse to push an unstable changeset

$ hg push ../tmpc/
pushing to ../tmpc/
searching for changes
abort: push includes an unstable changeset: 6e572121998e!
[255]

Test that extinct changesets are properly detected

$ hg log -r 'extinct()'

(nothing is extinct yet: original_d is obsolete, but its descendant original_e is not, so original_d still has a live descendant)

Don't try to push extinct changesets

$ hg init ../tmpf
$ hg out ../tmpf
comparing with ../tmpf
searching for changes
changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a

changeset: 1:7c3bad9141dc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add b

changeset: 2:245bde4270cd
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add original_c

changeset: 3:5601fb93a350
parent: 1:7c3bad9141dc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add new_3_c

changeset: 4:7c694bff0650
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add original_d

changeset: 5:6e572121998e
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add original_e

$ hg push ../tmpf -f # -f because we push an unstable changeset too
pushing to ../tmpf
searching for changes
adding changesets
adding manifests
adding file changes
added 6 changesets with 6 changes to 6 files (+1 heads)

no warning displayed

$ hg push ../tmpf
pushing to ../tmpf
searching for changes
no changes found
[1]