phases: simplify phase exchange and movement over pushkey...
Pierre-Yves David
r15892:592b3d17 default
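This series moves phase information over the generic pushkey protocol, where each namespace is a flat key/value listing. As a sketch (assuming a Mercurial build with this series applied), the namespace exchanged during push and pull can be dumped from the command line with the pushkey debug command:

    $ hg debugpushkey /path/to/repo phases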
@@ -1,2288 +1,2248 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @filecache('phaseroots')
    def _phaseroots(self):
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        phases.filterunknown(self, phaseroots)
        return phaseroots

    @propertycache
    def _phaserev(self):
        cache = [phases.public] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache

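    # Example (sketch): the _phaserev cache above maps revision numbers to
    # phases, so repo._phaserev[rev] yields phases.public, phases.draft or
    # phases.secret for that revision.
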
    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

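    # Example (sketch): both helpers above take a revset template plus
    # arguments for revset.formatspec, e.g. repo.revs('%d::%d', 0, 5) or
    # "for ctx in repo.set('branch(%s) and head()', 'default')".
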
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

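    # Example (sketch, with a hypothetical node variable): a global tag could
    # be added through the public method above as
    #   repo.tag(['v1.0'], node, 'Added tag v1.0', False, 'user', None)
    # which validates the name, writes .hgtags and commits the change.
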
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

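    # Example (sketch): repo.tagtype('v1.0') returns 'global' for a tag
    # recorded in .hgtags, 'local' for one from .hg/localtags, and None
    # when the tag does not exist.
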
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

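    # Example (sketch): branchmap() above returns every head per branch,
    # e.g. {'default': [nodeA, nodeB]}, while branchtags() reduces that to
    # the single tipmost open head of each branch.
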
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or self._phaserev[r] >= phases.secret)
            result.append(resp)
        return result

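    # Example (sketch): given [known_node, secret_node, missing_node],
    # known() above would answer [True, False, False]; secret changesets
    # are hidden from discovery just like nodes absent from the repo.
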
    def local(self):
        return self

    def cancopy(self):
        return (repo.repository.cancopy(self)
                and not self._phaseroots[phases.secret])

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

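    # Example (hypothetical hgrc, for illustration only): _loadfilter reads
    # pattern/command pairs from a config section named after the filter:
    #   [encode]
    #   *.txt = tr '\r' ' '
    # Matching files are then piped through the shell command (or through a
    # registered data filter whose name prefixes the command).
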
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

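    # Example (sketch of the usual calling convention): store writers wrap
    # their work as
    #   tr = repo.transaction('my-operation')
    #   try:
    #       # ... append to revlogs ...
    #       tr.close()
    #   finally:
    #       tr.release()
    # so an exception before close() rolls the journalled changes back.
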
756 def _writejournal(self, desc):
756 def _writejournal(self, desc):
757 # save dirstate for rollback
757 # save dirstate for rollback
758 try:
758 try:
759 ds = self.opener.read("dirstate")
759 ds = self.opener.read("dirstate")
760 except IOError:
760 except IOError:
761 ds = ""
761 ds = ""
762 self.opener.write("journal.dirstate", ds)
762 self.opener.write("journal.dirstate", ds)
763 self.opener.write("journal.branch",
763 self.opener.write("journal.branch",
764 encoding.fromlocal(self.dirstate.branch()))
764 encoding.fromlocal(self.dirstate.branch()))
765 self.opener.write("journal.desc",
765 self.opener.write("journal.desc",
766 "%d\n%s\n" % (len(self), desc))
766 "%d\n%s\n" % (len(self), desc))
767
767
768 bkname = self.join('bookmarks')
768 bkname = self.join('bookmarks')
769 if os.path.exists(bkname):
769 if os.path.exists(bkname):
770 util.copyfile(bkname, self.join('journal.bookmarks'))
770 util.copyfile(bkname, self.join('journal.bookmarks'))
771 else:
771 else:
772 self.opener.write('journal.bookmarks', '')
772 self.opener.write('journal.bookmarks', '')
773 phasesname = self.sjoin('phaseroots')
773 phasesname = self.sjoin('phaseroots')
774 if os.path.exists(phasesname):
774 if os.path.exists(phasesname):
775 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
775 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
776 else:
776 else:
777 self.sopener.write('journal.phaseroots', '')
777 self.sopener.write('journal.phaseroots', '')
778
778
779 return (self.sjoin('journal'), self.join('journal.dirstate'),
779 return (self.sjoin('journal'), self.join('journal.dirstate'),
780 self.join('journal.branch'), self.join('journal.desc'),
780 self.join('journal.branch'), self.join('journal.desc'),
781 self.join('journal.bookmarks'),
781 self.join('journal.bookmarks'),
782 self.sjoin('journal.phaseroots'))
782 self.sjoin('journal.phaseroots'))
783
783
784 def recover(self):
784 def recover(self):
785 lock = self.lock()
785 lock = self.lock()
786 try:
786 try:
787 if os.path.exists(self.sjoin("journal")):
787 if os.path.exists(self.sjoin("journal")):
788 self.ui.status(_("rolling back interrupted transaction\n"))
788 self.ui.status(_("rolling back interrupted transaction\n"))
789 transaction.rollback(self.sopener, self.sjoin("journal"),
789 transaction.rollback(self.sopener, self.sjoin("journal"),
790 self.ui.warn)
790 self.ui.warn)
791 self.invalidate()
791 self.invalidate()
792 return True
792 return True
793 else:
793 else:
794 self.ui.warn(_("no interrupted transaction available\n"))
794 self.ui.warn(_("no interrupted transaction available\n"))
795 return False
795 return False
796 finally:
796 finally:
797 lock.release()
797 lock.release()
798
798
799 def rollback(self, dryrun=False, force=False):
799 def rollback(self, dryrun=False, force=False):
800 wlock = lock = None
800 wlock = lock = None
801 try:
801 try:
802 wlock = self.wlock()
802 wlock = self.wlock()
803 lock = self.lock()
803 lock = self.lock()
804 if os.path.exists(self.sjoin("undo")):
804 if os.path.exists(self.sjoin("undo")):
805 return self._rollback(dryrun, force)
805 return self._rollback(dryrun, force)
806 else:
806 else:
807 self.ui.warn(_("no rollback information available\n"))
807 self.ui.warn(_("no rollback information available\n"))
808 return 1
808 return 1
809 finally:
809 finally:
810 release(lock, wlock)
810 release(lock, wlock)
811
811
812 def _rollback(self, dryrun, force):
812 def _rollback(self, dryrun, force):
813 ui = self.ui
813 ui = self.ui
814 try:
814 try:
815 args = self.opener.read('undo.desc').splitlines()
815 args = self.opener.read('undo.desc').splitlines()
816 (oldlen, desc, detail) = (int(args[0]), args[1], None)
816 (oldlen, desc, detail) = (int(args[0]), args[1], None)
817 if len(args) >= 3:
817 if len(args) >= 3:
818 detail = args[2]
818 detail = args[2]
819 oldtip = oldlen - 1
819 oldtip = oldlen - 1
820
820
821 if detail and ui.verbose:
821 if detail and ui.verbose:
822 msg = (_('repository tip rolled back to revision %s'
822 msg = (_('repository tip rolled back to revision %s'
823 ' (undo %s: %s)\n')
823 ' (undo %s: %s)\n')
824 % (oldtip, desc, detail))
824 % (oldtip, desc, detail))
825 else:
825 else:
826 msg = (_('repository tip rolled back to revision %s'
826 msg = (_('repository tip rolled back to revision %s'
827 ' (undo %s)\n')
827 ' (undo %s)\n')
828 % (oldtip, desc))
828 % (oldtip, desc))
829 except IOError:
829 except IOError:
830 msg = _('rolling back unknown transaction\n')
830 msg = _('rolling back unknown transaction\n')
831 desc = None
831 desc = None
832
832
833 if not force and self['.'] != self['tip'] and desc == 'commit':
833 if not force and self['.'] != self['tip'] and desc == 'commit':
834 raise util.Abort(
834 raise util.Abort(
835 _('rollback of last commit while not checked out '
835 _('rollback of last commit while not checked out '
836 'may lose data'), hint=_('use -f to force'))
836 'may lose data'), hint=_('use -f to force'))
837
837
838 ui.status(msg)
838 ui.status(msg)
839 if dryrun:
839 if dryrun:
840 return 0
840 return 0
841
841
842 parents = self.dirstate.parents()
842 parents = self.dirstate.parents()
843 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
843 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
844 if os.path.exists(self.join('undo.bookmarks')):
844 if os.path.exists(self.join('undo.bookmarks')):
845 util.rename(self.join('undo.bookmarks'),
845 util.rename(self.join('undo.bookmarks'),
846 self.join('bookmarks'))
846 self.join('bookmarks'))
847 if os.path.exists(self.sjoin('undo.phaseroots')):
847 if os.path.exists(self.sjoin('undo.phaseroots')):
848 util.rename(self.sjoin('undo.phaseroots'),
848 util.rename(self.sjoin('undo.phaseroots'),
849 self.sjoin('phaseroots'))
849 self.sjoin('phaseroots'))
850 self.invalidate()
850 self.invalidate()
851
851
852 parentgone = (parents[0] not in self.changelog.nodemap or
852 parentgone = (parents[0] not in self.changelog.nodemap or
853 parents[1] not in self.changelog.nodemap)
853 parents[1] not in self.changelog.nodemap)
854 if parentgone:
854 if parentgone:
855 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
855 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
856 try:
856 try:
857 branch = self.opener.read('undo.branch')
857 branch = self.opener.read('undo.branch')
858 self.dirstate.setbranch(branch)
858 self.dirstate.setbranch(branch)
859 except IOError:
859 except IOError:
860 ui.warn(_('named branch could not be reset: '
860 ui.warn(_('named branch could not be reset: '
861 'current branch is still \'%s\'\n')
861 'current branch is still \'%s\'\n')
862 % self.dirstate.branch())
862 % self.dirstate.branch())
863
863
864 self.dirstate.invalidate()
864 self.dirstate.invalidate()
865 parents = tuple([p.rev() for p in self.parents()])
865 parents = tuple([p.rev() for p in self.parents()])
866 if len(parents) > 1:
866 if len(parents) > 1:
867 ui.status(_('working directory now based on '
867 ui.status(_('working directory now based on '
868 'revisions %d and %d\n') % parents)
868 'revisions %d and %d\n') % parents)
869 else:
869 else:
870 ui.status(_('working directory now based on '
870 ui.status(_('working directory now based on '
871 'revision %d\n') % parents)
871 'revision %d\n') % parents)
872 self.destroyed()
872 self.destroyed()
873 return 0
873 return 0
874
874
875 def invalidatecaches(self):
875 def invalidatecaches(self):
876 try:
876 try:
877 delattr(self, '_tagscache')
877 delattr(self, '_tagscache')
878 except AttributeError:
878 except AttributeError:
879 pass
879 pass
880
880
881 self._branchcache = None # in UTF-8
881 self._branchcache = None # in UTF-8
882 self._branchcachetip = None
882 self._branchcachetip = None
883
883
884 def invalidatedirstate(self):
884 def invalidatedirstate(self):
885 '''Invalidates the dirstate, causing the next call to dirstate
885 '''Invalidates the dirstate, causing the next call to dirstate
886 to check if it was modified since the last time it was read,
886 to check if it was modified since the last time it was read,
887 rereading it if it has.
887 rereading it if it has.
888
888
889 This is different to dirstate.invalidate() that it doesn't always
889 This is different to dirstate.invalidate() that it doesn't always
890 rereads the dirstate. Use dirstate.invalidate() if you want to
890 rereads the dirstate. Use dirstate.invalidate() if you want to
891 explicitly read the dirstate again (i.e. restoring it to a previous
891 explicitly read the dirstate again (i.e. restoring it to a previous
892 known good state).'''
892 known good state).'''
893 try:
893 try:
894 delattr(self, 'dirstate')
894 delattr(self, 'dirstate')
895 except AttributeError:
895 except AttributeError:
896 pass
896 pass
897
897
898 def invalidate(self):
898 def invalidate(self):
899 for k in self._filecache:
899 for k in self._filecache:
900 # dirstate is invalidated separately in invalidatedirstate()
900 # dirstate is invalidated separately in invalidatedirstate()
901 if k == 'dirstate':
901 if k == 'dirstate':
902 continue
902 continue
903
903
904 try:
904 try:
905 delattr(self, k)
905 delattr(self, k)
906 except AttributeError:
906 except AttributeError:
907 pass
907 pass
908 self.invalidatecaches()
908 self.invalidatecaches()
909
909
910 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
910 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
911 try:
911 try:
912 l = lock.lock(lockname, 0, releasefn, desc=desc)
912 l = lock.lock(lockname, 0, releasefn, desc=desc)
913 except error.LockHeld, inst:
913 except error.LockHeld, inst:
914 if not wait:
914 if not wait:
915 raise
915 raise
916 self.ui.warn(_("waiting for lock on %s held by %r\n") %
916 self.ui.warn(_("waiting for lock on %s held by %r\n") %
917 (desc, inst.locker))
917 (desc, inst.locker))
918 # default to 600 seconds timeout
918 # default to 600 seconds timeout
919 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
919 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
920 releasefn, desc=desc)
920 releasefn, desc=desc)
921 if acquirefn:
921 if acquirefn:
922 acquirefn()
922 acquirefn()
923 return l
923 return l
924
924
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)

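    # Usage sketch (illustrative, not part of the original source): callers
    # use _afterlock() to defer work until the store lock drops, e.g. a
    # hypothetical hook runner defined by the caller:
    #
    #     def runhook():
    #         self.hook('pushkey', namespace=namespace, key=key)
    #     self._afterlock(runhook)
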
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

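    # Usage sketch (illustrative): both lock() and wlock() are meant to be
    # paired with release() in a try/finally, e.g.
    #
    #     wlock = repo.wlock()
    #     try:
    #         ...modify files under .hg...
    #     finally:
    #         wlock.release()
    #
    # A nested acquisition returns the already-held lock with its hold count
    # bumped, which is why the "l.held" branch above calls l.lock() again.
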
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

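    # Illustrative example (values assumed): when rev2 renames foo to bar,
    # the filelog entry for bar records its origin in the metadata,
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(<filenode of foo in rev1>)}
    #
    # and is stored with fparent1 == nullid, the "look up the copy data"
    # convention described in the comment above.
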
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

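    # Usage sketch (hypothetical names): a minimal programmatic commit of a
    # single tracked file could look like
    #
    #     m = matchmod.exact(repo.root, '', ['a.txt'])
    #     node = repo.commit(text='fix a.txt', user='alice <a@example.com>',
    #                        match=m)
    #
    # commit() returns the new changelog node, or None when there is nothing
    # to commit.
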
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = self.ui.configint('phases', 'new-commit',
                                            phases.draft)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

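    # Config sketch (hedged; the integer encoding is an assumption inferred
    # from the configint() call above): the phase of new commits is driven by
    #
    #     [phases]
    #     new-commit = 1    # 0=public, 1=draft (default), 2=secret
    #
    # and a value of 0 skips the retractboundary() call entirely.
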
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

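    # Usage sketch (illustrative): the result unpacks in a fixed order,
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(clean=True)
    #
    # here comparing the working directory against its first parent, the
    # default for node1='.', node2=None.
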
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

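    # Explanatory note (not in the original source): for each (top, bottom)
    # pair, between() walks the first-parent chain and samples nodes at
    # distances 1, 2, 4, 8, ... from top, so a range of ~1000 changesets
    # yields only ~10 nodes. The old discovery protocol uses these sparse
    # samples to bisect toward the common subset cheaply.
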
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                subset = common + added
-                rheads, rroots = phases.analyzeremotephases(self, subset,
-                                                            remotephases)
-                for phase, boundary in enumerate(rheads):
-                    phases.advanceboundary(self, phase, boundary)
+                pheads, _dr = phases.analyzeremotephases(self, subset,
+                                                         remotephases)
+                phases.advanceboundary(self, phases.public, pheads)
+                phases.advanceboundary(self, phases.draft, common + added)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, common + added)
        finally:
            lock.release()

        return result

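    # Phase note (hedged): advanceboundary() only ever lowers a changeset's
    # phase (toward public, numerically 0); it never demotes a local public
    # changeset back to draft. Raising a phase is retractboundary()'s job,
    # as used in commitctx() above.
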
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            # get local lock as we might write phase data
            locallock = self.lock()
            try:
                cg, remote_heads, fut = discovery.prepush(self, remote, force,
                                                          revs, newbranch)
                ret = remote_heads
                # create a callback for addchangegroup;
                # it will be used by the branches of the conditional below too.
                if cg is not None:
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remote_heads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remote_heads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, fut)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, fut, remotephases)
-                    rheads, rroots = ana
+                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, fut)
                    else: # publish = False
-                        for phase, rpheads in enumerate(rheads):
-                            phases.advanceboundary(self, phase, rpheads)
+                        phases.advanceboundary(self, phases.public, pheads)
+                        phases.advanceboundary(self, phases.draft, fut)
                    ### Apply local phase on remote
                    #
                    # XXX If push failed we should use strict common and not
-                    # future to avoir pushing phase data on unknown changeset.
+                    # future to avoid pushing phase data on unknown changeset.
                    # This is to be done later.

-                    # elements we want to push
-                    topush = []
-
-                    # store details of the known remote phase of each revision
-                    # /!\ the set at index I holds revs where: I <= rev.phase()
-                    # /!\ public phase (index 0) is ignored
-                    remdetails = [set() for i in xrange(len(phases.allphases))]
-                    _revs = set()
-                    for relremphase in phases.trackedphases[::-1]:
-                        # we iterate backward because the list always grows
-                        # when filled in this direction.
-                        _revs.update(self.revs('%ln::%ln',
-                                               rroots[relremphase], fut))
-                        remdetails[relremphase].update(_revs)
-
-                    for phase in phases.allphases[:-1]:
-                        # We don't need the last phase as we will never want to
-                        # move anything to it while moving phases backward.
-
-                        # Get the list of all revs on remote which are in a
-                        # phase higher than the currently processed phase.
-                        relremrev = remdetails[phase + 1]
-
-                        if not relremrev:
-                            # no candidate to push to remote anymore;
-                            # break before any expensive revset
-                            break
-
-                        # dynamically inject the appropriate phase symbol
-                        phasename = phases.phasenames[phase]
-                        odrevset = 'heads(%%ld and %s())' % phasename
-                        outdated = self.set(odrevset, relremrev)
-                        for od in outdated:
-                            candstart = len(remdetails) - 1
-                            candstop = phase + 1
-                            candidateold = xrange(candstart, candstop, -1)
-                            for oldphase in candidateold:
-                                if od.rev() in remdetails[oldphase]:
-                                    break
-                            else: # last one: no need to search
-                                oldphase = phase + 1
-                            topush.append((oldphase, phase, od))
-
-                    # push every needed piece of data
-                    for oldphase, newphase, newremotehead in topush:
-                        r = remote.pushkey('phases',
-                                           newremotehead.hex(),
-                                           str(oldphase), str(newphase))
-                        if not r:
-                            self.ui.warn(_('updating phase of %s '
-                                           'to %s from %s failed!\n')
-                                         % (newremotehead, newphase,
-                                            oldphase))
+                    # Get the list of all revs that are draft on the remote
+                    # but public here.
+                    # XXX Beware that the revset breaks if droots is not
+                    # XXX strictly a set of roots; we may want to ensure it
+                    # XXX is, but that is costly.
+                    outdated = self.set('heads((%ln::%ln) and public())',
+                                        droots, fut)
+                    for newremotehead in outdated:
+                        r = remote.pushkey('phases',
+                                           newremotehead.hex(),
+                                           str(phases.draft),
+                                           str(phases.public))
+                        if not r:
+                            self.ui.warn(_('updating %s to public failed!\n')
+                                         % newremotehead)
            finally:
                locallock.release()
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

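    # Wire sketch (hedged): the phase exchange above rides on pushkey; the
    # rough manual equivalent against a remote would be
    #
    #     hg debugpushkey <remote-url> phases <node-hex> 1 0
    #
    # i.e. ask the remote to move <node> from draft ("1") to public ("0"),
    # matching the str(phases.draft)/str(phases.public) arguments above.
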
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

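    # Usage sketch (illustrative nodes): a server answering a pull request
    # effectively runs
    #
    #     cg = repo.getbundle('pull', heads=clientheads, common=commonnodes)
    #
    # bundling exactly ancestors(heads) - ancestors(common), with heads
    # defaulting to all local heads and common to [nullid].
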
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

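The 'reorder' knob read above is plain hgrc configuration; a sketch of the section (the default 'auto' leaves the decision to the revlog, anything else is parsed by util.parsebool()):

    [bundle]
    reorder = auto    # or any boolean spelling, e.g. 'yes' / 'no'
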
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

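For illustration, a hedged sketch of draining the returned chunkbuffer into an uncompressed bundle file ('HG10UN' is the standard header for such bundles; 'nodes' is assumed to be computed elsewhere):

    cg = repo._changegroup(nodes, 'bundle')
    f = open('out.hg', 'wb')
    try:
        f.write('HG10UN')
        while True:
            chunk = cg.read(4096)
            if not chunk:
                break
            f.write(chunk)
    finally:
        f.close()
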
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret.
                #
                # We should not use 'added' here but the list of all
                # changesets in the bundle.
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

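A small sketch of interpreting the return value per the docstring above (caller names are hypothetical):

    ret = repo.addchangegroup(cg, 'pull', url)
    if ret == 0:
        ui.status('nothing changed\n')
    elif ret > 1:
        ui.status('%d new heads\n' % (ret - 1))
    elif ret < 0:
        ui.status('%d heads removed\n' % (-ret - 1))
    # ret == 1 means the head count is unchanged
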
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
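
Since this changeset routes phase exchange through the generic pushkey methods above, a hedged sketch of the calls a peer makes for the 'phases' namespace (values follow listphases() and pushphase() in phases.py below; 'somehex' is hypothetical):

    roots = repo.listkeys('phases')   # e.g. {hexroot: '1', 'publishing': 'True'}
    ok = repo.pushkey('phases', somehex, '1', '0')   # draft -> public, 1 on success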
@@ -1,283 +1,290 @@
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms of the
9 This software may be used and distributed according to the terms of the
10 GNU General Public License version 2 or any later version.
10 GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phases' is an indicator that tells us how a changeset is
20 A 'changeset phases' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described below,
21 manipulated and communicated. The details of each phase is described below,
22 here we describe the properties they have in common.
22 here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not permanent and
24 Like bookmarks, phases are not stored in history and thus are not permanent and
25 leave no audit trail.
25 leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered, so they
27 First, no changeset can be in two phases at once. Phases are ordered, so they
28 can be considered from lowest to highest. The default, lowest phase is 'public'
28 can be considered from lowest to highest. The default, lowest phase is 'public'
29 - this is the normal phase of existing changesets. A child changeset can not be
29 - this is the normal phase of existing changesets. A child changeset can not be
30 in a lower phase than its parents.
30 in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 local commits are draft by default
39 local commits are draft by default
40
40
41 Phase movement and exchange
41 Phase movement and exchange
42 ============================
42 ============================
43
43
44 Phase data are exchanged by pushkey on pull and push. Some server have a
44 Phase data are exchanged by pushkey on pull and push. Some server have a
45 publish option set, we call them publishing server. Pushing to such server make
45 publish option set, we call them publishing server. Pushing to such server make
46 draft changeset publish.
46 draft changeset publish.
47
47
48 A small list of fact/rules define the exchange of phase:
48 A small list of fact/rules define the exchange of phase:
49
49
50 * old client never changes server states
50 * old client never changes server states
51 * pull never changes server states
51 * pull never changes server states
52 * publish and old server csets are seen as public by client
52 * publish and old server csets are seen as public by client
53
53
54 * Any secret changeset seens in another repository is lowered to at least draft
54 * Any secret changeset seens in another repository is lowered to at least draft
55
55
56
56
57 Here is the final table summing up the 49 possible usecase of phase exchange:
57 Here is the final table summing up the 49 possible usecase of phase exchange:
58
58
59 server
59 server
60 old publish non-publish
60 old publish non-publish
61 N X N D P N D P
61 N X N D P N D P
62 old client
62 old client
63 pull
63 pull
64 N - X/X - X/D X/P - X/D X/P
64 N - X/X - X/D X/P - X/D X/P
65 X - X/X - X/D X/P - X/D X/P
65 X - X/X - X/D X/P - X/D X/P
66 push
66 push
67 X X/X X/X X/P X/P X/P X/D X/D X/P
67 X X/X X/X X/P X/P X/P X/D X/D X/P
68 new client
68 new client
69 pull
69 pull
70 N - P/X - P/D P/P - D/D P/P
70 N - P/X - P/D P/P - D/D P/P
71 D - P/X - P/D P/P - D/D P/P
71 D - P/X - P/D P/P - D/D P/P
72 P - P/X - P/D P/P - P/D P/P
72 P - P/X - P/D P/P - P/D P/P
73 push
73 push
74 D P/X P/X P/P P/P P/P D/D D/D P/P
74 D P/X P/X P/P P/P P/P D/D D/D P/P
75 P P/X P/X P/P P/P P/P P/P P/P P/P
75 P P/X P/X P/P P/P P/P P/P P/P P/P
76
76
77 Legend:
77 Legend:
78
78
79 A/B = final state on client / state on server
79 A/B = final state on client / state on server
80
80
81 * N = new/not present,
81 * N = new/not present,
82 * P = public,
82 * P = public,
83 * D = draft,
83 * D = draft,
84 * X = not tracked (ie: the old client or server has no internal way of
84 * X = not tracked (ie: the old client or server has no internal way of
85 recording the phase.)
85 recording the phase.)
86
86
87 passive = only pushes
87 passive = only pushes
88
88
89
89
90 A cell here can be read like this:
90 A cell here can be read like this:
91
91
92 "When a new client pushes a draft changeset (D) to a publishing server
92 "When a new client pushes a draft changeset (D) to a publishing server
93 where it's not present (N), it's marked public on both sides (P/P)."
93 where it's not present (N), it's marked public on both sides (P/P)."
94
94
95 Note: old client behave as publish server with Draft only content
95 Note: old client behave as publish server with Draft only content
96 - other people see it as public
96 - other people see it as public
97 - content is pushed as draft
97 - content is pushed as draft
98
98
99 """
99 """
100
100
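The publishing behaviour described above hangs off a single configuration option, read in the code below as configbool('phases', 'publish', True); a server is made non-publishing with:

    [phases]
    publish = False
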
import errno
from node import nullid, bin, hex, short
from i18n import _

allphases = public, draft, secret = range(3)
trackedphases = allphases[1:]
phasenames = ['public', 'draft', 'secret']

def readroots(repo):
    """Read phase roots from disk"""
    roots = [set() for i in allphases]
    roots[0].add(nullid)
    try:
        f = repo.sopener('phaseroots')
        try:
            for line in f:
                phase, nh = line.strip().split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
    return roots

def writeroots(repo):
    """Write phase roots to disk"""
    f = repo.sopener('phaseroots', 'w', atomictemp=True)
    try:
        for phase, roots in enumerate(repo._phaseroots):
            for h in roots:
                f.write('%i %s\n' % (phase, hex(h)))
        repo._dirtyphases = False
    finally:
        f.close()

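As the loop above shows, the resulting 'phaseroots' file in the store holds one '<phase> <hexnode>' pair per line; a hypothetical file with one draft and one secret root (hashes made up) would read:

    1 5f35a5f9b843d8ee1d9e8d4c3c0f2a4d6b7c8d9e
    2 9c0d2e4f6a8b1c3d5e7f9a0b2c4d6e8f0a1b2c3d
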
def filterunknown(repo, phaseroots=None):
    """remove unknown nodes from the phase boundary

    No data is lost, as unknown nodes only hold data for their descendants.
    """
    if phaseroots is None:
        phaseroots = repo._phaseroots
    for phase, nodes in enumerate(phaseroots):
        missing = [node for node in nodes if node not in repo]
        if missing:
            for mnode in missing:
                msg = _('Removing unknown node %(n)s from %(p)i-phase boundary')
                repo.ui.debug(msg, {'n': short(mnode), 'p': phase})
            nodes.symmetric_difference_update(missing)
            repo._dirtyphases = True

def advanceboundary(repo, targetphase, nodes):
    """Add nodes to a phase, changing other nodes' phases if necessary.

    This function moves the boundary *forward*: all nodes are set to the
    target phase or kept in a *lower* phase.

    The boundary is simplified to contain phase roots only."""
    delroots = [] # set of roots deleted by this pass
    for phase in xrange(targetphase + 1, len(allphases)):
        # filter nodes that are not in a compatible phase already
        # XXX rev phase cache might have been invalidated by a previous loop
        # XXX we need to be smarter here
        nodes = [n for n in nodes if repo[n].phase() >= phase]
        if not nodes:
            break # no roots to move anymore
        roots = repo._phaseroots[phase]
        olds = roots.copy()
        ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
        roots.clear()
        roots.update(ctx.node() for ctx in ctxs)
        if olds != roots:
            # invalidate cache (we probably could be smarter here)
            if '_phaserev' in vars(repo):
                del repo._phaserev
            repo._dirtyphases = True
            # some roots may need to be declared for lower phases
            delroots.extend(olds - roots)
    # declare deleted roots in the target phase
    if targetphase != 0:
        retractboundary(repo, targetphase, delroots)


def retractboundary(repo, targetphase, nodes):
    """Set nodes back to a phase, changing other nodes' phases if necessary.

    This function moves the boundary *backward*: all nodes are set to the
    target phase or kept in a *higher* phase.

    The boundary is simplified to contain phase roots only."""
    currentroots = repo._phaseroots[targetphase]
    newroots = [n for n in nodes if repo[n].phase() < targetphase]
    if newroots:
        currentroots.update(newroots)
        ctxs = repo.set('roots(%ln::)', currentroots)
        currentroots.intersection_update(ctx.node() for ctx in ctxs)
        if '_phaserev' in vars(repo):
            del repo._phaserev
        repo._dirtyphases = True

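For illustration, a sketch tying the two primitives together, mirroring the non-publishing push branch of addchangegroup() above ('srccontent' and 'added' as in that method):

    advanceboundary(repo, draft, srccontent)  # common secret csets drop to draft
    retractboundary(repo, draft, added)       # newly added csets stay draft
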
def listphases(repo):
    """List phase roots for serialisation over pushkey"""
    keys = {}
-    for phase in trackedphases:
-        for root in repo._phaseroots[phase]:
-            keys[hex(root)] = '%i' % phase
-
+    value = '%i' % draft
+    for root in repo._phaseroots[draft]:
+        keys[hex(root)] = value
    if repo.ui.configbool('phases', 'publish', True):
        # Add extra data to let the remote know we are a publishing repo.
        # Publishing repos can't just pretend they are old repos. When
        # pushing to a publishing repo, the client still needs to push the
        # phase boundary.
        #
        # A push does not only push changesets. It also pushes phase data.
        # New phase data may apply to common changesets which won't be
        # pushed (as they are common). Here is a very simple example:
        #
        # 1) repo A pushes changeset X as draft to repo B
        # 2) repo B makes changeset X public
        # 3) repo B pushes to repo A. X is not pushed, but the data that X
        #    is now public should be.
        #
        # The server can't handle this on its own, as it has no idea of the
        # client's phase data.
        keys['publishing'] = 'True'
    return keys

def pushphase(repo, nhex, oldphasestr, newphasestr):
    """Change the phase of a single node over pushkey"""
    lock = repo.lock()
    try:
        currentphase = repo[nhex].phase()
        newphase = abs(int(newphasestr)) # let's avoid negative index surprises
        oldphase = abs(int(oldphasestr)) # let's avoid negative index surprises
        if currentphase == oldphase and newphase < oldphase:
            advanceboundary(repo, newphase, [bin(nhex)])
            return 1
        else:
            return 0
    finally:
        lock.release()

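On the wire, the namespace built by listphases() can be inspected with 'hg debugpushkey'; a hypothetical transcript against a publishing repo with one draft root (hash made up, output is tab-separated):

      $ hg debugpushkey . phases
      5f35a5f9b843d8ee1d9e8d4c3c0f2a4d6b7c8d9e	1
      publishing	True
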
def visibleheads(repo):
    """return the set of visible heads of this repo"""
    # XXX we want a cache on this
    sroots = repo._phaseroots[secret]
    if sroots:
        # XXX very slow revset. storing heads or secret "boundary" would help.
        revset = repo.set('heads(not (%ln::))', sroots)

        vheads = [ctx.node() for ctx in revset]
        if not vheads:
            vheads.append(nullid)
    else:
        vheads = repo.heads()
    return vheads

def analyzeremotephases(repo, subset, roots):
    """Compute phase heads and roots in a subset of nodes from a root dict

    * subset is the heads of the subset
    * roots is a {<nodeid> => phase} mapping; keys and values are strings.

    Accepts unknown elements as input.
    """
    # build list from dictionary
-    phaseroots = [[] for p in allphases]
+    draftroots = []
+    nm = repo.changelog.nodemap # to filter unknown nodes
    for nhex, phase in roots.iteritems():
        if nhex == 'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        phase = int(phase)
-        if node in repo:
-            phaseroots[phase].append(node)
+        if phase == 0:
+            if node != nullid:
+                msg = _('ignoring inconsistent public root from remote: %s')
+                repo.ui.warn(msg, nhex)
+        elif phase == 1:
+            if node in nm:
+                draftroots.append(node)
+        else:
+            msg = _('ignoring unexpected root from remote: %i %s')
+            repo.ui.warn(msg, phase, nhex)
    # compute heads
-    phaseheads = [[] for p in allphases]
-    for phase in allphases[:-1]:
-        toproof = phaseroots[phase + 1]
-        revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
-                          subset, toproof, toproof, subset)
-        phaseheads[phase].extend(c.node() for c in revset)
-    return phaseheads, phaseroots
+    revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
+                      subset, draftroots, draftroots, subset)
+    publicheads = [c.node() for c in revset]
+    return publicheads, draftroots

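For context, a simplified sketch of the consuming side during pull (based on the pull logic elsewhere in localrepo.py; 'remote' and 'subset' as there):

    remotephases = remote.listkeys('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is a new, non-publishing server
        pheads, droots = phases.analyzeremotephases(self, subset, remotephases)
        phases.advanceboundary(self, phases.public, pheads)
        phases.advanceboundary(self, phases.draft, subset)
    else:
        # remote is old or publishing: all common changesets are seen as public
        phases.advanceboundary(self, phases.public, subset)
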
@@ -1,121 +1,121 @@
1 $ "$TESTDIR/hghave" serve || exit 80
1 $ "$TESTDIR/hghave" serve || exit 80
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo a > a
5 $ echo a > a
6 $ hg ci -Ama
6 $ hg ci -Ama
7 adding a
7 adding a
8 $ cd ..
8 $ cd ..
9 $ hg clone test test2
9 $ hg clone test test2
10 updating to branch default
10 updating to branch default
11 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
11 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
12 $ cd test2
12 $ cd test2
13 $ echo a >> a
13 $ echo a >> a
14 $ hg ci -mb
14 $ hg ci -mb
15 $ req() {
15 $ req() {
16 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
16 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
17 > cat hg.pid >> $DAEMON_PIDS
17 > cat hg.pid >> $DAEMON_PIDS
18 > hg --cwd ../test2 push http://localhost:$HGPORT/
18 > hg --cwd ../test2 push http://localhost:$HGPORT/
19 > "$TESTDIR/killdaemons.py"
19 > "$TESTDIR/killdaemons.py"
20 > echo % serve errors
20 > echo % serve errors
21 > cat errors.log
21 > cat errors.log
22 > }
22 > }
23 $ cd ../test
23 $ cd ../test
24
24
25 expect ssl error
25 expect ssl error
26
26
27 $ req
27 $ req
28 pushing to http://localhost:$HGPORT/
28 pushing to http://localhost:$HGPORT/
29 searching for changes
29 searching for changes
30 remote: ssl required
30 remote: ssl required
31 remote: ssl required
31 remote: ssl required
32 updating phase of ba677d0156c1 to 0 from 1 failed!
32 updating ba677d0156c1 to public failed!
33 % serve errors
33 % serve errors
34
34
35 expect authorization error
35 expect authorization error
36
36
37 $ echo '[web]' > .hg/hgrc
37 $ echo '[web]' > .hg/hgrc
38 $ echo 'push_ssl = false' >> .hg/hgrc
38 $ echo 'push_ssl = false' >> .hg/hgrc
39 $ req
39 $ req
40 pushing to http://localhost:$HGPORT/
40 pushing to http://localhost:$HGPORT/
41 searching for changes
41 searching for changes
42 abort: authorization failed
42 abort: authorization failed
43 % serve errors
43 % serve errors
44
44
45 expect authorization error: must have authorized user
45 expect authorization error: must have authorized user
46
46
47 $ echo 'allow_push = unperson' >> .hg/hgrc
47 $ echo 'allow_push = unperson' >> .hg/hgrc
48 $ req
48 $ req
49 pushing to http://localhost:$HGPORT/
49 pushing to http://localhost:$HGPORT/
50 searching for changes
50 searching for changes
51 abort: authorization failed
51 abort: authorization failed
52 % serve errors
52 % serve errors
53
53
54 expect success
54 expect success
55
55
56 $ echo 'allow_push = *' >> .hg/hgrc
56 $ echo 'allow_push = *' >> .hg/hgrc
57 $ echo '[hooks]' >> .hg/hgrc
57 $ echo '[hooks]' >> .hg/hgrc
58 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup 0' >> .hg/hgrc
58 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup 0' >> .hg/hgrc
59 $ req
59 $ req
60 pushing to http://localhost:$HGPORT/
60 pushing to http://localhost:$HGPORT/
61 searching for changes
61 searching for changes
62 remote: adding changesets
62 remote: adding changesets
63 remote: adding manifests
63 remote: adding manifests
64 remote: adding file changes
64 remote: adding file changes
65 remote: added 1 changesets with 1 changes to 1 files
65 remote: added 1 changesets with 1 changes to 1 files
66 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
66 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
67 % serve errors
67 % serve errors
68 $ hg rollback
68 $ hg rollback
69 repository tip rolled back to revision 0 (undo serve)
69 repository tip rolled back to revision 0 (undo serve)
70
70
71 expect success, server lacks the httpheader capability
71 expect success, server lacks the httpheader capability
72
72
73 $ CAP=httpheader
73 $ CAP=httpheader
74 $ . "$TESTDIR/notcapable"
74 $ . "$TESTDIR/notcapable"
75 $ req
75 $ req
76 pushing to http://localhost:$HGPORT/
76 pushing to http://localhost:$HGPORT/
77 searching for changes
77 searching for changes
78 remote: adding changesets
78 remote: adding changesets
79 remote: adding manifests
79 remote: adding manifests
80 remote: adding file changes
80 remote: adding file changes
81 remote: added 1 changesets with 1 changes to 1 files
81 remote: added 1 changesets with 1 changes to 1 files
82 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
82 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
83 % serve errors
83 % serve errors
84 $ hg rollback
84 $ hg rollback
85 repository tip rolled back to revision 0 (undo serve)
85 repository tip rolled back to revision 0 (undo serve)
86
86
87 expect success, server lacks the unbundlehash capability
87 expect success, server lacks the unbundlehash capability
88
88
89 $ CAP=unbundlehash
89 $ CAP=unbundlehash
90 $ . "$TESTDIR/notcapable"
90 $ . "$TESTDIR/notcapable"
91 $ req
91 $ req
92 pushing to http://localhost:$HGPORT/
92 pushing to http://localhost:$HGPORT/
93 searching for changes
93 searching for changes
94 remote: adding changesets
94 remote: adding changesets
95 remote: adding manifests
95 remote: adding manifests
96 remote: adding file changes
96 remote: adding file changes
97 remote: added 1 changesets with 1 changes to 1 files
97 remote: added 1 changesets with 1 changes to 1 files
98 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
98 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
99 % serve errors
99 % serve errors
100 $ hg rollback
100 $ hg rollback
101 repository tip rolled back to revision 0 (undo serve)
101 repository tip rolled back to revision 0 (undo serve)
102
102
103 expect authorization error: all users denied
103 expect authorization error: all users denied
104
104
105 $ echo '[web]' > .hg/hgrc
105 $ echo '[web]' > .hg/hgrc
106 $ echo 'push_ssl = false' >> .hg/hgrc
106 $ echo 'push_ssl = false' >> .hg/hgrc
107 $ echo 'deny_push = *' >> .hg/hgrc
107 $ echo 'deny_push = *' >> .hg/hgrc
108 $ req
108 $ req
109 pushing to http://localhost:$HGPORT/
109 pushing to http://localhost:$HGPORT/
110 searching for changes
110 searching for changes
111 abort: authorization failed
111 abort: authorization failed
112 % serve errors
112 % serve errors
113
113
114 expect authorization error: some users denied, users must be authenticated
114 expect authorization error: some users denied, users must be authenticated
115
115
116 $ echo 'deny_push = unperson' >> .hg/hgrc
116 $ echo 'deny_push = unperson' >> .hg/hgrc
117 $ req
117 $ req
118 pushing to http://localhost:$HGPORT/
118 pushing to http://localhost:$HGPORT/
119 searching for changes
119 searching for changes
120 abort: authorization failed
120 abort: authorization failed
121 % serve errors
121 % serve errors