##// END OF EJS Templates
phases: stop modifying localrepo in writeroots()...
Patrick Mezard -
r16626:503e674f default
parent child Browse files
Show More
@@ -1,2348 +1,2348 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class storecache(filecache):
22 class storecache(filecache):
23 """filecache for files in the store"""
23 """filecache for files in the store"""
24 def join(self, obj, fname):
24 def join(self, obj, fname):
25 return obj.sjoin(fname)
25 return obj.sjoin(fname)
26
26
27 class localrepository(repo.repository):
27 class localrepository(repo.repository):
28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 'known', 'getbundle'))
29 'known', 'getbundle'))
30 supportedformats = set(('revlogv1', 'generaldelta'))
30 supportedformats = set(('revlogv1', 'generaldelta'))
31 supported = supportedformats | set(('store', 'fncache', 'shared',
31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 'dotencode'))
32 'dotencode'))
33
33
34 def __init__(self, baseui, path=None, create=False):
34 def __init__(self, baseui, path=None, create=False):
35 repo.repository.__init__(self)
35 repo.repository.__init__(self)
36 self.root = os.path.realpath(util.expandpath(path))
36 self.root = os.path.realpath(util.expandpath(path))
37 self.path = os.path.join(self.root, ".hg")
37 self.path = os.path.join(self.root, ".hg")
38 self.origroot = path
38 self.origroot = path
39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 self.opener = scmutil.opener(self.path)
40 self.opener = scmutil.opener(self.path)
41 self.wopener = scmutil.opener(self.root)
41 self.wopener = scmutil.opener(self.root)
42 self.baseui = baseui
42 self.baseui = baseui
43 self.ui = baseui.copy()
43 self.ui = baseui.copy()
44 self._dirtyphases = False
44 self._dirtyphases = False
45 # A list of callback to shape the phase if no data were found.
45 # A list of callback to shape the phase if no data were found.
46 # Callback are in the form: func(repo, roots) --> processed root.
46 # Callback are in the form: func(repo, roots) --> processed root.
47 # This list it to be filled by extension during repo setup
47 # This list it to be filled by extension during repo setup
48 self._phasedefaults = []
48 self._phasedefaults = []
49
49
50 try:
50 try:
51 self.ui.readconfig(self.join("hgrc"), self.root)
51 self.ui.readconfig(self.join("hgrc"), self.root)
52 extensions.loadall(self.ui)
52 extensions.loadall(self.ui)
53 except IOError:
53 except IOError:
54 pass
54 pass
55
55
56 if not os.path.isdir(self.path):
56 if not os.path.isdir(self.path):
57 if create:
57 if create:
58 if not os.path.exists(path):
58 if not os.path.exists(path):
59 util.makedirs(path)
59 util.makedirs(path)
60 util.makedir(self.path, notindexed=True)
60 util.makedir(self.path, notindexed=True)
61 requirements = ["revlogv1"]
61 requirements = ["revlogv1"]
62 if self.ui.configbool('format', 'usestore', True):
62 if self.ui.configbool('format', 'usestore', True):
63 os.mkdir(os.path.join(self.path, "store"))
63 os.mkdir(os.path.join(self.path, "store"))
64 requirements.append("store")
64 requirements.append("store")
65 if self.ui.configbool('format', 'usefncache', True):
65 if self.ui.configbool('format', 'usefncache', True):
66 requirements.append("fncache")
66 requirements.append("fncache")
67 if self.ui.configbool('format', 'dotencode', True):
67 if self.ui.configbool('format', 'dotencode', True):
68 requirements.append('dotencode')
68 requirements.append('dotencode')
69 # create an invalid changelog
69 # create an invalid changelog
70 self.opener.append(
70 self.opener.append(
71 "00changelog.i",
71 "00changelog.i",
72 '\0\0\0\2' # represents revlogv2
72 '\0\0\0\2' # represents revlogv2
73 ' dummy changelog to prevent using the old repo layout'
73 ' dummy changelog to prevent using the old repo layout'
74 )
74 )
75 if self.ui.configbool('format', 'generaldelta', False):
75 if self.ui.configbool('format', 'generaldelta', False):
76 requirements.append("generaldelta")
76 requirements.append("generaldelta")
77 requirements = set(requirements)
77 requirements = set(requirements)
78 else:
78 else:
79 raise error.RepoError(_("repository %s not found") % path)
79 raise error.RepoError(_("repository %s not found") % path)
80 elif create:
80 elif create:
81 raise error.RepoError(_("repository %s already exists") % path)
81 raise error.RepoError(_("repository %s already exists") % path)
82 else:
82 else:
83 try:
83 try:
84 requirements = scmutil.readrequires(self.opener, self.supported)
84 requirements = scmutil.readrequires(self.opener, self.supported)
85 except IOError, inst:
85 except IOError, inst:
86 if inst.errno != errno.ENOENT:
86 if inst.errno != errno.ENOENT:
87 raise
87 raise
88 requirements = set()
88 requirements = set()
89
89
90 self.sharedpath = self.path
90 self.sharedpath = self.path
91 try:
91 try:
92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
93 if not os.path.exists(s):
93 if not os.path.exists(s):
94 raise error.RepoError(
94 raise error.RepoError(
95 _('.hg/sharedpath points to nonexistent directory %s') % s)
95 _('.hg/sharedpath points to nonexistent directory %s') % s)
96 self.sharedpath = s
96 self.sharedpath = s
97 except IOError, inst:
97 except IOError, inst:
98 if inst.errno != errno.ENOENT:
98 if inst.errno != errno.ENOENT:
99 raise
99 raise
100
100
101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
102 self.spath = self.store.path
102 self.spath = self.store.path
103 self.sopener = self.store.opener
103 self.sopener = self.store.opener
104 self.sjoin = self.store.join
104 self.sjoin = self.store.join
105 self.opener.createmode = self.store.createmode
105 self.opener.createmode = self.store.createmode
106 self._applyrequirements(requirements)
106 self._applyrequirements(requirements)
107 if create:
107 if create:
108 self._writerequirements()
108 self._writerequirements()
109
109
110
110
111 self._branchcache = None
111 self._branchcache = None
112 self._branchcachetip = None
112 self._branchcachetip = None
113 self.filterpats = {}
113 self.filterpats = {}
114 self._datafilters = {}
114 self._datafilters = {}
115 self._transref = self._lockref = self._wlockref = None
115 self._transref = self._lockref = self._wlockref = None
116
116
117 # A cache for various files under .hg/ that tracks file changes,
117 # A cache for various files under .hg/ that tracks file changes,
118 # (used by the filecache decorator)
118 # (used by the filecache decorator)
119 #
119 #
120 # Maps a property name to its util.filecacheentry
120 # Maps a property name to its util.filecacheentry
121 self._filecache = {}
121 self._filecache = {}
122
122
123 def _applyrequirements(self, requirements):
123 def _applyrequirements(self, requirements):
124 self.requirements = requirements
124 self.requirements = requirements
125 openerreqs = set(('revlogv1', 'generaldelta'))
125 openerreqs = set(('revlogv1', 'generaldelta'))
126 self.sopener.options = dict((r, 1) for r in requirements
126 self.sopener.options = dict((r, 1) for r in requirements
127 if r in openerreqs)
127 if r in openerreqs)
128
128
129 def _writerequirements(self):
129 def _writerequirements(self):
130 reqfile = self.opener("requires", "w")
130 reqfile = self.opener("requires", "w")
131 for r in self.requirements:
131 for r in self.requirements:
132 reqfile.write("%s\n" % r)
132 reqfile.write("%s\n" % r)
133 reqfile.close()
133 reqfile.close()
134
134
135 def _checknested(self, path):
135 def _checknested(self, path):
136 """Determine if path is a legal nested repository."""
136 """Determine if path is a legal nested repository."""
137 if not path.startswith(self.root):
137 if not path.startswith(self.root):
138 return False
138 return False
139 subpath = path[len(self.root) + 1:]
139 subpath = path[len(self.root) + 1:]
140 normsubpath = util.pconvert(subpath)
140 normsubpath = util.pconvert(subpath)
141
141
142 # XXX: Checking against the current working copy is wrong in
142 # XXX: Checking against the current working copy is wrong in
143 # the sense that it can reject things like
143 # the sense that it can reject things like
144 #
144 #
145 # $ hg cat -r 10 sub/x.txt
145 # $ hg cat -r 10 sub/x.txt
146 #
146 #
147 # if sub/ is no longer a subrepository in the working copy
147 # if sub/ is no longer a subrepository in the working copy
148 # parent revision.
148 # parent revision.
149 #
149 #
150 # However, it can of course also allow things that would have
150 # However, it can of course also allow things that would have
151 # been rejected before, such as the above cat command if sub/
151 # been rejected before, such as the above cat command if sub/
152 # is a subrepository now, but was a normal directory before.
152 # is a subrepository now, but was a normal directory before.
153 # The old path auditor would have rejected by mistake since it
153 # The old path auditor would have rejected by mistake since it
154 # panics when it sees sub/.hg/.
154 # panics when it sees sub/.hg/.
155 #
155 #
156 # All in all, checking against the working copy seems sensible
156 # All in all, checking against the working copy seems sensible
157 # since we want to prevent access to nested repositories on
157 # since we want to prevent access to nested repositories on
158 # the filesystem *now*.
158 # the filesystem *now*.
159 ctx = self[None]
159 ctx = self[None]
160 parts = util.splitpath(subpath)
160 parts = util.splitpath(subpath)
161 while parts:
161 while parts:
162 prefix = '/'.join(parts)
162 prefix = '/'.join(parts)
163 if prefix in ctx.substate:
163 if prefix in ctx.substate:
164 if prefix == normsubpath:
164 if prefix == normsubpath:
165 return True
165 return True
166 else:
166 else:
167 sub = ctx.sub(prefix)
167 sub = ctx.sub(prefix)
168 return sub.checknested(subpath[len(prefix) + 1:])
168 return sub.checknested(subpath[len(prefix) + 1:])
169 else:
169 else:
170 parts.pop()
170 parts.pop()
171 return False
171 return False
172
172
173 @filecache('bookmarks')
173 @filecache('bookmarks')
174 def _bookmarks(self):
174 def _bookmarks(self):
175 return bookmarks.read(self)
175 return bookmarks.read(self)
176
176
177 @filecache('bookmarks.current')
177 @filecache('bookmarks.current')
178 def _bookmarkcurrent(self):
178 def _bookmarkcurrent(self):
179 return bookmarks.readcurrent(self)
179 return bookmarks.readcurrent(self)
180
180
181 def _writebookmarks(self, marks):
181 def _writebookmarks(self, marks):
182 bookmarks.write(self)
182 bookmarks.write(self)
183
183
184 @storecache('phaseroots')
184 @storecache('phaseroots')
185 def _phaseroots(self):
185 def _phaseroots(self):
186 phaseroots, self._dirtyphases = phases.readroots(
186 phaseroots, self._dirtyphases = phases.readroots(
187 self, self._phasedefaults)
187 self, self._phasedefaults)
188 return phaseroots
188 return phaseroots
189
189
190 @propertycache
190 @propertycache
191 def _phaserev(self):
191 def _phaserev(self):
192 cache = [phases.public] * len(self)
192 cache = [phases.public] * len(self)
193 for phase in phases.trackedphases:
193 for phase in phases.trackedphases:
194 roots = map(self.changelog.rev, self._phaseroots[phase])
194 roots = map(self.changelog.rev, self._phaseroots[phase])
195 if roots:
195 if roots:
196 for rev in roots:
196 for rev in roots:
197 cache[rev] = phase
197 cache[rev] = phase
198 for rev in self.changelog.descendants(*roots):
198 for rev in self.changelog.descendants(*roots):
199 cache[rev] = phase
199 cache[rev] = phase
200 return cache
200 return cache
201
201
202 @storecache('00changelog.i')
202 @storecache('00changelog.i')
203 def changelog(self):
203 def changelog(self):
204 c = changelog.changelog(self.sopener)
204 c = changelog.changelog(self.sopener)
205 if 'HG_PENDING' in os.environ:
205 if 'HG_PENDING' in os.environ:
206 p = os.environ['HG_PENDING']
206 p = os.environ['HG_PENDING']
207 if p.startswith(self.root):
207 if p.startswith(self.root):
208 c.readpending('00changelog.i.a')
208 c.readpending('00changelog.i.a')
209 return c
209 return c
210
210
211 @storecache('00manifest.i')
211 @storecache('00manifest.i')
212 def manifest(self):
212 def manifest(self):
213 return manifest.manifest(self.sopener)
213 return manifest.manifest(self.sopener)
214
214
215 @filecache('dirstate')
215 @filecache('dirstate')
216 def dirstate(self):
216 def dirstate(self):
217 warned = [0]
217 warned = [0]
218 def validate(node):
218 def validate(node):
219 try:
219 try:
220 self.changelog.rev(node)
220 self.changelog.rev(node)
221 return node
221 return node
222 except error.LookupError:
222 except error.LookupError:
223 if not warned[0]:
223 if not warned[0]:
224 warned[0] = True
224 warned[0] = True
225 self.ui.warn(_("warning: ignoring unknown"
225 self.ui.warn(_("warning: ignoring unknown"
226 " working parent %s!\n") % short(node))
226 " working parent %s!\n") % short(node))
227 return nullid
227 return nullid
228
228
229 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
229 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
230
230
231 def __getitem__(self, changeid):
231 def __getitem__(self, changeid):
232 if changeid is None:
232 if changeid is None:
233 return context.workingctx(self)
233 return context.workingctx(self)
234 return context.changectx(self, changeid)
234 return context.changectx(self, changeid)
235
235
236 def __contains__(self, changeid):
236 def __contains__(self, changeid):
237 try:
237 try:
238 return bool(self.lookup(changeid))
238 return bool(self.lookup(changeid))
239 except error.RepoLookupError:
239 except error.RepoLookupError:
240 return False
240 return False
241
241
242 def __nonzero__(self):
242 def __nonzero__(self):
243 return True
243 return True
244
244
245 def __len__(self):
245 def __len__(self):
246 return len(self.changelog)
246 return len(self.changelog)
247
247
248 def __iter__(self):
248 def __iter__(self):
249 for i in xrange(len(self)):
249 for i in xrange(len(self)):
250 yield i
250 yield i
251
251
252 def revs(self, expr, *args):
252 def revs(self, expr, *args):
253 '''Return a list of revisions matching the given revset'''
253 '''Return a list of revisions matching the given revset'''
254 expr = revset.formatspec(expr, *args)
254 expr = revset.formatspec(expr, *args)
255 m = revset.match(None, expr)
255 m = revset.match(None, expr)
256 return [r for r in m(self, range(len(self)))]
256 return [r for r in m(self, range(len(self)))]
257
257
258 def set(self, expr, *args):
258 def set(self, expr, *args):
259 '''
259 '''
260 Yield a context for each matching revision, after doing arg
260 Yield a context for each matching revision, after doing arg
261 replacement via revset.formatspec
261 replacement via revset.formatspec
262 '''
262 '''
263 for r in self.revs(expr, *args):
263 for r in self.revs(expr, *args):
264 yield self[r]
264 yield self[r]
265
265
266 def url(self):
266 def url(self):
267 return 'file:' + self.root
267 return 'file:' + self.root
268
268
269 def hook(self, name, throw=False, **args):
269 def hook(self, name, throw=False, **args):
270 return hook.hook(self.ui, self, name, throw, **args)
270 return hook.hook(self.ui, self, name, throw, **args)
271
271
272 tag_disallowed = ':\r\n'
272 tag_disallowed = ':\r\n'
273
273
274 def _tag(self, names, node, message, local, user, date, extra={}):
274 def _tag(self, names, node, message, local, user, date, extra={}):
275 if isinstance(names, str):
275 if isinstance(names, str):
276 allchars = names
276 allchars = names
277 names = (names,)
277 names = (names,)
278 else:
278 else:
279 allchars = ''.join(names)
279 allchars = ''.join(names)
280 for c in self.tag_disallowed:
280 for c in self.tag_disallowed:
281 if c in allchars:
281 if c in allchars:
282 raise util.Abort(_('%r cannot be used in a tag name') % c)
282 raise util.Abort(_('%r cannot be used in a tag name') % c)
283
283
284 branches = self.branchmap()
284 branches = self.branchmap()
285 for name in names:
285 for name in names:
286 self.hook('pretag', throw=True, node=hex(node), tag=name,
286 self.hook('pretag', throw=True, node=hex(node), tag=name,
287 local=local)
287 local=local)
288 if name in branches:
288 if name in branches:
289 self.ui.warn(_("warning: tag %s conflicts with existing"
289 self.ui.warn(_("warning: tag %s conflicts with existing"
290 " branch name\n") % name)
290 " branch name\n") % name)
291
291
292 def writetags(fp, names, munge, prevtags):
292 def writetags(fp, names, munge, prevtags):
293 fp.seek(0, 2)
293 fp.seek(0, 2)
294 if prevtags and prevtags[-1] != '\n':
294 if prevtags and prevtags[-1] != '\n':
295 fp.write('\n')
295 fp.write('\n')
296 for name in names:
296 for name in names:
297 m = munge and munge(name) or name
297 m = munge and munge(name) or name
298 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
298 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
299 old = self.tags().get(name, nullid)
299 old = self.tags().get(name, nullid)
300 fp.write('%s %s\n' % (hex(old), m))
300 fp.write('%s %s\n' % (hex(old), m))
301 fp.write('%s %s\n' % (hex(node), m))
301 fp.write('%s %s\n' % (hex(node), m))
302 fp.close()
302 fp.close()
303
303
304 prevtags = ''
304 prevtags = ''
305 if local:
305 if local:
306 try:
306 try:
307 fp = self.opener('localtags', 'r+')
307 fp = self.opener('localtags', 'r+')
308 except IOError:
308 except IOError:
309 fp = self.opener('localtags', 'a')
309 fp = self.opener('localtags', 'a')
310 else:
310 else:
311 prevtags = fp.read()
311 prevtags = fp.read()
312
312
313 # local tags are stored in the current charset
313 # local tags are stored in the current charset
314 writetags(fp, names, None, prevtags)
314 writetags(fp, names, None, prevtags)
315 for name in names:
315 for name in names:
316 self.hook('tag', node=hex(node), tag=name, local=local)
316 self.hook('tag', node=hex(node), tag=name, local=local)
317 return
317 return
318
318
319 try:
319 try:
320 fp = self.wfile('.hgtags', 'rb+')
320 fp = self.wfile('.hgtags', 'rb+')
321 except IOError, e:
321 except IOError, e:
322 if e.errno != errno.ENOENT:
322 if e.errno != errno.ENOENT:
323 raise
323 raise
324 fp = self.wfile('.hgtags', 'ab')
324 fp = self.wfile('.hgtags', 'ab')
325 else:
325 else:
326 prevtags = fp.read()
326 prevtags = fp.read()
327
327
328 # committed tags are stored in UTF-8
328 # committed tags are stored in UTF-8
329 writetags(fp, names, encoding.fromlocal, prevtags)
329 writetags(fp, names, encoding.fromlocal, prevtags)
330
330
331 fp.close()
331 fp.close()
332
332
333 self.invalidatecaches()
333 self.invalidatecaches()
334
334
335 if '.hgtags' not in self.dirstate:
335 if '.hgtags' not in self.dirstate:
336 self[None].add(['.hgtags'])
336 self[None].add(['.hgtags'])
337
337
338 m = matchmod.exact(self.root, '', ['.hgtags'])
338 m = matchmod.exact(self.root, '', ['.hgtags'])
339 tagnode = self.commit(message, user, date, extra=extra, match=m)
339 tagnode = self.commit(message, user, date, extra=extra, match=m)
340
340
341 for name in names:
341 for name in names:
342 self.hook('tag', node=hex(node), tag=name, local=local)
342 self.hook('tag', node=hex(node), tag=name, local=local)
343
343
344 return tagnode
344 return tagnode
345
345
346 def tag(self, names, node, message, local, user, date):
346 def tag(self, names, node, message, local, user, date):
347 '''tag a revision with one or more symbolic names.
347 '''tag a revision with one or more symbolic names.
348
348
349 names is a list of strings or, when adding a single tag, names may be a
349 names is a list of strings or, when adding a single tag, names may be a
350 string.
350 string.
351
351
352 if local is True, the tags are stored in a per-repository file.
352 if local is True, the tags are stored in a per-repository file.
353 otherwise, they are stored in the .hgtags file, and a new
353 otherwise, they are stored in the .hgtags file, and a new
354 changeset is committed with the change.
354 changeset is committed with the change.
355
355
356 keyword arguments:
356 keyword arguments:
357
357
358 local: whether to store tags in non-version-controlled file
358 local: whether to store tags in non-version-controlled file
359 (default False)
359 (default False)
360
360
361 message: commit message to use if committing
361 message: commit message to use if committing
362
362
363 user: name of user to use if committing
363 user: name of user to use if committing
364
364
365 date: date tuple to use if committing'''
365 date: date tuple to use if committing'''
366
366
367 if not local:
367 if not local:
368 for x in self.status()[:5]:
368 for x in self.status()[:5]:
369 if '.hgtags' in x:
369 if '.hgtags' in x:
370 raise util.Abort(_('working copy of .hgtags is changed '
370 raise util.Abort(_('working copy of .hgtags is changed '
371 '(please commit .hgtags manually)'))
371 '(please commit .hgtags manually)'))
372
372
373 self.tags() # instantiate the cache
373 self.tags() # instantiate the cache
374 self._tag(names, node, message, local, user, date)
374 self._tag(names, node, message, local, user, date)
375
375
376 @propertycache
376 @propertycache
377 def _tagscache(self):
377 def _tagscache(self):
378 '''Returns a tagscache object that contains various tags related caches.'''
378 '''Returns a tagscache object that contains various tags related caches.'''
379
379
380 # This simplifies its cache management by having one decorated
380 # This simplifies its cache management by having one decorated
381 # function (this one) and the rest simply fetch things from it.
381 # function (this one) and the rest simply fetch things from it.
382 class tagscache(object):
382 class tagscache(object):
383 def __init__(self):
383 def __init__(self):
384 # These two define the set of tags for this repository. tags
384 # These two define the set of tags for this repository. tags
385 # maps tag name to node; tagtypes maps tag name to 'global' or
385 # maps tag name to node; tagtypes maps tag name to 'global' or
386 # 'local'. (Global tags are defined by .hgtags across all
386 # 'local'. (Global tags are defined by .hgtags across all
387 # heads, and local tags are defined in .hg/localtags.)
387 # heads, and local tags are defined in .hg/localtags.)
388 # They constitute the in-memory cache of tags.
388 # They constitute the in-memory cache of tags.
389 self.tags = self.tagtypes = None
389 self.tags = self.tagtypes = None
390
390
391 self.nodetagscache = self.tagslist = None
391 self.nodetagscache = self.tagslist = None
392
392
393 cache = tagscache()
393 cache = tagscache()
394 cache.tags, cache.tagtypes = self._findtags()
394 cache.tags, cache.tagtypes = self._findtags()
395
395
396 return cache
396 return cache
397
397
398 def tags(self):
398 def tags(self):
399 '''return a mapping of tag to node'''
399 '''return a mapping of tag to node'''
400 t = {}
400 t = {}
401 for k, v in self._tagscache.tags.iteritems():
401 for k, v in self._tagscache.tags.iteritems():
402 try:
402 try:
403 # ignore tags to unknown nodes
403 # ignore tags to unknown nodes
404 self.changelog.rev(v)
404 self.changelog.rev(v)
405 t[k] = v
405 t[k] = v
406 except error.LookupError:
406 except error.LookupError:
407 pass
407 pass
408 return t
408 return t
409
409
410 def _findtags(self):
410 def _findtags(self):
411 '''Do the hard work of finding tags. Return a pair of dicts
411 '''Do the hard work of finding tags. Return a pair of dicts
412 (tags, tagtypes) where tags maps tag name to node, and tagtypes
412 (tags, tagtypes) where tags maps tag name to node, and tagtypes
413 maps tag name to a string like \'global\' or \'local\'.
413 maps tag name to a string like \'global\' or \'local\'.
414 Subclasses or extensions are free to add their own tags, but
414 Subclasses or extensions are free to add their own tags, but
415 should be aware that the returned dicts will be retained for the
415 should be aware that the returned dicts will be retained for the
416 duration of the localrepo object.'''
416 duration of the localrepo object.'''
417
417
418 # XXX what tagtype should subclasses/extensions use? Currently
418 # XXX what tagtype should subclasses/extensions use? Currently
419 # mq and bookmarks add tags, but do not set the tagtype at all.
419 # mq and bookmarks add tags, but do not set the tagtype at all.
420 # Should each extension invent its own tag type? Should there
420 # Should each extension invent its own tag type? Should there
421 # be one tagtype for all such "virtual" tags? Or is the status
421 # be one tagtype for all such "virtual" tags? Or is the status
422 # quo fine?
422 # quo fine?
423
423
424 alltags = {} # map tag name to (node, hist)
424 alltags = {} # map tag name to (node, hist)
425 tagtypes = {}
425 tagtypes = {}
426
426
427 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
427 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
428 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
428 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
429
429
430 # Build the return dicts. Have to re-encode tag names because
430 # Build the return dicts. Have to re-encode tag names because
431 # the tags module always uses UTF-8 (in order not to lose info
431 # the tags module always uses UTF-8 (in order not to lose info
432 # writing to the cache), but the rest of Mercurial wants them in
432 # writing to the cache), but the rest of Mercurial wants them in
433 # local encoding.
433 # local encoding.
434 tags = {}
434 tags = {}
435 for (name, (node, hist)) in alltags.iteritems():
435 for (name, (node, hist)) in alltags.iteritems():
436 if node != nullid:
436 if node != nullid:
437 tags[encoding.tolocal(name)] = node
437 tags[encoding.tolocal(name)] = node
438 tags['tip'] = self.changelog.tip()
438 tags['tip'] = self.changelog.tip()
439 tagtypes = dict([(encoding.tolocal(name), value)
439 tagtypes = dict([(encoding.tolocal(name), value)
440 for (name, value) in tagtypes.iteritems()])
440 for (name, value) in tagtypes.iteritems()])
441 return (tags, tagtypes)
441 return (tags, tagtypes)
442
442
443 def tagtype(self, tagname):
443 def tagtype(self, tagname):
444 '''
444 '''
445 return the type of the given tag. result can be:
445 return the type of the given tag. result can be:
446
446
447 'local' : a local tag
447 'local' : a local tag
448 'global' : a global tag
448 'global' : a global tag
449 None : tag does not exist
449 None : tag does not exist
450 '''
450 '''
451
451
452 return self._tagscache.tagtypes.get(tagname)
452 return self._tagscache.tagtypes.get(tagname)
453
453
454 def tagslist(self):
454 def tagslist(self):
455 '''return a list of tags ordered by revision'''
455 '''return a list of tags ordered by revision'''
456 if not self._tagscache.tagslist:
456 if not self._tagscache.tagslist:
457 l = []
457 l = []
458 for t, n in self.tags().iteritems():
458 for t, n in self.tags().iteritems():
459 r = self.changelog.rev(n)
459 r = self.changelog.rev(n)
460 l.append((r, t, n))
460 l.append((r, t, n))
461 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
461 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
462
462
463 return self._tagscache.tagslist
463 return self._tagscache.tagslist
464
464
465 def nodetags(self, node):
465 def nodetags(self, node):
466 '''return the tags associated with a node'''
466 '''return the tags associated with a node'''
467 if not self._tagscache.nodetagscache:
467 if not self._tagscache.nodetagscache:
468 nodetagscache = {}
468 nodetagscache = {}
469 for t, n in self._tagscache.tags.iteritems():
469 for t, n in self._tagscache.tags.iteritems():
470 nodetagscache.setdefault(n, []).append(t)
470 nodetagscache.setdefault(n, []).append(t)
471 for tags in nodetagscache.itervalues():
471 for tags in nodetagscache.itervalues():
472 tags.sort()
472 tags.sort()
473 self._tagscache.nodetagscache = nodetagscache
473 self._tagscache.nodetagscache = nodetagscache
474 return self._tagscache.nodetagscache.get(node, [])
474 return self._tagscache.nodetagscache.get(node, [])
475
475
476 def nodebookmarks(self, node):
476 def nodebookmarks(self, node):
477 marks = []
477 marks = []
478 for bookmark, n in self._bookmarks.iteritems():
478 for bookmark, n in self._bookmarks.iteritems():
479 if n == node:
479 if n == node:
480 marks.append(bookmark)
480 marks.append(bookmark)
481 return sorted(marks)
481 return sorted(marks)
482
482
483 def _branchtags(self, partial, lrev):
483 def _branchtags(self, partial, lrev):
484 # TODO: rename this function?
484 # TODO: rename this function?
485 tiprev = len(self) - 1
485 tiprev = len(self) - 1
486 if lrev != tiprev:
486 if lrev != tiprev:
487 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
487 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
488 self._updatebranchcache(partial, ctxgen)
488 self._updatebranchcache(partial, ctxgen)
489 self._writebranchcache(partial, self.changelog.tip(), tiprev)
489 self._writebranchcache(partial, self.changelog.tip(), tiprev)
490
490
491 return partial
491 return partial
492
492
493 def updatebranchcache(self):
493 def updatebranchcache(self):
494 tip = self.changelog.tip()
494 tip = self.changelog.tip()
495 if self._branchcache is not None and self._branchcachetip == tip:
495 if self._branchcache is not None and self._branchcachetip == tip:
496 return
496 return
497
497
498 oldtip = self._branchcachetip
498 oldtip = self._branchcachetip
499 self._branchcachetip = tip
499 self._branchcachetip = tip
500 if oldtip is None or oldtip not in self.changelog.nodemap:
500 if oldtip is None or oldtip not in self.changelog.nodemap:
501 partial, last, lrev = self._readbranchcache()
501 partial, last, lrev = self._readbranchcache()
502 else:
502 else:
503 lrev = self.changelog.rev(oldtip)
503 lrev = self.changelog.rev(oldtip)
504 partial = self._branchcache
504 partial = self._branchcache
505
505
506 self._branchtags(partial, lrev)
506 self._branchtags(partial, lrev)
507 # this private cache holds all heads (not just tips)
507 # this private cache holds all heads (not just tips)
508 self._branchcache = partial
508 self._branchcache = partial
509
509
510 def branchmap(self):
510 def branchmap(self):
511 '''returns a dictionary {branch: [branchheads]}'''
511 '''returns a dictionary {branch: [branchheads]}'''
512 self.updatebranchcache()
512 self.updatebranchcache()
513 return self._branchcache
513 return self._branchcache
514
514
515 def branchtags(self):
515 def branchtags(self):
516 '''return a dict where branch names map to the tipmost head of
516 '''return a dict where branch names map to the tipmost head of
517 the branch, open heads come before closed'''
517 the branch, open heads come before closed'''
518 bt = {}
518 bt = {}
519 for bn, heads in self.branchmap().iteritems():
519 for bn, heads in self.branchmap().iteritems():
520 tip = heads[-1]
520 tip = heads[-1]
521 for h in reversed(heads):
521 for h in reversed(heads):
522 if 'close' not in self.changelog.read(h)[5]:
522 if 'close' not in self.changelog.read(h)[5]:
523 tip = h
523 tip = h
524 break
524 break
525 bt[bn] = tip
525 bt[bn] = tip
526 return bt
526 return bt
527
527
528 def _readbranchcache(self):
528 def _readbranchcache(self):
529 partial = {}
529 partial = {}
530 try:
530 try:
531 f = self.opener("cache/branchheads")
531 f = self.opener("cache/branchheads")
532 lines = f.read().split('\n')
532 lines = f.read().split('\n')
533 f.close()
533 f.close()
534 except (IOError, OSError):
534 except (IOError, OSError):
535 return {}, nullid, nullrev
535 return {}, nullid, nullrev
536
536
537 try:
537 try:
538 last, lrev = lines.pop(0).split(" ", 1)
538 last, lrev = lines.pop(0).split(" ", 1)
539 last, lrev = bin(last), int(lrev)
539 last, lrev = bin(last), int(lrev)
540 if lrev >= len(self) or self[lrev].node() != last:
540 if lrev >= len(self) or self[lrev].node() != last:
541 # invalidate the cache
541 # invalidate the cache
542 raise ValueError('invalidating branch cache (tip differs)')
542 raise ValueError('invalidating branch cache (tip differs)')
543 for l in lines:
543 for l in lines:
544 if not l:
544 if not l:
545 continue
545 continue
546 node, label = l.split(" ", 1)
546 node, label = l.split(" ", 1)
547 label = encoding.tolocal(label.strip())
547 label = encoding.tolocal(label.strip())
548 partial.setdefault(label, []).append(bin(node))
548 partial.setdefault(label, []).append(bin(node))
549 except KeyboardInterrupt:
549 except KeyboardInterrupt:
550 raise
550 raise
551 except Exception, inst:
551 except Exception, inst:
552 if self.ui.debugflag:
552 if self.ui.debugflag:
553 self.ui.warn(str(inst), '\n')
553 self.ui.warn(str(inst), '\n')
554 partial, last, lrev = {}, nullid, nullrev
554 partial, last, lrev = {}, nullid, nullrev
555 return partial, last, lrev
555 return partial, last, lrev
556
556
557 def _writebranchcache(self, branches, tip, tiprev):
557 def _writebranchcache(self, branches, tip, tiprev):
558 try:
558 try:
559 f = self.opener("cache/branchheads", "w", atomictemp=True)
559 f = self.opener("cache/branchheads", "w", atomictemp=True)
560 f.write("%s %s\n" % (hex(tip), tiprev))
560 f.write("%s %s\n" % (hex(tip), tiprev))
561 for label, nodes in branches.iteritems():
561 for label, nodes in branches.iteritems():
562 for node in nodes:
562 for node in nodes:
563 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
563 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
564 f.close()
564 f.close()
565 except (IOError, OSError):
565 except (IOError, OSError):
566 pass
566 pass
567
567
568 def _updatebranchcache(self, partial, ctxgen):
568 def _updatebranchcache(self, partial, ctxgen):
569 # collect new branch entries
569 # collect new branch entries
570 newbranches = {}
570 newbranches = {}
571 for c in ctxgen:
571 for c in ctxgen:
572 newbranches.setdefault(c.branch(), []).append(c.node())
572 newbranches.setdefault(c.branch(), []).append(c.node())
573 # if older branchheads are reachable from new ones, they aren't
573 # if older branchheads are reachable from new ones, they aren't
574 # really branchheads. Note checking parents is insufficient:
574 # really branchheads. Note checking parents is insufficient:
575 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
575 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
576 for branch, newnodes in newbranches.iteritems():
576 for branch, newnodes in newbranches.iteritems():
577 bheads = partial.setdefault(branch, [])
577 bheads = partial.setdefault(branch, [])
578 bheads.extend(newnodes)
578 bheads.extend(newnodes)
579 if len(bheads) <= 1:
579 if len(bheads) <= 1:
580 continue
580 continue
581 bheads = sorted(bheads, key=lambda x: self[x].rev())
581 bheads = sorted(bheads, key=lambda x: self[x].rev())
582 # starting from tip means fewer passes over reachable
582 # starting from tip means fewer passes over reachable
583 while newnodes:
583 while newnodes:
584 latest = newnodes.pop()
584 latest = newnodes.pop()
585 if latest not in bheads:
585 if latest not in bheads:
586 continue
586 continue
587 minbhrev = self[bheads[0]].node()
587 minbhrev = self[bheads[0]].node()
588 reachable = self.changelog.reachable(latest, minbhrev)
588 reachable = self.changelog.reachable(latest, minbhrev)
589 reachable.remove(latest)
589 reachable.remove(latest)
590 if reachable:
590 if reachable:
591 bheads = [b for b in bheads if b not in reachable]
591 bheads = [b for b in bheads if b not in reachable]
592 partial[branch] = bheads
592 partial[branch] = bheads
593
593
594 def lookup(self, key):
594 def lookup(self, key):
595 return self[key].node()
595 return self[key].node()
596
596
597 def lookupbranch(self, key, remote=None):
597 def lookupbranch(self, key, remote=None):
598 repo = remote or self
598 repo = remote or self
599 if key in repo.branchmap():
599 if key in repo.branchmap():
600 return key
600 return key
601
601
602 repo = (remote and remote.local()) and remote or self
602 repo = (remote and remote.local()) and remote or self
603 return repo[key].branch()
603 return repo[key].branch()
604
604
605 def known(self, nodes):
605 def known(self, nodes):
606 nm = self.changelog.nodemap
606 nm = self.changelog.nodemap
607 result = []
607 result = []
608 for n in nodes:
608 for n in nodes:
609 r = nm.get(n)
609 r = nm.get(n)
610 resp = not (r is None or self._phaserev[r] >= phases.secret)
610 resp = not (r is None or self._phaserev[r] >= phases.secret)
611 result.append(resp)
611 result.append(resp)
612 return result
612 return result
613
613
614 def local(self):
614 def local(self):
615 return self
615 return self
616
616
617 def join(self, f):
617 def join(self, f):
618 return os.path.join(self.path, f)
618 return os.path.join(self.path, f)
619
619
620 def wjoin(self, f):
620 def wjoin(self, f):
621 return os.path.join(self.root, f)
621 return os.path.join(self.root, f)
622
622
623 def file(self, f):
623 def file(self, f):
624 if f[0] == '/':
624 if f[0] == '/':
625 f = f[1:]
625 f = f[1:]
626 return filelog.filelog(self.sopener, f)
626 return filelog.filelog(self.sopener, f)
627
627
628 def changectx(self, changeid):
628 def changectx(self, changeid):
629 return self[changeid]
629 return self[changeid]
630
630
631 def parents(self, changeid=None):
631 def parents(self, changeid=None):
632 '''get list of changectxs for parents of changeid'''
632 '''get list of changectxs for parents of changeid'''
633 return self[changeid].parents()
633 return self[changeid].parents()
634
634
635 def setparents(self, p1, p2=nullid):
635 def setparents(self, p1, p2=nullid):
636 copies = self.dirstate.setparents(p1, p2)
636 copies = self.dirstate.setparents(p1, p2)
637 if copies:
637 if copies:
638 # Adjust copy records, the dirstate cannot do it, it
638 # Adjust copy records, the dirstate cannot do it, it
639 # requires access to parents manifests. Preserve them
639 # requires access to parents manifests. Preserve them
640 # only for entries added to first parent.
640 # only for entries added to first parent.
641 pctx = self[p1]
641 pctx = self[p1]
642 for f in copies:
642 for f in copies:
643 if f not in pctx and copies[f] in pctx:
643 if f not in pctx and copies[f] in pctx:
644 self.dirstate.copy(copies[f], f)
644 self.dirstate.copy(copies[f], f)
645
645
646 def filectx(self, path, changeid=None, fileid=None):
646 def filectx(self, path, changeid=None, fileid=None):
647 """changeid can be a changeset revision, node, or tag.
647 """changeid can be a changeset revision, node, or tag.
648 fileid can be a file revision or node."""
648 fileid can be a file revision or node."""
649 return context.filectx(self, path, changeid, fileid)
649 return context.filectx(self, path, changeid, fileid)
650
650
651 def getcwd(self):
651 def getcwd(self):
652 return self.dirstate.getcwd()
652 return self.dirstate.getcwd()
653
653
654 def pathto(self, f, cwd=None):
654 def pathto(self, f, cwd=None):
655 return self.dirstate.pathto(f, cwd)
655 return self.dirstate.pathto(f, cwd)
656
656
657 def wfile(self, f, mode='r'):
657 def wfile(self, f, mode='r'):
658 return self.wopener(f, mode)
658 return self.wopener(f, mode)
659
659
660 def _link(self, f):
660 def _link(self, f):
661 return os.path.islink(self.wjoin(f))
661 return os.path.islink(self.wjoin(f))
662
662
663 def _loadfilter(self, filter):
663 def _loadfilter(self, filter):
664 if filter not in self.filterpats:
664 if filter not in self.filterpats:
665 l = []
665 l = []
666 for pat, cmd in self.ui.configitems(filter):
666 for pat, cmd in self.ui.configitems(filter):
667 if cmd == '!':
667 if cmd == '!':
668 continue
668 continue
669 mf = matchmod.match(self.root, '', [pat])
669 mf = matchmod.match(self.root, '', [pat])
670 fn = None
670 fn = None
671 params = cmd
671 params = cmd
672 for name, filterfn in self._datafilters.iteritems():
672 for name, filterfn in self._datafilters.iteritems():
673 if cmd.startswith(name):
673 if cmd.startswith(name):
674 fn = filterfn
674 fn = filterfn
675 params = cmd[len(name):].lstrip()
675 params = cmd[len(name):].lstrip()
676 break
676 break
677 if not fn:
677 if not fn:
678 fn = lambda s, c, **kwargs: util.filter(s, c)
678 fn = lambda s, c, **kwargs: util.filter(s, c)
679 # Wrap old filters not supporting keyword arguments
679 # Wrap old filters not supporting keyword arguments
680 if not inspect.getargspec(fn)[2]:
680 if not inspect.getargspec(fn)[2]:
681 oldfn = fn
681 oldfn = fn
682 fn = lambda s, c, **kwargs: oldfn(s, c)
682 fn = lambda s, c, **kwargs: oldfn(s, c)
683 l.append((mf, fn, params))
683 l.append((mf, fn, params))
684 self.filterpats[filter] = l
684 self.filterpats[filter] = l
685 return self.filterpats[filter]
685 return self.filterpats[filter]
686
686
687 def _filter(self, filterpats, filename, data):
687 def _filter(self, filterpats, filename, data):
688 for mf, fn, cmd in filterpats:
688 for mf, fn, cmd in filterpats:
689 if mf(filename):
689 if mf(filename):
690 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
690 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
691 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
691 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
692 break
692 break
693
693
694 return data
694 return data
695
695
696 @propertycache
696 @propertycache
697 def _encodefilterpats(self):
697 def _encodefilterpats(self):
698 return self._loadfilter('encode')
698 return self._loadfilter('encode')
699
699
700 @propertycache
700 @propertycache
701 def _decodefilterpats(self):
701 def _decodefilterpats(self):
702 return self._loadfilter('decode')
702 return self._loadfilter('decode')
703
703
704 def adddatafilter(self, name, filter):
704 def adddatafilter(self, name, filter):
705 self._datafilters[name] = filter
705 self._datafilters[name] = filter
706
706
707 def wread(self, filename):
707 def wread(self, filename):
708 if self._link(filename):
708 if self._link(filename):
709 data = os.readlink(self.wjoin(filename))
709 data = os.readlink(self.wjoin(filename))
710 else:
710 else:
711 data = self.wopener.read(filename)
711 data = self.wopener.read(filename)
712 return self._filter(self._encodefilterpats, filename, data)
712 return self._filter(self._encodefilterpats, filename, data)
713
713
714 def wwrite(self, filename, data, flags):
714 def wwrite(self, filename, data, flags):
715 data = self._filter(self._decodefilterpats, filename, data)
715 data = self._filter(self._decodefilterpats, filename, data)
716 if 'l' in flags:
716 if 'l' in flags:
717 self.wopener.symlink(data, filename)
717 self.wopener.symlink(data, filename)
718 else:
718 else:
719 self.wopener.write(filename, data)
719 self.wopener.write(filename, data)
720 if 'x' in flags:
720 if 'x' in flags:
721 util.setflags(self.wjoin(filename), False, True)
721 util.setflags(self.wjoin(filename), False, True)
722
722
723 def wwritedata(self, filename, data):
723 def wwritedata(self, filename, data):
724 return self._filter(self._decodefilterpats, filename, data)
724 return self._filter(self._decodefilterpats, filename, data)
725
725
726 def transaction(self, desc):
726 def transaction(self, desc):
727 tr = self._transref and self._transref() or None
727 tr = self._transref and self._transref() or None
728 if tr and tr.running():
728 if tr and tr.running():
729 return tr.nest()
729 return tr.nest()
730
730
731 # abort here if the journal already exists
731 # abort here if the journal already exists
732 if os.path.exists(self.sjoin("journal")):
732 if os.path.exists(self.sjoin("journal")):
733 raise error.RepoError(
733 raise error.RepoError(
734 _("abandoned transaction found - run hg recover"))
734 _("abandoned transaction found - run hg recover"))
735
735
736 self._writejournal(desc)
736 self._writejournal(desc)
737 renames = [(x, undoname(x)) for x in self._journalfiles()]
737 renames = [(x, undoname(x)) for x in self._journalfiles()]
738
738
739 tr = transaction.transaction(self.ui.warn, self.sopener,
739 tr = transaction.transaction(self.ui.warn, self.sopener,
740 self.sjoin("journal"),
740 self.sjoin("journal"),
741 aftertrans(renames),
741 aftertrans(renames),
742 self.store.createmode)
742 self.store.createmode)
743 self._transref = weakref.ref(tr)
743 self._transref = weakref.ref(tr)
744 return tr
744 return tr
745
745
746 def _journalfiles(self):
746 def _journalfiles(self):
747 return (self.sjoin('journal'), self.join('journal.dirstate'),
747 return (self.sjoin('journal'), self.join('journal.dirstate'),
748 self.join('journal.branch'), self.join('journal.desc'),
748 self.join('journal.branch'), self.join('journal.desc'),
749 self.join('journal.bookmarks'),
749 self.join('journal.bookmarks'),
750 self.sjoin('journal.phaseroots'))
750 self.sjoin('journal.phaseroots'))
751
751
752 def undofiles(self):
752 def undofiles(self):
753 return [undoname(x) for x in self._journalfiles()]
753 return [undoname(x) for x in self._journalfiles()]
754
754
755 def _writejournal(self, desc):
755 def _writejournal(self, desc):
756 self.opener.write("journal.dirstate",
756 self.opener.write("journal.dirstate",
757 self.opener.tryread("dirstate"))
757 self.opener.tryread("dirstate"))
758 self.opener.write("journal.branch",
758 self.opener.write("journal.branch",
759 encoding.fromlocal(self.dirstate.branch()))
759 encoding.fromlocal(self.dirstate.branch()))
760 self.opener.write("journal.desc",
760 self.opener.write("journal.desc",
761 "%d\n%s\n" % (len(self), desc))
761 "%d\n%s\n" % (len(self), desc))
762 self.opener.write("journal.bookmarks",
762 self.opener.write("journal.bookmarks",
763 self.opener.tryread("bookmarks"))
763 self.opener.tryread("bookmarks"))
764 self.sopener.write("journal.phaseroots",
764 self.sopener.write("journal.phaseroots",
765 self.sopener.tryread("phaseroots"))
765 self.sopener.tryread("phaseroots"))
766
766
767 def recover(self):
767 def recover(self):
768 lock = self.lock()
768 lock = self.lock()
769 try:
769 try:
770 if os.path.exists(self.sjoin("journal")):
770 if os.path.exists(self.sjoin("journal")):
771 self.ui.status(_("rolling back interrupted transaction\n"))
771 self.ui.status(_("rolling back interrupted transaction\n"))
772 transaction.rollback(self.sopener, self.sjoin("journal"),
772 transaction.rollback(self.sopener, self.sjoin("journal"),
773 self.ui.warn)
773 self.ui.warn)
774 self.invalidate()
774 self.invalidate()
775 return True
775 return True
776 else:
776 else:
777 self.ui.warn(_("no interrupted transaction available\n"))
777 self.ui.warn(_("no interrupted transaction available\n"))
778 return False
778 return False
779 finally:
779 finally:
780 lock.release()
780 lock.release()
781
781
782 def rollback(self, dryrun=False, force=False):
782 def rollback(self, dryrun=False, force=False):
783 wlock = lock = None
783 wlock = lock = None
784 try:
784 try:
785 wlock = self.wlock()
785 wlock = self.wlock()
786 lock = self.lock()
786 lock = self.lock()
787 if os.path.exists(self.sjoin("undo")):
787 if os.path.exists(self.sjoin("undo")):
788 return self._rollback(dryrun, force)
788 return self._rollback(dryrun, force)
789 else:
789 else:
790 self.ui.warn(_("no rollback information available\n"))
790 self.ui.warn(_("no rollback information available\n"))
791 return 1
791 return 1
792 finally:
792 finally:
793 release(lock, wlock)
793 release(lock, wlock)
794
794
795 def _rollback(self, dryrun, force):
795 def _rollback(self, dryrun, force):
796 ui = self.ui
796 ui = self.ui
797 try:
797 try:
798 args = self.opener.read('undo.desc').splitlines()
798 args = self.opener.read('undo.desc').splitlines()
799 (oldlen, desc, detail) = (int(args[0]), args[1], None)
799 (oldlen, desc, detail) = (int(args[0]), args[1], None)
800 if len(args) >= 3:
800 if len(args) >= 3:
801 detail = args[2]
801 detail = args[2]
802 oldtip = oldlen - 1
802 oldtip = oldlen - 1
803
803
804 if detail and ui.verbose:
804 if detail and ui.verbose:
805 msg = (_('repository tip rolled back to revision %s'
805 msg = (_('repository tip rolled back to revision %s'
806 ' (undo %s: %s)\n')
806 ' (undo %s: %s)\n')
807 % (oldtip, desc, detail))
807 % (oldtip, desc, detail))
808 else:
808 else:
809 msg = (_('repository tip rolled back to revision %s'
809 msg = (_('repository tip rolled back to revision %s'
810 ' (undo %s)\n')
810 ' (undo %s)\n')
811 % (oldtip, desc))
811 % (oldtip, desc))
812 except IOError:
812 except IOError:
813 msg = _('rolling back unknown transaction\n')
813 msg = _('rolling back unknown transaction\n')
814 desc = None
814 desc = None
815
815
816 if not force and self['.'] != self['tip'] and desc == 'commit':
816 if not force and self['.'] != self['tip'] and desc == 'commit':
817 raise util.Abort(
817 raise util.Abort(
818 _('rollback of last commit while not checked out '
818 _('rollback of last commit while not checked out '
819 'may lose data'), hint=_('use -f to force'))
819 'may lose data'), hint=_('use -f to force'))
820
820
821 ui.status(msg)
821 ui.status(msg)
822 if dryrun:
822 if dryrun:
823 return 0
823 return 0
824
824
825 parents = self.dirstate.parents()
825 parents = self.dirstate.parents()
826 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
826 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
827 if os.path.exists(self.join('undo.bookmarks')):
827 if os.path.exists(self.join('undo.bookmarks')):
828 util.rename(self.join('undo.bookmarks'),
828 util.rename(self.join('undo.bookmarks'),
829 self.join('bookmarks'))
829 self.join('bookmarks'))
830 if os.path.exists(self.sjoin('undo.phaseroots')):
830 if os.path.exists(self.sjoin('undo.phaseroots')):
831 util.rename(self.sjoin('undo.phaseroots'),
831 util.rename(self.sjoin('undo.phaseroots'),
832 self.sjoin('phaseroots'))
832 self.sjoin('phaseroots'))
833 self.invalidate()
833 self.invalidate()
834
834
835 parentgone = (parents[0] not in self.changelog.nodemap or
835 parentgone = (parents[0] not in self.changelog.nodemap or
836 parents[1] not in self.changelog.nodemap)
836 parents[1] not in self.changelog.nodemap)
837 if parentgone:
837 if parentgone:
838 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
838 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
839 try:
839 try:
840 branch = self.opener.read('undo.branch')
840 branch = self.opener.read('undo.branch')
841 self.dirstate.setbranch(branch)
841 self.dirstate.setbranch(branch)
842 except IOError:
842 except IOError:
843 ui.warn(_('named branch could not be reset: '
843 ui.warn(_('named branch could not be reset: '
844 'current branch is still \'%s\'\n')
844 'current branch is still \'%s\'\n')
845 % self.dirstate.branch())
845 % self.dirstate.branch())
846
846
847 self.dirstate.invalidate()
847 self.dirstate.invalidate()
848 parents = tuple([p.rev() for p in self.parents()])
848 parents = tuple([p.rev() for p in self.parents()])
849 if len(parents) > 1:
849 if len(parents) > 1:
850 ui.status(_('working directory now based on '
850 ui.status(_('working directory now based on '
851 'revisions %d and %d\n') % parents)
851 'revisions %d and %d\n') % parents)
852 else:
852 else:
853 ui.status(_('working directory now based on '
853 ui.status(_('working directory now based on '
854 'revision %d\n') % parents)
854 'revision %d\n') % parents)
855 self.destroyed()
855 self.destroyed()
856 return 0
856 return 0
857
857
858 def invalidatecaches(self):
858 def invalidatecaches(self):
859 def delcache(name):
859 def delcache(name):
860 try:
860 try:
861 delattr(self, name)
861 delattr(self, name)
862 except AttributeError:
862 except AttributeError:
863 pass
863 pass
864
864
865 delcache('_tagscache')
865 delcache('_tagscache')
866 delcache('_phaserev')
866 delcache('_phaserev')
867
867
868 self._branchcache = None # in UTF-8
868 self._branchcache = None # in UTF-8
869 self._branchcachetip = None
869 self._branchcachetip = None
870
870
871 def invalidatedirstate(self):
871 def invalidatedirstate(self):
872 '''Invalidates the dirstate, causing the next call to dirstate
872 '''Invalidates the dirstate, causing the next call to dirstate
873 to check if it was modified since the last time it was read,
873 to check if it was modified since the last time it was read,
874 rereading it if it has.
874 rereading it if it has.
875
875
876 This is different to dirstate.invalidate() that it doesn't always
876 This is different to dirstate.invalidate() that it doesn't always
877 rereads the dirstate. Use dirstate.invalidate() if you want to
877 rereads the dirstate. Use dirstate.invalidate() if you want to
878 explicitly read the dirstate again (i.e. restoring it to a previous
878 explicitly read the dirstate again (i.e. restoring it to a previous
879 known good state).'''
879 known good state).'''
880 if 'dirstate' in self.__dict__:
880 if 'dirstate' in self.__dict__:
881 for k in self.dirstate._filecache:
881 for k in self.dirstate._filecache:
882 try:
882 try:
883 delattr(self.dirstate, k)
883 delattr(self.dirstate, k)
884 except AttributeError:
884 except AttributeError:
885 pass
885 pass
886 delattr(self, 'dirstate')
886 delattr(self, 'dirstate')
887
887
888 def invalidate(self):
888 def invalidate(self):
889 for k in self._filecache:
889 for k in self._filecache:
890 # dirstate is invalidated separately in invalidatedirstate()
890 # dirstate is invalidated separately in invalidatedirstate()
891 if k == 'dirstate':
891 if k == 'dirstate':
892 continue
892 continue
893
893
894 try:
894 try:
895 delattr(self, k)
895 delattr(self, k)
896 except AttributeError:
896 except AttributeError:
897 pass
897 pass
898 self.invalidatecaches()
898 self.invalidatecaches()
899
899
900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
901 try:
901 try:
902 l = lock.lock(lockname, 0, releasefn, desc=desc)
902 l = lock.lock(lockname, 0, releasefn, desc=desc)
903 except error.LockHeld, inst:
903 except error.LockHeld, inst:
904 if not wait:
904 if not wait:
905 raise
905 raise
906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
907 (desc, inst.locker))
907 (desc, inst.locker))
908 # default to 600 seconds timeout
908 # default to 600 seconds timeout
909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
910 releasefn, desc=desc)
910 releasefn, desc=desc)
911 if acquirefn:
911 if acquirefn:
912 acquirefn()
912 acquirefn()
913 return l
913 return l
914
914
915 def _afterlock(self, callback):
915 def _afterlock(self, callback):
916 """add a callback to the current repository lock.
916 """add a callback to the current repository lock.
917
917
918 The callback will be executed on lock release."""
918 The callback will be executed on lock release."""
919 l = self._lockref and self._lockref()
919 l = self._lockref and self._lockref()
920 if l:
920 if l:
921 l.postrelease.append(callback)
921 l.postrelease.append(callback)
922
922
923 def lock(self, wait=True):
923 def lock(self, wait=True):
924 '''Lock the repository store (.hg/store) and return a weak reference
924 '''Lock the repository store (.hg/store) and return a weak reference
925 to the lock. Use this before modifying the store (e.g. committing or
925 to the lock. Use this before modifying the store (e.g. committing or
926 stripping). If you are opening a transaction, get a lock as well.)'''
926 stripping). If you are opening a transaction, get a lock as well.)'''
927 l = self._lockref and self._lockref()
927 l = self._lockref and self._lockref()
928 if l is not None and l.held:
928 if l is not None and l.held:
929 l.lock()
929 l.lock()
930 return l
930 return l
931
931
932 def unlock():
932 def unlock():
933 self.store.write()
933 self.store.write()
934 if self._dirtyphases:
934 if self._dirtyphases:
935 phases.writeroots(self)
935 phases.writeroots(self, self._phaseroots)
936 self._dirtyphases = False
936 self._dirtyphases = False
937 for k, ce in self._filecache.items():
937 for k, ce in self._filecache.items():
938 if k == 'dirstate':
938 if k == 'dirstate':
939 continue
939 continue
940 ce.refresh()
940 ce.refresh()
941
941
942 l = self._lock(self.sjoin("lock"), wait, unlock,
942 l = self._lock(self.sjoin("lock"), wait, unlock,
943 self.invalidate, _('repository %s') % self.origroot)
943 self.invalidate, _('repository %s') % self.origroot)
944 self._lockref = weakref.ref(l)
944 self._lockref = weakref.ref(l)
945 return l
945 return l
946
946
947 def wlock(self, wait=True):
947 def wlock(self, wait=True):
948 '''Lock the non-store parts of the repository (everything under
948 '''Lock the non-store parts of the repository (everything under
949 .hg except .hg/store) and return a weak reference to the lock.
949 .hg except .hg/store) and return a weak reference to the lock.
950 Use this before modifying files in .hg.'''
950 Use this before modifying files in .hg.'''
951 l = self._wlockref and self._wlockref()
951 l = self._wlockref and self._wlockref()
952 if l is not None and l.held:
952 if l is not None and l.held:
953 l.lock()
953 l.lock()
954 return l
954 return l
955
955
956 def unlock():
956 def unlock():
957 self.dirstate.write()
957 self.dirstate.write()
958 ce = self._filecache.get('dirstate')
958 ce = self._filecache.get('dirstate')
959 if ce:
959 if ce:
960 ce.refresh()
960 ce.refresh()
961
961
962 l = self._lock(self.join("wlock"), wait, unlock,
962 l = self._lock(self.join("wlock"), wait, unlock,
963 self.invalidatedirstate, _('working directory of %s') %
963 self.invalidatedirstate, _('working directory of %s') %
964 self.origroot)
964 self.origroot)
965 self._wlockref = weakref.ref(l)
965 self._wlockref = weakref.ref(l)
966 return l
966 return l
967
967
968 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
968 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
969 """
969 """
970 commit an individual file as part of a larger transaction
970 commit an individual file as part of a larger transaction
971 """
971 """
972
972
973 fname = fctx.path()
973 fname = fctx.path()
974 text = fctx.data()
974 text = fctx.data()
975 flog = self.file(fname)
975 flog = self.file(fname)
976 fparent1 = manifest1.get(fname, nullid)
976 fparent1 = manifest1.get(fname, nullid)
977 fparent2 = fparent2o = manifest2.get(fname, nullid)
977 fparent2 = fparent2o = manifest2.get(fname, nullid)
978
978
979 meta = {}
979 meta = {}
980 copy = fctx.renamed()
980 copy = fctx.renamed()
981 if copy and copy[0] != fname:
981 if copy and copy[0] != fname:
982 # Mark the new revision of this file as a copy of another
982 # Mark the new revision of this file as a copy of another
983 # file. This copy data will effectively act as a parent
983 # file. This copy data will effectively act as a parent
984 # of this new revision. If this is a merge, the first
984 # of this new revision. If this is a merge, the first
985 # parent will be the nullid (meaning "look up the copy data")
985 # parent will be the nullid (meaning "look up the copy data")
986 # and the second one will be the other parent. For example:
986 # and the second one will be the other parent. For example:
987 #
987 #
988 # 0 --- 1 --- 3 rev1 changes file foo
988 # 0 --- 1 --- 3 rev1 changes file foo
989 # \ / rev2 renames foo to bar and changes it
989 # \ / rev2 renames foo to bar and changes it
990 # \- 2 -/ rev3 should have bar with all changes and
990 # \- 2 -/ rev3 should have bar with all changes and
991 # should record that bar descends from
991 # should record that bar descends from
992 # bar in rev2 and foo in rev1
992 # bar in rev2 and foo in rev1
993 #
993 #
994 # this allows this merge to succeed:
994 # this allows this merge to succeed:
995 #
995 #
996 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
996 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
997 # \ / merging rev3 and rev4 should use bar@rev2
997 # \ / merging rev3 and rev4 should use bar@rev2
998 # \- 2 --- 4 as the merge base
998 # \- 2 --- 4 as the merge base
999 #
999 #
1000
1000
1001 cfname = copy[0]
1001 cfname = copy[0]
1002 crev = manifest1.get(cfname)
1002 crev = manifest1.get(cfname)
1003 newfparent = fparent2
1003 newfparent = fparent2
1004
1004
1005 if manifest2: # branch merge
1005 if manifest2: # branch merge
1006 if fparent2 == nullid or crev is None: # copied on remote side
1006 if fparent2 == nullid or crev is None: # copied on remote side
1007 if cfname in manifest2:
1007 if cfname in manifest2:
1008 crev = manifest2[cfname]
1008 crev = manifest2[cfname]
1009 newfparent = fparent1
1009 newfparent = fparent1
1010
1010
1011 # find source in nearest ancestor if we've lost track
1011 # find source in nearest ancestor if we've lost track
1012 if not crev:
1012 if not crev:
1013 self.ui.debug(" %s: searching for copy revision for %s\n" %
1013 self.ui.debug(" %s: searching for copy revision for %s\n" %
1014 (fname, cfname))
1014 (fname, cfname))
1015 for ancestor in self[None].ancestors():
1015 for ancestor in self[None].ancestors():
1016 if cfname in ancestor:
1016 if cfname in ancestor:
1017 crev = ancestor[cfname].filenode()
1017 crev = ancestor[cfname].filenode()
1018 break
1018 break
1019
1019
1020 if crev:
1020 if crev:
1021 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1021 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1022 meta["copy"] = cfname
1022 meta["copy"] = cfname
1023 meta["copyrev"] = hex(crev)
1023 meta["copyrev"] = hex(crev)
1024 fparent1, fparent2 = nullid, newfparent
1024 fparent1, fparent2 = nullid, newfparent
1025 else:
1025 else:
1026 self.ui.warn(_("warning: can't find ancestor for '%s' "
1026 self.ui.warn(_("warning: can't find ancestor for '%s' "
1027 "copied from '%s'!\n") % (fname, cfname))
1027 "copied from '%s'!\n") % (fname, cfname))
1028
1028
1029 elif fparent2 != nullid:
1029 elif fparent2 != nullid:
1030 # is one parent an ancestor of the other?
1030 # is one parent an ancestor of the other?
1031 fparentancestor = flog.ancestor(fparent1, fparent2)
1031 fparentancestor = flog.ancestor(fparent1, fparent2)
1032 if fparentancestor == fparent1:
1032 if fparentancestor == fparent1:
1033 fparent1, fparent2 = fparent2, nullid
1033 fparent1, fparent2 = fparent2, nullid
1034 elif fparentancestor == fparent2:
1034 elif fparentancestor == fparent2:
1035 fparent2 = nullid
1035 fparent2 = nullid
1036
1036
1037 # is the file changed?
1037 # is the file changed?
1038 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1038 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1039 changelist.append(fname)
1039 changelist.append(fname)
1040 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1040 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1041
1041
1042 # are just the flags changed during merge?
1042 # are just the flags changed during merge?
1043 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1043 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1044 changelist.append(fname)
1044 changelist.append(fname)
1045
1045
1046 return fparent1
1046 return fparent1
1047
1047
1048 def commit(self, text="", user=None, date=None, match=None, force=False,
1048 def commit(self, text="", user=None, date=None, match=None, force=False,
1049 editor=False, extra={}):
1049 editor=False, extra={}):
1050 """Add a new revision to current repository.
1050 """Add a new revision to current repository.
1051
1051
1052 Revision information is gathered from the working directory,
1052 Revision information is gathered from the working directory,
1053 match can be used to filter the committed files. If editor is
1053 match can be used to filter the committed files. If editor is
1054 supplied, it is called to get a commit message.
1054 supplied, it is called to get a commit message.
1055 """
1055 """
1056
1056
1057 def fail(f, msg):
1057 def fail(f, msg):
1058 raise util.Abort('%s: %s' % (f, msg))
1058 raise util.Abort('%s: %s' % (f, msg))
1059
1059
1060 if not match:
1060 if not match:
1061 match = matchmod.always(self.root, '')
1061 match = matchmod.always(self.root, '')
1062
1062
1063 if not force:
1063 if not force:
1064 vdirs = []
1064 vdirs = []
1065 match.dir = vdirs.append
1065 match.dir = vdirs.append
1066 match.bad = fail
1066 match.bad = fail
1067
1067
1068 wlock = self.wlock()
1068 wlock = self.wlock()
1069 try:
1069 try:
1070 wctx = self[None]
1070 wctx = self[None]
1071 merge = len(wctx.parents()) > 1
1071 merge = len(wctx.parents()) > 1
1072
1072
1073 if (not force and merge and match and
1073 if (not force and merge and match and
1074 (match.files() or match.anypats())):
1074 (match.files() or match.anypats())):
1075 raise util.Abort(_('cannot partially commit a merge '
1075 raise util.Abort(_('cannot partially commit a merge '
1076 '(do not specify files or patterns)'))
1076 '(do not specify files or patterns)'))
1077
1077
1078 changes = self.status(match=match, clean=force)
1078 changes = self.status(match=match, clean=force)
1079 if force:
1079 if force:
1080 changes[0].extend(changes[6]) # mq may commit unchanged files
1080 changes[0].extend(changes[6]) # mq may commit unchanged files
1081
1081
1082 # check subrepos
1082 # check subrepos
1083 subs = []
1083 subs = []
1084 commitsubs = set()
1084 commitsubs = set()
1085 newstate = wctx.substate.copy()
1085 newstate = wctx.substate.copy()
1086 # only manage subrepos and .hgsubstate if .hgsub is present
1086 # only manage subrepos and .hgsubstate if .hgsub is present
1087 if '.hgsub' in wctx:
1087 if '.hgsub' in wctx:
1088 # we'll decide whether to track this ourselves, thanks
1088 # we'll decide whether to track this ourselves, thanks
1089 if '.hgsubstate' in changes[0]:
1089 if '.hgsubstate' in changes[0]:
1090 changes[0].remove('.hgsubstate')
1090 changes[0].remove('.hgsubstate')
1091 if '.hgsubstate' in changes[2]:
1091 if '.hgsubstate' in changes[2]:
1092 changes[2].remove('.hgsubstate')
1092 changes[2].remove('.hgsubstate')
1093
1093
1094 # compare current state to last committed state
1094 # compare current state to last committed state
1095 # build new substate based on last committed state
1095 # build new substate based on last committed state
1096 oldstate = wctx.p1().substate
1096 oldstate = wctx.p1().substate
1097 for s in sorted(newstate.keys()):
1097 for s in sorted(newstate.keys()):
1098 if not match(s):
1098 if not match(s):
1099 # ignore working copy, use old state if present
1099 # ignore working copy, use old state if present
1100 if s in oldstate:
1100 if s in oldstate:
1101 newstate[s] = oldstate[s]
1101 newstate[s] = oldstate[s]
1102 continue
1102 continue
1103 if not force:
1103 if not force:
1104 raise util.Abort(
1104 raise util.Abort(
1105 _("commit with new subrepo %s excluded") % s)
1105 _("commit with new subrepo %s excluded") % s)
1106 if wctx.sub(s).dirty(True):
1106 if wctx.sub(s).dirty(True):
1107 if not self.ui.configbool('ui', 'commitsubrepos'):
1107 if not self.ui.configbool('ui', 'commitsubrepos'):
1108 raise util.Abort(
1108 raise util.Abort(
1109 _("uncommitted changes in subrepo %s") % s,
1109 _("uncommitted changes in subrepo %s") % s,
1110 hint=_("use --subrepos for recursive commit"))
1110 hint=_("use --subrepos for recursive commit"))
1111 subs.append(s)
1111 subs.append(s)
1112 commitsubs.add(s)
1112 commitsubs.add(s)
1113 else:
1113 else:
1114 bs = wctx.sub(s).basestate()
1114 bs = wctx.sub(s).basestate()
1115 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1115 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1116 if oldstate.get(s, (None, None, None))[1] != bs:
1116 if oldstate.get(s, (None, None, None))[1] != bs:
1117 subs.append(s)
1117 subs.append(s)
1118
1118
1119 # check for removed subrepos
1119 # check for removed subrepos
1120 for p in wctx.parents():
1120 for p in wctx.parents():
1121 r = [s for s in p.substate if s not in newstate]
1121 r = [s for s in p.substate if s not in newstate]
1122 subs += [s for s in r if match(s)]
1122 subs += [s for s in r if match(s)]
1123 if subs:
1123 if subs:
1124 if (not match('.hgsub') and
1124 if (not match('.hgsub') and
1125 '.hgsub' in (wctx.modified() + wctx.added())):
1125 '.hgsub' in (wctx.modified() + wctx.added())):
1126 raise util.Abort(
1126 raise util.Abort(
1127 _("can't commit subrepos without .hgsub"))
1127 _("can't commit subrepos without .hgsub"))
1128 changes[0].insert(0, '.hgsubstate')
1128 changes[0].insert(0, '.hgsubstate')
1129
1129
1130 elif '.hgsub' in changes[2]:
1130 elif '.hgsub' in changes[2]:
1131 # clean up .hgsubstate when .hgsub is removed
1131 # clean up .hgsubstate when .hgsub is removed
1132 if ('.hgsubstate' in wctx and
1132 if ('.hgsubstate' in wctx and
1133 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1133 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1134 changes[2].insert(0, '.hgsubstate')
1134 changes[2].insert(0, '.hgsubstate')
1135
1135
1136 # make sure all explicit patterns are matched
1136 # make sure all explicit patterns are matched
1137 if not force and match.files():
1137 if not force and match.files():
1138 matched = set(changes[0] + changes[1] + changes[2])
1138 matched = set(changes[0] + changes[1] + changes[2])
1139
1139
1140 for f in match.files():
1140 for f in match.files():
1141 if f == '.' or f in matched or f in wctx.substate:
1141 if f == '.' or f in matched or f in wctx.substate:
1142 continue
1142 continue
1143 if f in changes[3]: # missing
1143 if f in changes[3]: # missing
1144 fail(f, _('file not found!'))
1144 fail(f, _('file not found!'))
1145 if f in vdirs: # visited directory
1145 if f in vdirs: # visited directory
1146 d = f + '/'
1146 d = f + '/'
1147 for mf in matched:
1147 for mf in matched:
1148 if mf.startswith(d):
1148 if mf.startswith(d):
1149 break
1149 break
1150 else:
1150 else:
1151 fail(f, _("no match under directory!"))
1151 fail(f, _("no match under directory!"))
1152 elif f not in self.dirstate:
1152 elif f not in self.dirstate:
1153 fail(f, _("file not tracked!"))
1153 fail(f, _("file not tracked!"))
1154
1154
1155 if (not force and not extra.get("close") and not merge
1155 if (not force and not extra.get("close") and not merge
1156 and not (changes[0] or changes[1] or changes[2])
1156 and not (changes[0] or changes[1] or changes[2])
1157 and wctx.branch() == wctx.p1().branch()):
1157 and wctx.branch() == wctx.p1().branch()):
1158 return None
1158 return None
1159
1159
1160 if merge and changes[3]:
1160 if merge and changes[3]:
1161 raise util.Abort(_("cannot commit merge with missing files"))
1161 raise util.Abort(_("cannot commit merge with missing files"))
1162
1162
1163 ms = mergemod.mergestate(self)
1163 ms = mergemod.mergestate(self)
1164 for f in changes[0]:
1164 for f in changes[0]:
1165 if f in ms and ms[f] == 'u':
1165 if f in ms and ms[f] == 'u':
1166 raise util.Abort(_("unresolved merge conflicts "
1166 raise util.Abort(_("unresolved merge conflicts "
1167 "(see hg help resolve)"))
1167 "(see hg help resolve)"))
1168
1168
1169 cctx = context.workingctx(self, text, user, date, extra, changes)
1169 cctx = context.workingctx(self, text, user, date, extra, changes)
1170 if editor:
1170 if editor:
1171 cctx._text = editor(self, cctx, subs)
1171 cctx._text = editor(self, cctx, subs)
1172 edited = (text != cctx._text)
1172 edited = (text != cctx._text)
1173
1173
1174 # commit subs and write new state
1174 # commit subs and write new state
1175 if subs:
1175 if subs:
1176 for s in sorted(commitsubs):
1176 for s in sorted(commitsubs):
1177 sub = wctx.sub(s)
1177 sub = wctx.sub(s)
1178 self.ui.status(_('committing subrepository %s\n') %
1178 self.ui.status(_('committing subrepository %s\n') %
1179 subrepo.subrelpath(sub))
1179 subrepo.subrelpath(sub))
1180 sr = sub.commit(cctx._text, user, date)
1180 sr = sub.commit(cctx._text, user, date)
1181 newstate[s] = (newstate[s][0], sr)
1181 newstate[s] = (newstate[s][0], sr)
1182 subrepo.writestate(self, newstate)
1182 subrepo.writestate(self, newstate)
1183
1183
1184 # Save commit message in case this transaction gets rolled back
1184 # Save commit message in case this transaction gets rolled back
1185 # (e.g. by a pretxncommit hook). Leave the content alone on
1185 # (e.g. by a pretxncommit hook). Leave the content alone on
1186 # the assumption that the user will use the same editor again.
1186 # the assumption that the user will use the same editor again.
1187 msgfn = self.savecommitmessage(cctx._text)
1187 msgfn = self.savecommitmessage(cctx._text)
1188
1188
1189 p1, p2 = self.dirstate.parents()
1189 p1, p2 = self.dirstate.parents()
1190 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1190 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1191 try:
1191 try:
1192 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1192 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1193 ret = self.commitctx(cctx, True)
1193 ret = self.commitctx(cctx, True)
1194 except:
1194 except:
1195 if edited:
1195 if edited:
1196 self.ui.write(
1196 self.ui.write(
1197 _('note: commit message saved in %s\n') % msgfn)
1197 _('note: commit message saved in %s\n') % msgfn)
1198 raise
1198 raise
1199
1199
1200 # update bookmarks, dirstate and mergestate
1200 # update bookmarks, dirstate and mergestate
1201 bookmarks.update(self, p1, ret)
1201 bookmarks.update(self, p1, ret)
1202 for f in changes[0] + changes[1]:
1202 for f in changes[0] + changes[1]:
1203 self.dirstate.normal(f)
1203 self.dirstate.normal(f)
1204 for f in changes[2]:
1204 for f in changes[2]:
1205 self.dirstate.drop(f)
1205 self.dirstate.drop(f)
1206 self.dirstate.setparents(ret)
1206 self.dirstate.setparents(ret)
1207 ms.reset()
1207 ms.reset()
1208 finally:
1208 finally:
1209 wlock.release()
1209 wlock.release()
1210
1210
1211 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1211 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1212 return ret
1212 return ret
1213
1213
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx   - context (e.g. workingctx) describing the revision: files,
                parents, user, date, description and extra metadata
        error - when True, an IOError on a missing file is fatal instead
                of silently treating the file as removed

        Returns the new changelog node.  Runs under the repository lock
        and a "commit" transaction; the transaction is released (rolled
        back if not closed) on any failure.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # weak proxy: filelog/manifest/changelog must not keep the
            # transaction alive past its release below
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                # the new changeset will be appended at the current tip,
                # so its revision number (linkrev) is the current length
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        # a vanished file (ENOENT) is demoted to a removal
                        # unless the caller asked for strict error handling
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                # only report removals of files that actually exist in a
                # parent manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # pending-changes callback for the pretxncommit hook: flush the
            # delayed changelog writes and expose them via the repo root
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # move the new commit into the proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                # no-op if tr.close() succeeded; rolls back otherwise
                tr.release()
            lock.release()
1294
1294
1295 def destroyed(self):
1295 def destroyed(self):
1296 '''Inform the repository that nodes have been destroyed.
1296 '''Inform the repository that nodes have been destroyed.
1297 Intended for use by strip and rollback, so there's a common
1297 Intended for use by strip and rollback, so there's a common
1298 place for anything that has to be done after destroying history.'''
1298 place for anything that has to be done after destroying history.'''
1299 # XXX it might be nice if we could take the list of destroyed
1299 # XXX it might be nice if we could take the list of destroyed
1300 # nodes, but I don't see an easy way for rollback() to do that
1300 # nodes, but I don't see an easy way for rollback() to do that
1301
1301
1302 # Ensure the persistent tag cache is updated. Doing it now
1302 # Ensure the persistent tag cache is updated. Doing it now
1303 # means that the tag cache only has to worry about destroyed
1303 # means that the tag cache only has to worry about destroyed
1304 # heads immediately after a strip/rollback. That in turn
1304 # heads immediately after a strip/rollback. That in turn
1305 # guarantees that "cachetip == currenttip" (comparing both rev
1305 # guarantees that "cachetip == currenttip" (comparing both rev
1306 # and node) always means no nodes have been added or destroyed.
1306 # and node) always means no nodes have been added or destroyed.
1307
1307
1308 # XXX this is suboptimal when qrefresh'ing: we strip the current
1308 # XXX this is suboptimal when qrefresh'ing: we strip the current
1309 # head, refresh the tag cache, then immediately add a new head.
1309 # head, refresh the tag cache, then immediately add a new head.
1310 # But I think doing it this way is necessary for the "instant
1310 # But I think doing it this way is necessary for the "instant
1311 # tag cache retrieval" case to work.
1311 # tag cache retrieval" case to work.
1312 self.invalidatecaches()
1312 self.invalidatecaches()
1313
1313
1314 # Discard all cache entries to force reloading everything.
1314 # Discard all cache entries to force reloading everything.
1315 self._filecache.clear()
1315 self._filecache.clear()
1316
1316
1317 def walk(self, match, node=None):
1317 def walk(self, match, node=None):
1318 '''
1318 '''
1319 walk recursively through the directory tree or a given
1319 walk recursively through the directory tree or a given
1320 changeset, finding all files matched by the match
1320 changeset, finding all files matched by the match
1321 function
1321 function
1322 '''
1322 '''
1323 return self[node].walk(match)
1323 return self[node].walk(match)
1324
1324
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by 'match'
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        # node1/node2 may already be contexts (callers pass either)
        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # 'cmp' holds files the dirstate could not classify without
            # comparing contents
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        # best-effort: skip the writeback if locked
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    # None marks "contents unknown, must cmp" below
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # classify every file in mf2 against mf1; whatever remains
            # in mf1 afterwards was removed
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # fold each subrepo's status into the result, prefixing paths
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1473
1473
1474 def heads(self, start=None):
1474 def heads(self, start=None):
1475 heads = self.changelog.heads(start)
1475 heads = self.changelog.heads(start)
1476 # sort the output in rev descending order
1476 # sort the output in rev descending order
1477 return sorted(heads, key=self.changelog.rev, reverse=True)
1477 return sorted(heads, key=self.changelog.rev, reverse=True)
1478
1478
1479 def branchheads(self, branch=None, start=None, closed=False):
1479 def branchheads(self, branch=None, start=None, closed=False):
1480 '''return a (possibly filtered) list of heads for the given branch
1480 '''return a (possibly filtered) list of heads for the given branch
1481
1481
1482 Heads are returned in topological order, from newest to oldest.
1482 Heads are returned in topological order, from newest to oldest.
1483 If branch is None, use the dirstate branch.
1483 If branch is None, use the dirstate branch.
1484 If start is not None, return only heads reachable from start.
1484 If start is not None, return only heads reachable from start.
1485 If closed is True, return heads that are marked as closed as well.
1485 If closed is True, return heads that are marked as closed as well.
1486 '''
1486 '''
1487 if branch is None:
1487 if branch is None:
1488 branch = self[None].branch()
1488 branch = self[None].branch()
1489 branches = self.branchmap()
1489 branches = self.branchmap()
1490 if branch not in branches:
1490 if branch not in branches:
1491 return []
1491 return []
1492 # the cache returns heads ordered lowest to highest
1492 # the cache returns heads ordered lowest to highest
1493 bheads = list(reversed(branches[branch]))
1493 bheads = list(reversed(branches[branch]))
1494 if start is not None:
1494 if start is not None:
1495 # filter out the heads that cannot be reached from startrev
1495 # filter out the heads that cannot be reached from startrev
1496 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1496 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1497 bheads = [h for h in bheads if h in fbheads]
1497 bheads = [h for h in bheads if h in fbheads]
1498 if not closed:
1498 if not closed:
1499 bheads = [h for h in bheads if
1499 bheads = [h for h in bheads if
1500 ('close' not in self.changelog.read(h)[5])]
1500 ('close' not in self.changelog.read(h)[5])]
1501 return bheads
1501 return bheads
1502
1502
1503 def branches(self, nodes):
1503 def branches(self, nodes):
1504 if not nodes:
1504 if not nodes:
1505 nodes = [self.changelog.tip()]
1505 nodes = [self.changelog.tip()]
1506 b = []
1506 b = []
1507 for n in nodes:
1507 for n in nodes:
1508 t = n
1508 t = n
1509 while True:
1509 while True:
1510 p = self.changelog.parents(n)
1510 p = self.changelog.parents(n)
1511 if p[1] != nullid or p[0] == nullid:
1511 if p[1] != nullid or p[0] == nullid:
1512 b.append((t, n, p[0], p[1]))
1512 b.append((t, n, p[0], p[1]))
1513 break
1513 break
1514 n = p[0]
1514 n = p[0]
1515 return b
1515 return b
1516
1516
1517 def between(self, pairs):
1517 def between(self, pairs):
1518 r = []
1518 r = []
1519
1519
1520 for top, bottom in pairs:
1520 for top, bottom in pairs:
1521 n, l, i = top, [], 0
1521 n, l, i = top, [], 0
1522 f = 1
1522 f = 1
1523
1523
1524 while n != bottom and n != nullid:
1524 while n != bottom and n != nullid:
1525 p = self.changelog.parents(n)[0]
1525 p = self.changelog.parents(n)[0]
1526 if i == f:
1526 if i == f:
1527 l.append(n)
1527 l.append(n)
1528 f = f * 2
1528 f = f * 2
1529 n = p
1529 n = p
1530 i += 1
1530 i += 1
1531
1531
1532 r.append(l)
1532 r.append(l)
1533
1533
1534 return r
1534 return r
1535
1535
1536 def pull(self, remote, heads=None, force=False):
1536 def pull(self, remote, heads=None, force=False):
1537 lock = self.lock()
1537 lock = self.lock()
1538 try:
1538 try:
1539 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1539 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1540 force=force)
1540 force=force)
1541 common, fetch, rheads = tmp
1541 common, fetch, rheads = tmp
1542 if not fetch:
1542 if not fetch:
1543 self.ui.status(_("no changes found\n"))
1543 self.ui.status(_("no changes found\n"))
1544 added = []
1544 added = []
1545 result = 0
1545 result = 0
1546 else:
1546 else:
1547 if heads is None and list(common) == [nullid]:
1547 if heads is None and list(common) == [nullid]:
1548 self.ui.status(_("requesting all changes\n"))
1548 self.ui.status(_("requesting all changes\n"))
1549 elif heads is None and remote.capable('changegroupsubset'):
1549 elif heads is None and remote.capable('changegroupsubset'):
1550 # issue1320, avoid a race if remote changed after discovery
1550 # issue1320, avoid a race if remote changed after discovery
1551 heads = rheads
1551 heads = rheads
1552
1552
1553 if remote.capable('getbundle'):
1553 if remote.capable('getbundle'):
1554 cg = remote.getbundle('pull', common=common,
1554 cg = remote.getbundle('pull', common=common,
1555 heads=heads or rheads)
1555 heads=heads or rheads)
1556 elif heads is None:
1556 elif heads is None:
1557 cg = remote.changegroup(fetch, 'pull')
1557 cg = remote.changegroup(fetch, 'pull')
1558 elif not remote.capable('changegroupsubset'):
1558 elif not remote.capable('changegroupsubset'):
1559 raise util.Abort(_("partial pull cannot be done because "
1559 raise util.Abort(_("partial pull cannot be done because "
1560 "other repository doesn't support "
1560 "other repository doesn't support "
1561 "changegroupsubset."))
1561 "changegroupsubset."))
1562 else:
1562 else:
1563 cg = remote.changegroupsubset(fetch, heads, 'pull')
1563 cg = remote.changegroupsubset(fetch, heads, 'pull')
1564 clstart = len(self.changelog)
1564 clstart = len(self.changelog)
1565 result = self.addchangegroup(cg, 'pull', remote.url())
1565 result = self.addchangegroup(cg, 'pull', remote.url())
1566 clend = len(self.changelog)
1566 clend = len(self.changelog)
1567 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1567 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1568
1568
1569 # compute target subset
1569 # compute target subset
1570 if heads is None:
1570 if heads is None:
1571 # We pulled every thing possible
1571 # We pulled every thing possible
1572 # sync on everything common
1572 # sync on everything common
1573 subset = common + added
1573 subset = common + added
1574 else:
1574 else:
1575 # We pulled a specific subset
1575 # We pulled a specific subset
1576 # sync on this subset
1576 # sync on this subset
1577 subset = heads
1577 subset = heads
1578
1578
1579 # Get remote phases data from remote
1579 # Get remote phases data from remote
1580 remotephases = remote.listkeys('phases')
1580 remotephases = remote.listkeys('phases')
1581 publishing = bool(remotephases.get('publishing', False))
1581 publishing = bool(remotephases.get('publishing', False))
1582 if remotephases and not publishing:
1582 if remotephases and not publishing:
1583 # remote is new and unpublishing
1583 # remote is new and unpublishing
1584 pheads, _dr = phases.analyzeremotephases(self, subset,
1584 pheads, _dr = phases.analyzeremotephases(self, subset,
1585 remotephases)
1585 remotephases)
1586 phases.advanceboundary(self, phases.public, pheads)
1586 phases.advanceboundary(self, phases.public, pheads)
1587 phases.advanceboundary(self, phases.draft, subset)
1587 phases.advanceboundary(self, phases.draft, subset)
1588 else:
1588 else:
1589 # Remote is old or publishing all common changesets
1589 # Remote is old or publishing all common changesets
1590 # should be seen as public
1590 # should be seen as public
1591 phases.advanceboundary(self, phases.public, subset)
1591 phases.advanceboundary(self, phases.public, subset)
1592 finally:
1592 finally:
1593 lock.release()
1593 lock.release()
1594
1594
1595 return result
1595 return result
1596
1596
1597 def checkpush(self, force, revs):
1597 def checkpush(self, force, revs):
1598 """Extensions can override this function if additional checks have
1598 """Extensions can override this function if additional checks have
1599 to be performed before pushing, or call it if they override push
1599 to be performed before pushing, or call it if they override push
1600 command.
1600 command.
1601 """
1601 """
1602 pass
1602 pass
1603
1603
1604 def push(self, remote, force=False, revs=None, newbranch=False):
1604 def push(self, remote, force=False, revs=None, newbranch=False):
1605 '''Push outgoing changesets (limited by revs) from the current
1605 '''Push outgoing changesets (limited by revs) from the current
1606 repository to remote. Return an integer:
1606 repository to remote. Return an integer:
1607 - None means nothing to push
1607 - None means nothing to push
1608 - 0 means HTTP error
1608 - 0 means HTTP error
1609 - 1 means we pushed and remote head count is unchanged *or*
1609 - 1 means we pushed and remote head count is unchanged *or*
1610 we have outgoing changesets but refused to push
1610 we have outgoing changesets but refused to push
1611 - other values as described by addchangegroup()
1611 - other values as described by addchangegroup()
1612 '''
1612 '''
1613 # there are two ways to push to remote repo:
1613 # there are two ways to push to remote repo:
1614 #
1614 #
1615 # addchangegroup assumes local user can lock remote
1615 # addchangegroup assumes local user can lock remote
1616 # repo (local filesystem, old ssh servers).
1616 # repo (local filesystem, old ssh servers).
1617 #
1617 #
1618 # unbundle assumes local user cannot lock remote repo (new ssh
1618 # unbundle assumes local user cannot lock remote repo (new ssh
1619 # servers, http servers).
1619 # servers, http servers).
1620
1620
1621 # get local lock as we might write phase data
1621 # get local lock as we might write phase data
1622 locallock = self.lock()
1622 locallock = self.lock()
1623 try:
1623 try:
1624 self.checkpush(force, revs)
1624 self.checkpush(force, revs)
1625 lock = None
1625 lock = None
1626 unbundle = remote.capable('unbundle')
1626 unbundle = remote.capable('unbundle')
1627 if not unbundle:
1627 if not unbundle:
1628 lock = remote.lock()
1628 lock = remote.lock()
1629 try:
1629 try:
1630 # discovery
1630 # discovery
1631 fci = discovery.findcommonincoming
1631 fci = discovery.findcommonincoming
1632 commoninc = fci(self, remote, force=force)
1632 commoninc = fci(self, remote, force=force)
1633 common, inc, remoteheads = commoninc
1633 common, inc, remoteheads = commoninc
1634 fco = discovery.findcommonoutgoing
1634 fco = discovery.findcommonoutgoing
1635 outgoing = fco(self, remote, onlyheads=revs,
1635 outgoing = fco(self, remote, onlyheads=revs,
1636 commoninc=commoninc, force=force)
1636 commoninc=commoninc, force=force)
1637
1637
1638
1638
1639 if not outgoing.missing:
1639 if not outgoing.missing:
1640 # nothing to push
1640 # nothing to push
1641 scmutil.nochangesfound(self.ui, outgoing.excluded)
1641 scmutil.nochangesfound(self.ui, outgoing.excluded)
1642 ret = None
1642 ret = None
1643 else:
1643 else:
1644 # something to push
1644 # something to push
1645 if not force:
1645 if not force:
1646 discovery.checkheads(self, remote, outgoing,
1646 discovery.checkheads(self, remote, outgoing,
1647 remoteheads, newbranch,
1647 remoteheads, newbranch,
1648 bool(inc))
1648 bool(inc))
1649
1649
1650 # create a changegroup from local
1650 # create a changegroup from local
1651 if revs is None and not outgoing.excluded:
1651 if revs is None and not outgoing.excluded:
1652 # push everything,
1652 # push everything,
1653 # use the fast path, no race possible on push
1653 # use the fast path, no race possible on push
1654 cg = self._changegroup(outgoing.missing, 'push')
1654 cg = self._changegroup(outgoing.missing, 'push')
1655 else:
1655 else:
1656 cg = self.getlocalbundle('push', outgoing)
1656 cg = self.getlocalbundle('push', outgoing)
1657
1657
1658 # apply changegroup to remote
1658 # apply changegroup to remote
1659 if unbundle:
1659 if unbundle:
1660 # local repo finds heads on server, finds out what
1660 # local repo finds heads on server, finds out what
1661 # revs it must push. once revs transferred, if server
1661 # revs it must push. once revs transferred, if server
1662 # finds it has different heads (someone else won
1662 # finds it has different heads (someone else won
1663 # commit/push race), server aborts.
1663 # commit/push race), server aborts.
1664 if force:
1664 if force:
1665 remoteheads = ['force']
1665 remoteheads = ['force']
1666 # ssh: return remote's addchangegroup()
1666 # ssh: return remote's addchangegroup()
1667 # http: return remote's addchangegroup() or 0 for error
1667 # http: return remote's addchangegroup() or 0 for error
1668 ret = remote.unbundle(cg, remoteheads, 'push')
1668 ret = remote.unbundle(cg, remoteheads, 'push')
1669 else:
1669 else:
1670 # we return an integer indicating remote head count change
1670 # we return an integer indicating remote head count change
1671 ret = remote.addchangegroup(cg, 'push', self.url())
1671 ret = remote.addchangegroup(cg, 'push', self.url())
1672
1672
1673 if ret:
1673 if ret:
1674 # push succeed, synchonize target of the push
1674 # push succeed, synchonize target of the push
1675 cheads = outgoing.missingheads
1675 cheads = outgoing.missingheads
1676 elif revs is None:
1676 elif revs is None:
1677 # All out push fails. synchronize all common
1677 # All out push fails. synchronize all common
1678 cheads = outgoing.commonheads
1678 cheads = outgoing.commonheads
1679 else:
1679 else:
1680 # I want cheads = heads(::missingheads and ::commonheads)
1680 # I want cheads = heads(::missingheads and ::commonheads)
1681 # (missingheads is revs with secret changeset filtered out)
1681 # (missingheads is revs with secret changeset filtered out)
1682 #
1682 #
1683 # This can be expressed as:
1683 # This can be expressed as:
1684 # cheads = ( (missingheads and ::commonheads)
1684 # cheads = ( (missingheads and ::commonheads)
1685 # + (commonheads and ::missingheads))"
1685 # + (commonheads and ::missingheads))"
1686 # )
1686 # )
1687 #
1687 #
1688 # while trying to push we already computed the following:
1688 # while trying to push we already computed the following:
1689 # common = (::commonheads)
1689 # common = (::commonheads)
1690 # missing = ((commonheads::missingheads) - commonheads)
1690 # missing = ((commonheads::missingheads) - commonheads)
1691 #
1691 #
1692 # We can pick:
1692 # We can pick:
1693 # * missingheads part of comon (::commonheads)
1693 # * missingheads part of comon (::commonheads)
1694 common = set(outgoing.common)
1694 common = set(outgoing.common)
1695 cheads = [node for node in revs if node in common]
1695 cheads = [node for node in revs if node in common]
1696 # and
1696 # and
1697 # * commonheads parents on missing
1697 # * commonheads parents on missing
1698 revset = self.set('%ln and parents(roots(%ln))',
1698 revset = self.set('%ln and parents(roots(%ln))',
1699 outgoing.commonheads,
1699 outgoing.commonheads,
1700 outgoing.missing)
1700 outgoing.missing)
1701 cheads.extend(c.node() for c in revset)
1701 cheads.extend(c.node() for c in revset)
1702 # even when we don't push, exchanging phase data is useful
1702 # even when we don't push, exchanging phase data is useful
1703 remotephases = remote.listkeys('phases')
1703 remotephases = remote.listkeys('phases')
1704 if not remotephases: # old server or public only repo
1704 if not remotephases: # old server or public only repo
1705 phases.advanceboundary(self, phases.public, cheads)
1705 phases.advanceboundary(self, phases.public, cheads)
1706 # don't push any phase data as there is nothing to push
1706 # don't push any phase data as there is nothing to push
1707 else:
1707 else:
1708 ana = phases.analyzeremotephases(self, cheads, remotephases)
1708 ana = phases.analyzeremotephases(self, cheads, remotephases)
1709 pheads, droots = ana
1709 pheads, droots = ana
1710 ### Apply remote phase on local
1710 ### Apply remote phase on local
1711 if remotephases.get('publishing', False):
1711 if remotephases.get('publishing', False):
1712 phases.advanceboundary(self, phases.public, cheads)
1712 phases.advanceboundary(self, phases.public, cheads)
1713 else: # publish = False
1713 else: # publish = False
1714 phases.advanceboundary(self, phases.public, pheads)
1714 phases.advanceboundary(self, phases.public, pheads)
1715 phases.advanceboundary(self, phases.draft, cheads)
1715 phases.advanceboundary(self, phases.draft, cheads)
1716 ### Apply local phase on remote
1716 ### Apply local phase on remote
1717
1717
1718 # Get the list of all revs draft on remote by public here.
1718 # Get the list of all revs draft on remote by public here.
1719 # XXX Beware that revset break if droots is not strictly
1719 # XXX Beware that revset break if droots is not strictly
1720 # XXX root we may want to ensure it is but it is costly
1720 # XXX root we may want to ensure it is but it is costly
1721 outdated = self.set('heads((%ln::%ln) and public())',
1721 outdated = self.set('heads((%ln::%ln) and public())',
1722 droots, cheads)
1722 droots, cheads)
1723 for newremotehead in outdated:
1723 for newremotehead in outdated:
1724 r = remote.pushkey('phases',
1724 r = remote.pushkey('phases',
1725 newremotehead.hex(),
1725 newremotehead.hex(),
1726 str(phases.draft),
1726 str(phases.draft),
1727 str(phases.public))
1727 str(phases.public))
1728 if not r:
1728 if not r:
1729 self.ui.warn(_('updating %s to public failed!\n')
1729 self.ui.warn(_('updating %s to public failed!\n')
1730 % newremotehead)
1730 % newremotehead)
1731 finally:
1731 finally:
1732 if lock is not None:
1732 if lock is not None:
1733 lock.release()
1733 lock.release()
1734 finally:
1734 finally:
1735 locallock.release()
1735 locallock.release()
1736
1736
1737 self.ui.debug("checking for updated bookmarks\n")
1737 self.ui.debug("checking for updated bookmarks\n")
1738 rb = remote.listkeys('bookmarks')
1738 rb = remote.listkeys('bookmarks')
1739 for k in rb.keys():
1739 for k in rb.keys():
1740 if k in self._bookmarks:
1740 if k in self._bookmarks:
1741 nr, nl = rb[k], hex(self._bookmarks[k])
1741 nr, nl = rb[k], hex(self._bookmarks[k])
1742 if nr in self:
1742 if nr in self:
1743 cr = self[nr]
1743 cr = self[nr]
1744 cl = self[nl]
1744 cl = self[nl]
1745 if cl in cr.descendants():
1745 if cl in cr.descendants():
1746 r = remote.pushkey('bookmarks', k, nr, nl)
1746 r = remote.pushkey('bookmarks', k, nr, nl)
1747 if r:
1747 if r:
1748 self.ui.status(_("updating bookmark %s\n") % k)
1748 self.ui.status(_("updating bookmark %s\n") % k)
1749 else:
1749 else:
1750 self.ui.warn(_('updating bookmark %s'
1750 self.ui.warn(_('updating bookmark %s'
1751 ' failed!\n') % k)
1751 ' failed!\n') % k)
1752
1752
1753 return ret
1753 return ret
1754
1754
1755 def changegroupinfo(self, nodes, source):
1755 def changegroupinfo(self, nodes, source):
1756 if self.ui.verbose or source == 'bundle':
1756 if self.ui.verbose or source == 'bundle':
1757 self.ui.status(_("%d changesets found\n") % len(nodes))
1757 self.ui.status(_("%d changesets found\n") % len(nodes))
1758 if self.ui.debugflag:
1758 if self.ui.debugflag:
1759 self.ui.debug("list of changesets:\n")
1759 self.ui.debug("list of changesets:\n")
1760 for node in nodes:
1760 for node in nodes:
1761 self.ui.debug("%s\n" % hex(node))
1761 self.ui.debug("%s\n" % hex(node))
1762
1762
1763 def changegroupsubset(self, bases, heads, source):
1763 def changegroupsubset(self, bases, heads, source):
1764 """Compute a changegroup consisting of all the nodes that are
1764 """Compute a changegroup consisting of all the nodes that are
1765 descendants of any of the bases and ancestors of any of the heads.
1765 descendants of any of the bases and ancestors of any of the heads.
1766 Return a chunkbuffer object whose read() method will return
1766 Return a chunkbuffer object whose read() method will return
1767 successive changegroup chunks.
1767 successive changegroup chunks.
1768
1768
1769 It is fairly complex as determining which filenodes and which
1769 It is fairly complex as determining which filenodes and which
1770 manifest nodes need to be included for the changeset to be complete
1770 manifest nodes need to be included for the changeset to be complete
1771 is non-trivial.
1771 is non-trivial.
1772
1772
1773 Another wrinkle is doing the reverse, figuring out which changeset in
1773 Another wrinkle is doing the reverse, figuring out which changeset in
1774 the changegroup a particular filenode or manifestnode belongs to.
1774 the changegroup a particular filenode or manifestnode belongs to.
1775 """
1775 """
1776 cl = self.changelog
1776 cl = self.changelog
1777 if not bases:
1777 if not bases:
1778 bases = [nullid]
1778 bases = [nullid]
1779 csets, bases, heads = cl.nodesbetween(bases, heads)
1779 csets, bases, heads = cl.nodesbetween(bases, heads)
1780 # We assume that all ancestors of bases are known
1780 # We assume that all ancestors of bases are known
1781 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1781 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1782 return self._changegroupsubset(common, csets, heads, source)
1782 return self._changegroupsubset(common, csets, heads, source)
1783
1783
1784 def getlocalbundle(self, source, outgoing):
1784 def getlocalbundle(self, source, outgoing):
1785 """Like getbundle, but taking a discovery.outgoing as an argument.
1785 """Like getbundle, but taking a discovery.outgoing as an argument.
1786
1786
1787 This is only implemented for local repos and reuses potentially
1787 This is only implemented for local repos and reuses potentially
1788 precomputed sets in outgoing."""
1788 precomputed sets in outgoing."""
1789 if not outgoing.missing:
1789 if not outgoing.missing:
1790 return None
1790 return None
1791 return self._changegroupsubset(outgoing.common,
1791 return self._changegroupsubset(outgoing.common,
1792 outgoing.missing,
1792 outgoing.missing,
1793 outgoing.missingheads,
1793 outgoing.missingheads,
1794 source)
1794 source)
1795
1795
1796 def getbundle(self, source, heads=None, common=None):
1796 def getbundle(self, source, heads=None, common=None):
1797 """Like changegroupsubset, but returns the set difference between the
1797 """Like changegroupsubset, but returns the set difference between the
1798 ancestors of heads and the ancestors common.
1798 ancestors of heads and the ancestors common.
1799
1799
1800 If heads is None, use the local heads. If common is None, use [nullid].
1800 If heads is None, use the local heads. If common is None, use [nullid].
1801
1801
1802 The nodes in common might not all be known locally due to the way the
1802 The nodes in common might not all be known locally due to the way the
1803 current discovery protocol works.
1803 current discovery protocol works.
1804 """
1804 """
1805 cl = self.changelog
1805 cl = self.changelog
1806 if common:
1806 if common:
1807 nm = cl.nodemap
1807 nm = cl.nodemap
1808 common = [n for n in common if n in nm]
1808 common = [n for n in common if n in nm]
1809 else:
1809 else:
1810 common = [nullid]
1810 common = [nullid]
1811 if not heads:
1811 if not heads:
1812 heads = cl.heads()
1812 heads = cl.heads()
1813 return self.getlocalbundle(source,
1813 return self.getlocalbundle(source,
1814 discovery.outgoing(cl, common, heads))
1814 discovery.outgoing(cl, common, heads))
1815
1815
1816 def _changegroupsubset(self, commonrevs, csets, heads, source):
1816 def _changegroupsubset(self, commonrevs, csets, heads, source):
1817
1817
1818 cl = self.changelog
1818 cl = self.changelog
1819 mf = self.manifest
1819 mf = self.manifest
1820 mfs = {} # needed manifests
1820 mfs = {} # needed manifests
1821 fnodes = {} # needed file nodes
1821 fnodes = {} # needed file nodes
1822 changedfiles = set()
1822 changedfiles = set()
1823 fstate = ['', {}]
1823 fstate = ['', {}]
1824 count = [0, 0]
1824 count = [0, 0]
1825
1825
1826 # can we go through the fast path ?
1826 # can we go through the fast path ?
1827 heads.sort()
1827 heads.sort()
1828 if heads == sorted(self.heads()):
1828 if heads == sorted(self.heads()):
1829 return self._changegroup(csets, source)
1829 return self._changegroup(csets, source)
1830
1830
1831 # slow path
1831 # slow path
1832 self.hook('preoutgoing', throw=True, source=source)
1832 self.hook('preoutgoing', throw=True, source=source)
1833 self.changegroupinfo(csets, source)
1833 self.changegroupinfo(csets, source)
1834
1834
1835 # filter any nodes that claim to be part of the known set
1835 # filter any nodes that claim to be part of the known set
1836 def prune(revlog, missing):
1836 def prune(revlog, missing):
1837 rr, rl = revlog.rev, revlog.linkrev
1837 rr, rl = revlog.rev, revlog.linkrev
1838 return [n for n in missing
1838 return [n for n in missing
1839 if rl(rr(n)) not in commonrevs]
1839 if rl(rr(n)) not in commonrevs]
1840
1840
1841 progress = self.ui.progress
1841 progress = self.ui.progress
1842 _bundling = _('bundling')
1842 _bundling = _('bundling')
1843 _changesets = _('changesets')
1843 _changesets = _('changesets')
1844 _manifests = _('manifests')
1844 _manifests = _('manifests')
1845 _files = _('files')
1845 _files = _('files')
1846
1846
1847 def lookup(revlog, x):
1847 def lookup(revlog, x):
1848 if revlog == cl:
1848 if revlog == cl:
1849 c = cl.read(x)
1849 c = cl.read(x)
1850 changedfiles.update(c[3])
1850 changedfiles.update(c[3])
1851 mfs.setdefault(c[0], x)
1851 mfs.setdefault(c[0], x)
1852 count[0] += 1
1852 count[0] += 1
1853 progress(_bundling, count[0],
1853 progress(_bundling, count[0],
1854 unit=_changesets, total=count[1])
1854 unit=_changesets, total=count[1])
1855 return x
1855 return x
1856 elif revlog == mf:
1856 elif revlog == mf:
1857 clnode = mfs[x]
1857 clnode = mfs[x]
1858 mdata = mf.readfast(x)
1858 mdata = mf.readfast(x)
1859 for f, n in mdata.iteritems():
1859 for f, n in mdata.iteritems():
1860 if f in changedfiles:
1860 if f in changedfiles:
1861 fnodes[f].setdefault(n, clnode)
1861 fnodes[f].setdefault(n, clnode)
1862 count[0] += 1
1862 count[0] += 1
1863 progress(_bundling, count[0],
1863 progress(_bundling, count[0],
1864 unit=_manifests, total=count[1])
1864 unit=_manifests, total=count[1])
1865 return clnode
1865 return clnode
1866 else:
1866 else:
1867 progress(_bundling, count[0], item=fstate[0],
1867 progress(_bundling, count[0], item=fstate[0],
1868 unit=_files, total=count[1])
1868 unit=_files, total=count[1])
1869 return fstate[1][x]
1869 return fstate[1][x]
1870
1870
1871 bundler = changegroup.bundle10(lookup)
1871 bundler = changegroup.bundle10(lookup)
1872 reorder = self.ui.config('bundle', 'reorder', 'auto')
1872 reorder = self.ui.config('bundle', 'reorder', 'auto')
1873 if reorder == 'auto':
1873 if reorder == 'auto':
1874 reorder = None
1874 reorder = None
1875 else:
1875 else:
1876 reorder = util.parsebool(reorder)
1876 reorder = util.parsebool(reorder)
1877
1877
1878 def gengroup():
1878 def gengroup():
1879 # Create a changenode group generator that will call our functions
1879 # Create a changenode group generator that will call our functions
1880 # back to lookup the owning changenode and collect information.
1880 # back to lookup the owning changenode and collect information.
1881 count[:] = [0, len(csets)]
1881 count[:] = [0, len(csets)]
1882 for chunk in cl.group(csets, bundler, reorder=reorder):
1882 for chunk in cl.group(csets, bundler, reorder=reorder):
1883 yield chunk
1883 yield chunk
1884 progress(_bundling, None)
1884 progress(_bundling, None)
1885
1885
1886 # Create a generator for the manifestnodes that calls our lookup
1886 # Create a generator for the manifestnodes that calls our lookup
1887 # and data collection functions back.
1887 # and data collection functions back.
1888 for f in changedfiles:
1888 for f in changedfiles:
1889 fnodes[f] = {}
1889 fnodes[f] = {}
1890 count[:] = [0, len(mfs)]
1890 count[:] = [0, len(mfs)]
1891 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1891 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1892 yield chunk
1892 yield chunk
1893 progress(_bundling, None)
1893 progress(_bundling, None)
1894
1894
1895 mfs.clear()
1895 mfs.clear()
1896
1896
1897 # Go through all our files in order sorted by name.
1897 # Go through all our files in order sorted by name.
1898 count[:] = [0, len(changedfiles)]
1898 count[:] = [0, len(changedfiles)]
1899 for fname in sorted(changedfiles):
1899 for fname in sorted(changedfiles):
1900 filerevlog = self.file(fname)
1900 filerevlog = self.file(fname)
1901 if not len(filerevlog):
1901 if not len(filerevlog):
1902 raise util.Abort(_("empty or missing revlog for %s") % fname)
1902 raise util.Abort(_("empty or missing revlog for %s") % fname)
1903 fstate[0] = fname
1903 fstate[0] = fname
1904 fstate[1] = fnodes.pop(fname, {})
1904 fstate[1] = fnodes.pop(fname, {})
1905
1905
1906 nodelist = prune(filerevlog, fstate[1])
1906 nodelist = prune(filerevlog, fstate[1])
1907 if nodelist:
1907 if nodelist:
1908 count[0] += 1
1908 count[0] += 1
1909 yield bundler.fileheader(fname)
1909 yield bundler.fileheader(fname)
1910 for chunk in filerevlog.group(nodelist, bundler, reorder):
1910 for chunk in filerevlog.group(nodelist, bundler, reorder):
1911 yield chunk
1911 yield chunk
1912
1912
1913 # Signal that no more groups are left.
1913 # Signal that no more groups are left.
1914 yield bundler.close()
1914 yield bundler.close()
1915 progress(_bundling, None)
1915 progress(_bundling, None)
1916
1916
1917 if csets:
1917 if csets:
1918 self.hook('outgoing', node=hex(csets[0]), source=source)
1918 self.hook('outgoing', node=hex(csets[0]), source=source)
1919
1919
1920 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1920 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1921
1921
1922 def changegroup(self, basenodes, source):
1922 def changegroup(self, basenodes, source):
1923 # to avoid a race we use changegroupsubset() (issue1320)
1923 # to avoid a race we use changegroupsubset() (issue1320)
1924 return self.changegroupsubset(basenodes, self.heads(), source)
1924 return self.changegroupsubset(basenodes, self.heads(), source)
1925
1925
1926 def _changegroup(self, nodes, source):
1926 def _changegroup(self, nodes, source):
1927 """Compute the changegroup of all nodes that we have that a recipient
1927 """Compute the changegroup of all nodes that we have that a recipient
1928 doesn't. Return a chunkbuffer object whose read() method will return
1928 doesn't. Return a chunkbuffer object whose read() method will return
1929 successive changegroup chunks.
1929 successive changegroup chunks.
1930
1930
1931 This is much easier than the previous function as we can assume that
1931 This is much easier than the previous function as we can assume that
1932 the recipient has any changenode we aren't sending them.
1932 the recipient has any changenode we aren't sending them.
1933
1933
1934 nodes is the set of nodes to send"""
1934 nodes is the set of nodes to send"""
1935
1935
1936 cl = self.changelog
1936 cl = self.changelog
1937 mf = self.manifest
1937 mf = self.manifest
1938 mfs = {}
1938 mfs = {}
1939 changedfiles = set()
1939 changedfiles = set()
1940 fstate = ['']
1940 fstate = ['']
1941 count = [0, 0]
1941 count = [0, 0]
1942
1942
1943 self.hook('preoutgoing', throw=True, source=source)
1943 self.hook('preoutgoing', throw=True, source=source)
1944 self.changegroupinfo(nodes, source)
1944 self.changegroupinfo(nodes, source)
1945
1945
1946 revset = set([cl.rev(n) for n in nodes])
1946 revset = set([cl.rev(n) for n in nodes])
1947
1947
1948 def gennodelst(log):
1948 def gennodelst(log):
1949 ln, llr = log.node, log.linkrev
1949 ln, llr = log.node, log.linkrev
1950 return [ln(r) for r in log if llr(r) in revset]
1950 return [ln(r) for r in log if llr(r) in revset]
1951
1951
1952 progress = self.ui.progress
1952 progress = self.ui.progress
1953 _bundling = _('bundling')
1953 _bundling = _('bundling')
1954 _changesets = _('changesets')
1954 _changesets = _('changesets')
1955 _manifests = _('manifests')
1955 _manifests = _('manifests')
1956 _files = _('files')
1956 _files = _('files')
1957
1957
1958 def lookup(revlog, x):
1958 def lookup(revlog, x):
1959 if revlog == cl:
1959 if revlog == cl:
1960 c = cl.read(x)
1960 c = cl.read(x)
1961 changedfiles.update(c[3])
1961 changedfiles.update(c[3])
1962 mfs.setdefault(c[0], x)
1962 mfs.setdefault(c[0], x)
1963 count[0] += 1
1963 count[0] += 1
1964 progress(_bundling, count[0],
1964 progress(_bundling, count[0],
1965 unit=_changesets, total=count[1])
1965 unit=_changesets, total=count[1])
1966 return x
1966 return x
1967 elif revlog == mf:
1967 elif revlog == mf:
1968 count[0] += 1
1968 count[0] += 1
1969 progress(_bundling, count[0],
1969 progress(_bundling, count[0],
1970 unit=_manifests, total=count[1])
1970 unit=_manifests, total=count[1])
1971 return cl.node(revlog.linkrev(revlog.rev(x)))
1971 return cl.node(revlog.linkrev(revlog.rev(x)))
1972 else:
1972 else:
1973 progress(_bundling, count[0], item=fstate[0],
1973 progress(_bundling, count[0], item=fstate[0],
1974 total=count[1], unit=_files)
1974 total=count[1], unit=_files)
1975 return cl.node(revlog.linkrev(revlog.rev(x)))
1975 return cl.node(revlog.linkrev(revlog.rev(x)))
1976
1976
1977 bundler = changegroup.bundle10(lookup)
1977 bundler = changegroup.bundle10(lookup)
1978 reorder = self.ui.config('bundle', 'reorder', 'auto')
1978 reorder = self.ui.config('bundle', 'reorder', 'auto')
1979 if reorder == 'auto':
1979 if reorder == 'auto':
1980 reorder = None
1980 reorder = None
1981 else:
1981 else:
1982 reorder = util.parsebool(reorder)
1982 reorder = util.parsebool(reorder)
1983
1983
1984 def gengroup():
1984 def gengroup():
1985 '''yield a sequence of changegroup chunks (strings)'''
1985 '''yield a sequence of changegroup chunks (strings)'''
1986 # construct a list of all changed files
1986 # construct a list of all changed files
1987
1987
1988 count[:] = [0, len(nodes)]
1988 count[:] = [0, len(nodes)]
1989 for chunk in cl.group(nodes, bundler, reorder=reorder):
1989 for chunk in cl.group(nodes, bundler, reorder=reorder):
1990 yield chunk
1990 yield chunk
1991 progress(_bundling, None)
1991 progress(_bundling, None)
1992
1992
1993 count[:] = [0, len(mfs)]
1993 count[:] = [0, len(mfs)]
1994 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1994 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1995 yield chunk
1995 yield chunk
1996 progress(_bundling, None)
1996 progress(_bundling, None)
1997
1997
1998 count[:] = [0, len(changedfiles)]
1998 count[:] = [0, len(changedfiles)]
1999 for fname in sorted(changedfiles):
1999 for fname in sorted(changedfiles):
2000 filerevlog = self.file(fname)
2000 filerevlog = self.file(fname)
2001 if not len(filerevlog):
2001 if not len(filerevlog):
2002 raise util.Abort(_("empty or missing revlog for %s") % fname)
2002 raise util.Abort(_("empty or missing revlog for %s") % fname)
2003 fstate[0] = fname
2003 fstate[0] = fname
2004 nodelist = gennodelst(filerevlog)
2004 nodelist = gennodelst(filerevlog)
2005 if nodelist:
2005 if nodelist:
2006 count[0] += 1
2006 count[0] += 1
2007 yield bundler.fileheader(fname)
2007 yield bundler.fileheader(fname)
2008 for chunk in filerevlog.group(nodelist, bundler, reorder):
2008 for chunk in filerevlog.group(nodelist, bundler, reorder):
2009 yield chunk
2009 yield chunk
2010 yield bundler.close()
2010 yield bundler.close()
2011 progress(_bundling, None)
2011 progress(_bundling, None)
2012
2012
2013 if nodes:
2013 if nodes:
2014 self.hook('outgoing', node=hex(nodes[0]), source=source)
2014 self.hook('outgoing', node=hex(nodes[0]), source=source)
2015
2015
2016 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2016 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2017
2017
2018 def addchangegroup(self, source, srctype, url, emptyok=False):
2018 def addchangegroup(self, source, srctype, url, emptyok=False):
2019 """Add the changegroup returned by source.read() to this repo.
2019 """Add the changegroup returned by source.read() to this repo.
2020 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2020 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2021 the URL of the repo where this changegroup is coming from.
2021 the URL of the repo where this changegroup is coming from.
2022
2022
2023 Return an integer summarizing the change to this repo:
2023 Return an integer summarizing the change to this repo:
2024 - nothing changed or no source: 0
2024 - nothing changed or no source: 0
2025 - more heads than before: 1+added heads (2..n)
2025 - more heads than before: 1+added heads (2..n)
2026 - fewer heads than before: -1-removed heads (-2..-n)
2026 - fewer heads than before: -1-removed heads (-2..-n)
2027 - number of heads stays the same: 1
2027 - number of heads stays the same: 1
2028 """
2028 """
2029 def csmap(x):
2029 def csmap(x):
2030 self.ui.debug("add changeset %s\n" % short(x))
2030 self.ui.debug("add changeset %s\n" % short(x))
2031 return len(cl)
2031 return len(cl)
2032
2032
2033 def revmap(x):
2033 def revmap(x):
2034 return cl.rev(x)
2034 return cl.rev(x)
2035
2035
2036 if not source:
2036 if not source:
2037 return 0
2037 return 0
2038
2038
2039 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2039 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2040
2040
2041 changesets = files = revisions = 0
2041 changesets = files = revisions = 0
2042 efiles = set()
2042 efiles = set()
2043
2043
2044 # write changelog data to temp files so concurrent readers will not see
2044 # write changelog data to temp files so concurrent readers will not see
2045 # inconsistent view
2045 # inconsistent view
2046 cl = self.changelog
2046 cl = self.changelog
2047 cl.delayupdate()
2047 cl.delayupdate()
2048 oldheads = cl.heads()
2048 oldheads = cl.heads()
2049
2049
2050 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2050 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2051 try:
2051 try:
2052 trp = weakref.proxy(tr)
2052 trp = weakref.proxy(tr)
2053 # pull off the changeset group
2053 # pull off the changeset group
2054 self.ui.status(_("adding changesets\n"))
2054 self.ui.status(_("adding changesets\n"))
2055 clstart = len(cl)
2055 clstart = len(cl)
2056 class prog(object):
2056 class prog(object):
2057 step = _('changesets')
2057 step = _('changesets')
2058 count = 1
2058 count = 1
2059 ui = self.ui
2059 ui = self.ui
2060 total = None
2060 total = None
2061 def __call__(self):
2061 def __call__(self):
2062 self.ui.progress(self.step, self.count, unit=_('chunks'),
2062 self.ui.progress(self.step, self.count, unit=_('chunks'),
2063 total=self.total)
2063 total=self.total)
2064 self.count += 1
2064 self.count += 1
2065 pr = prog()
2065 pr = prog()
2066 source.callback = pr
2066 source.callback = pr
2067
2067
2068 source.changelogheader()
2068 source.changelogheader()
2069 srccontent = cl.addgroup(source, csmap, trp)
2069 srccontent = cl.addgroup(source, csmap, trp)
2070 if not (srccontent or emptyok):
2070 if not (srccontent or emptyok):
2071 raise util.Abort(_("received changelog group is empty"))
2071 raise util.Abort(_("received changelog group is empty"))
2072 clend = len(cl)
2072 clend = len(cl)
2073 changesets = clend - clstart
2073 changesets = clend - clstart
2074 for c in xrange(clstart, clend):
2074 for c in xrange(clstart, clend):
2075 efiles.update(self[c].files())
2075 efiles.update(self[c].files())
2076 efiles = len(efiles)
2076 efiles = len(efiles)
2077 self.ui.progress(_('changesets'), None)
2077 self.ui.progress(_('changesets'), None)
2078
2078
2079 # pull off the manifest group
2079 # pull off the manifest group
2080 self.ui.status(_("adding manifests\n"))
2080 self.ui.status(_("adding manifests\n"))
2081 pr.step = _('manifests')
2081 pr.step = _('manifests')
2082 pr.count = 1
2082 pr.count = 1
2083 pr.total = changesets # manifests <= changesets
2083 pr.total = changesets # manifests <= changesets
2084 # no need to check for empty manifest group here:
2084 # no need to check for empty manifest group here:
2085 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2085 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2086 # no new manifest will be created and the manifest group will
2086 # no new manifest will be created and the manifest group will
2087 # be empty during the pull
2087 # be empty during the pull
2088 source.manifestheader()
2088 source.manifestheader()
2089 self.manifest.addgroup(source, revmap, trp)
2089 self.manifest.addgroup(source, revmap, trp)
2090 self.ui.progress(_('manifests'), None)
2090 self.ui.progress(_('manifests'), None)
2091
2091
2092 needfiles = {}
2092 needfiles = {}
2093 if self.ui.configbool('server', 'validate', default=False):
2093 if self.ui.configbool('server', 'validate', default=False):
2094 # validate incoming csets have their manifests
2094 # validate incoming csets have their manifests
2095 for cset in xrange(clstart, clend):
2095 for cset in xrange(clstart, clend):
2096 mfest = self.changelog.read(self.changelog.node(cset))[0]
2096 mfest = self.changelog.read(self.changelog.node(cset))[0]
2097 mfest = self.manifest.readdelta(mfest)
2097 mfest = self.manifest.readdelta(mfest)
2098 # store file nodes we must see
2098 # store file nodes we must see
2099 for f, n in mfest.iteritems():
2099 for f, n in mfest.iteritems():
2100 needfiles.setdefault(f, set()).add(n)
2100 needfiles.setdefault(f, set()).add(n)
2101
2101
2102 # process the files
2102 # process the files
2103 self.ui.status(_("adding file changes\n"))
2103 self.ui.status(_("adding file changes\n"))
2104 pr.step = _('files')
2104 pr.step = _('files')
2105 pr.count = 1
2105 pr.count = 1
2106 pr.total = efiles
2106 pr.total = efiles
2107 source.callback = None
2107 source.callback = None
2108
2108
2109 while True:
2109 while True:
2110 chunkdata = source.filelogheader()
2110 chunkdata = source.filelogheader()
2111 if not chunkdata:
2111 if not chunkdata:
2112 break
2112 break
2113 f = chunkdata["filename"]
2113 f = chunkdata["filename"]
2114 self.ui.debug("adding %s revisions\n" % f)
2114 self.ui.debug("adding %s revisions\n" % f)
2115 pr()
2115 pr()
2116 fl = self.file(f)
2116 fl = self.file(f)
2117 o = len(fl)
2117 o = len(fl)
2118 if not fl.addgroup(source, revmap, trp):
2118 if not fl.addgroup(source, revmap, trp):
2119 raise util.Abort(_("received file revlog group is empty"))
2119 raise util.Abort(_("received file revlog group is empty"))
2120 revisions += len(fl) - o
2120 revisions += len(fl) - o
2121 files += 1
2121 files += 1
2122 if f in needfiles:
2122 if f in needfiles:
2123 needs = needfiles[f]
2123 needs = needfiles[f]
2124 for new in xrange(o, len(fl)):
2124 for new in xrange(o, len(fl)):
2125 n = fl.node(new)
2125 n = fl.node(new)
2126 if n in needs:
2126 if n in needs:
2127 needs.remove(n)
2127 needs.remove(n)
2128 if not needs:
2128 if not needs:
2129 del needfiles[f]
2129 del needfiles[f]
2130 self.ui.progress(_('files'), None)
2130 self.ui.progress(_('files'), None)
2131
2131
2132 for f, needs in needfiles.iteritems():
2132 for f, needs in needfiles.iteritems():
2133 fl = self.file(f)
2133 fl = self.file(f)
2134 for n in needs:
2134 for n in needs:
2135 try:
2135 try:
2136 fl.rev(n)
2136 fl.rev(n)
2137 except error.LookupError:
2137 except error.LookupError:
2138 raise util.Abort(
2138 raise util.Abort(
2139 _('missing file data for %s:%s - run hg verify') %
2139 _('missing file data for %s:%s - run hg verify') %
2140 (f, hex(n)))
2140 (f, hex(n)))
2141
2141
2142 dh = 0
2142 dh = 0
2143 if oldheads:
2143 if oldheads:
2144 heads = cl.heads()
2144 heads = cl.heads()
2145 dh = len(heads) - len(oldheads)
2145 dh = len(heads) - len(oldheads)
2146 for h in heads:
2146 for h in heads:
2147 if h not in oldheads and 'close' in self[h].extra():
2147 if h not in oldheads and 'close' in self[h].extra():
2148 dh -= 1
2148 dh -= 1
2149 htext = ""
2149 htext = ""
2150 if dh:
2150 if dh:
2151 htext = _(" (%+d heads)") % dh
2151 htext = _(" (%+d heads)") % dh
2152
2152
2153 self.ui.status(_("added %d changesets"
2153 self.ui.status(_("added %d changesets"
2154 " with %d changes to %d files%s\n")
2154 " with %d changes to %d files%s\n")
2155 % (changesets, revisions, files, htext))
2155 % (changesets, revisions, files, htext))
2156
2156
2157 if changesets > 0:
2157 if changesets > 0:
2158 p = lambda: cl.writepending() and self.root or ""
2158 p = lambda: cl.writepending() and self.root or ""
2159 self.hook('pretxnchangegroup', throw=True,
2159 self.hook('pretxnchangegroup', throw=True,
2160 node=hex(cl.node(clstart)), source=srctype,
2160 node=hex(cl.node(clstart)), source=srctype,
2161 url=url, pending=p)
2161 url=url, pending=p)
2162
2162
2163 added = [cl.node(r) for r in xrange(clstart, clend)]
2163 added = [cl.node(r) for r in xrange(clstart, clend)]
2164 publishing = self.ui.configbool('phases', 'publish', True)
2164 publishing = self.ui.configbool('phases', 'publish', True)
2165 if srctype == 'push':
2165 if srctype == 'push':
2166 # Old server can not push the boundary themself.
2166 # Old server can not push the boundary themself.
2167 # New server won't push the boundary if changeset already
2167 # New server won't push the boundary if changeset already
2168 # existed locally as secrete
2168 # existed locally as secrete
2169 #
2169 #
2170 # We should not use added here but the list of all change in
2170 # We should not use added here but the list of all change in
2171 # the bundle
2171 # the bundle
2172 if publishing:
2172 if publishing:
2173 phases.advanceboundary(self, phases.public, srccontent)
2173 phases.advanceboundary(self, phases.public, srccontent)
2174 else:
2174 else:
2175 phases.advanceboundary(self, phases.draft, srccontent)
2175 phases.advanceboundary(self, phases.draft, srccontent)
2176 phases.retractboundary(self, phases.draft, added)
2176 phases.retractboundary(self, phases.draft, added)
2177 elif srctype != 'strip':
2177 elif srctype != 'strip':
2178 # publishing only alter behavior during push
2178 # publishing only alter behavior during push
2179 #
2179 #
2180 # strip should not touch boundary at all
2180 # strip should not touch boundary at all
2181 phases.retractboundary(self, phases.draft, added)
2181 phases.retractboundary(self, phases.draft, added)
2182
2182
2183 # make changelog see real files again
2183 # make changelog see real files again
2184 cl.finalize(trp)
2184 cl.finalize(trp)
2185
2185
2186 tr.close()
2186 tr.close()
2187
2187
2188 if changesets > 0:
2188 if changesets > 0:
2189 def runhooks():
2189 def runhooks():
2190 # forcefully update the on-disk branch cache
2190 # forcefully update the on-disk branch cache
2191 self.ui.debug("updating the branch cache\n")
2191 self.ui.debug("updating the branch cache\n")
2192 self.updatebranchcache()
2192 self.updatebranchcache()
2193 self.hook("changegroup", node=hex(cl.node(clstart)),
2193 self.hook("changegroup", node=hex(cl.node(clstart)),
2194 source=srctype, url=url)
2194 source=srctype, url=url)
2195
2195
2196 for n in added:
2196 for n in added:
2197 self.hook("incoming", node=hex(n), source=srctype,
2197 self.hook("incoming", node=hex(n), source=srctype,
2198 url=url)
2198 url=url)
2199 self._afterlock(runhooks)
2199 self._afterlock(runhooks)
2200
2200
2201 finally:
2201 finally:
2202 tr.release()
2202 tr.release()
2203 # never return 0 here:
2203 # never return 0 here:
2204 if dh < 0:
2204 if dh < 0:
2205 return dh - 1
2205 return dh - 1
2206 else:
2206 else:
2207 return dh + 1
2207 return dh + 1
2208
2208
2209 def stream_in(self, remote, requirements):
2209 def stream_in(self, remote, requirements):
2210 lock = self.lock()
2210 lock = self.lock()
2211 try:
2211 try:
2212 fp = remote.stream_out()
2212 fp = remote.stream_out()
2213 l = fp.readline()
2213 l = fp.readline()
2214 try:
2214 try:
2215 resp = int(l)
2215 resp = int(l)
2216 except ValueError:
2216 except ValueError:
2217 raise error.ResponseError(
2217 raise error.ResponseError(
2218 _('Unexpected response from remote server:'), l)
2218 _('Unexpected response from remote server:'), l)
2219 if resp == 1:
2219 if resp == 1:
2220 raise util.Abort(_('operation forbidden by server'))
2220 raise util.Abort(_('operation forbidden by server'))
2221 elif resp == 2:
2221 elif resp == 2:
2222 raise util.Abort(_('locking the remote repository failed'))
2222 raise util.Abort(_('locking the remote repository failed'))
2223 elif resp != 0:
2223 elif resp != 0:
2224 raise util.Abort(_('the server sent an unknown error code'))
2224 raise util.Abort(_('the server sent an unknown error code'))
2225 self.ui.status(_('streaming all changes\n'))
2225 self.ui.status(_('streaming all changes\n'))
2226 l = fp.readline()
2226 l = fp.readline()
2227 try:
2227 try:
2228 total_files, total_bytes = map(int, l.split(' ', 1))
2228 total_files, total_bytes = map(int, l.split(' ', 1))
2229 except (ValueError, TypeError):
2229 except (ValueError, TypeError):
2230 raise error.ResponseError(
2230 raise error.ResponseError(
2231 _('Unexpected response from remote server:'), l)
2231 _('Unexpected response from remote server:'), l)
2232 self.ui.status(_('%d files to transfer, %s of data\n') %
2232 self.ui.status(_('%d files to transfer, %s of data\n') %
2233 (total_files, util.bytecount(total_bytes)))
2233 (total_files, util.bytecount(total_bytes)))
2234 start = time.time()
2234 start = time.time()
2235 for i in xrange(total_files):
2235 for i in xrange(total_files):
2236 # XXX doesn't support '\n' or '\r' in filenames
2236 # XXX doesn't support '\n' or '\r' in filenames
2237 l = fp.readline()
2237 l = fp.readline()
2238 try:
2238 try:
2239 name, size = l.split('\0', 1)
2239 name, size = l.split('\0', 1)
2240 size = int(size)
2240 size = int(size)
2241 except (ValueError, TypeError):
2241 except (ValueError, TypeError):
2242 raise error.ResponseError(
2242 raise error.ResponseError(
2243 _('Unexpected response from remote server:'), l)
2243 _('Unexpected response from remote server:'), l)
2244 if self.ui.debugflag:
2244 if self.ui.debugflag:
2245 self.ui.debug('adding %s (%s)\n' %
2245 self.ui.debug('adding %s (%s)\n' %
2246 (name, util.bytecount(size)))
2246 (name, util.bytecount(size)))
2247 # for backwards compat, name was partially encoded
2247 # for backwards compat, name was partially encoded
2248 ofp = self.sopener(store.decodedir(name), 'w')
2248 ofp = self.sopener(store.decodedir(name), 'w')
2249 for chunk in util.filechunkiter(fp, limit=size):
2249 for chunk in util.filechunkiter(fp, limit=size):
2250 ofp.write(chunk)
2250 ofp.write(chunk)
2251 ofp.close()
2251 ofp.close()
2252 elapsed = time.time() - start
2252 elapsed = time.time() - start
2253 if elapsed <= 0:
2253 if elapsed <= 0:
2254 elapsed = 0.001
2254 elapsed = 0.001
2255 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2255 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2256 (util.bytecount(total_bytes), elapsed,
2256 (util.bytecount(total_bytes), elapsed,
2257 util.bytecount(total_bytes / elapsed)))
2257 util.bytecount(total_bytes / elapsed)))
2258
2258
2259 # new requirements = old non-format requirements + new format-related
2259 # new requirements = old non-format requirements + new format-related
2260 # requirements from the streamed-in repository
2260 # requirements from the streamed-in repository
2261 requirements.update(set(self.requirements) - self.supportedformats)
2261 requirements.update(set(self.requirements) - self.supportedformats)
2262 self._applyrequirements(requirements)
2262 self._applyrequirements(requirements)
2263 self._writerequirements()
2263 self._writerequirements()
2264
2264
2265 self.invalidate()
2265 self.invalidate()
2266 return len(self.heads()) + 1
2266 return len(self.heads()) + 1
2267 finally:
2267 finally:
2268 lock.release()
2268 lock.release()
2269
2269
2270 def clone(self, remote, heads=[], stream=False):
2270 def clone(self, remote, heads=[], stream=False):
2271 '''clone remote repository.
2271 '''clone remote repository.
2272
2272
2273 keyword arguments:
2273 keyword arguments:
2274 heads: list of revs to clone (forces use of pull)
2274 heads: list of revs to clone (forces use of pull)
2275 stream: use streaming clone if possible'''
2275 stream: use streaming clone if possible'''
2276
2276
2277 # now, all clients that can request uncompressed clones can
2277 # now, all clients that can request uncompressed clones can
2278 # read repo formats supported by all servers that can serve
2278 # read repo formats supported by all servers that can serve
2279 # them.
2279 # them.
2280
2280
2281 # if revlog format changes, client will have to check version
2281 # if revlog format changes, client will have to check version
2282 # and format flags on "stream" capability, and use
2282 # and format flags on "stream" capability, and use
2283 # uncompressed only if compatible.
2283 # uncompressed only if compatible.
2284
2284
2285 if not stream:
2285 if not stream:
2286 # if the server explicitely prefer to stream (for fast LANs)
2286 # if the server explicitely prefer to stream (for fast LANs)
2287 stream = remote.capable('stream-preferred')
2287 stream = remote.capable('stream-preferred')
2288
2288
2289 if stream and not heads:
2289 if stream and not heads:
2290 # 'stream' means remote revlog format is revlogv1 only
2290 # 'stream' means remote revlog format is revlogv1 only
2291 if remote.capable('stream'):
2291 if remote.capable('stream'):
2292 return self.stream_in(remote, set(('revlogv1',)))
2292 return self.stream_in(remote, set(('revlogv1',)))
2293 # otherwise, 'streamreqs' contains the remote revlog format
2293 # otherwise, 'streamreqs' contains the remote revlog format
2294 streamreqs = remote.capable('streamreqs')
2294 streamreqs = remote.capable('streamreqs')
2295 if streamreqs:
2295 if streamreqs:
2296 streamreqs = set(streamreqs.split(','))
2296 streamreqs = set(streamreqs.split(','))
2297 # if we support it, stream in and adjust our requirements
2297 # if we support it, stream in and adjust our requirements
2298 if not streamreqs - self.supportedformats:
2298 if not streamreqs - self.supportedformats:
2299 return self.stream_in(remote, streamreqs)
2299 return self.stream_in(remote, streamreqs)
2300 return self.pull(remote, heads)
2300 return self.pull(remote, heads)
2301
2301
2302 def pushkey(self, namespace, key, old, new):
2302 def pushkey(self, namespace, key, old, new):
2303 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2303 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2304 old=old, new=new)
2304 old=old, new=new)
2305 ret = pushkey.push(self, namespace, key, old, new)
2305 ret = pushkey.push(self, namespace, key, old, new)
2306 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2306 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2307 ret=ret)
2307 ret=ret)
2308 return ret
2308 return ret
2309
2309
2310 def listkeys(self, namespace):
2310 def listkeys(self, namespace):
2311 self.hook('prelistkeys', throw=True, namespace=namespace)
2311 self.hook('prelistkeys', throw=True, namespace=namespace)
2312 values = pushkey.list(self, namespace)
2312 values = pushkey.list(self, namespace)
2313 self.hook('listkeys', namespace=namespace, values=values)
2313 self.hook('listkeys', namespace=namespace, values=values)
2314 return values
2314 return values
2315
2315
2316 def debugwireargs(self, one, two, three=None, four=None, five=None):
2316 def debugwireargs(self, one, two, three=None, four=None, five=None):
2317 '''used to test argument passing over the wire'''
2317 '''used to test argument passing over the wire'''
2318 return "%s %s %s %s %s" % (one, two, three, four, five)
2318 return "%s %s %s %s %s" % (one, two, three, four, five)
2319
2319
2320 def savecommitmessage(self, text):
2320 def savecommitmessage(self, text):
2321 fp = self.opener('last-message.txt', 'wb')
2321 fp = self.opener('last-message.txt', 'wb')
2322 try:
2322 try:
2323 fp.write(text)
2323 fp.write(text)
2324 finally:
2324 finally:
2325 fp.close()
2325 fp.close()
2326 return self.pathto(fp.name[len(self.root)+1:])
2326 return self.pathto(fp.name[len(self.root)+1:])
2327
2327
2328 # used to avoid circular references so destructors work
2328 # used to avoid circular references so destructors work
2329 def aftertrans(files):
2329 def aftertrans(files):
2330 renamefiles = [tuple(t) for t in files]
2330 renamefiles = [tuple(t) for t in files]
2331 def a():
2331 def a():
2332 for src, dest in renamefiles:
2332 for src, dest in renamefiles:
2333 try:
2333 try:
2334 util.rename(src, dest)
2334 util.rename(src, dest)
2335 except OSError: # journal file does not yet exist
2335 except OSError: # journal file does not yet exist
2336 pass
2336 pass
2337 return a
2337 return a
2338
2338
2339 def undoname(fn):
2339 def undoname(fn):
2340 base, name = os.path.split(fn)
2340 base, name = os.path.split(fn)
2341 assert name.startswith('journal')
2341 assert name.startswith('journal')
2342 return os.path.join(base, name.replace('journal', 'undo', 1))
2342 return os.path.join(base, name.replace('journal', 'undo', 1))
2343
2343
2344 def instance(ui, path, create):
2344 def instance(ui, path, create):
2345 return localrepository(ui, util.urllocalpath(path), create)
2345 return localrepository(ui, util.urllocalpath(path), create)
2346
2346
2347 def islocal(path):
2347 def islocal(path):
2348 return True
2348 return True
@@ -1,356 +1,355 b''
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms of the
9 This software may be used and distributed according to the terms of the
10 GNU General Public License version 2 or any later version.
10 GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phases' is an indicator that tells us how a changeset is
20 A 'changeset phases' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase is described below,
21 manipulated and communicated. The details of each phase is described below,
22 here we describe the properties they have in common.
22 here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not permanent and
24 Like bookmarks, phases are not stored in history and thus are not permanent and
25 leave no audit trail.
25 leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered, so they
27 First, no changeset can be in two phases at once. Phases are ordered, so they
28 can be considered from lowest to highest. The default, lowest phase is 'public'
28 can be considered from lowest to highest. The default, lowest phase is 'public'
29 - this is the normal phase of existing changesets. A child changeset can not be
29 - this is the normal phase of existing changesets. A child changeset can not be
30 in a lower phase than its parents.
30 in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 local commits are draft by default
39 local commits are draft by default
40
40
41 Phase movement and exchange
41 Phase movement and exchange
42 ============================
42 ============================
43
43
44 Phase data are exchanged by pushkey on pull and push. Some server have a
44 Phase data are exchanged by pushkey on pull and push. Some server have a
45 publish option set, we call them publishing server. Pushing to such server make
45 publish option set, we call them publishing server. Pushing to such server make
46 draft changeset publish.
46 draft changeset publish.
47
47
48 A small list of fact/rules define the exchange of phase:
48 A small list of fact/rules define the exchange of phase:
49
49
50 * old client never changes server states
50 * old client never changes server states
51 * pull never changes server states
51 * pull never changes server states
52 * publish and old server csets are seen as public by client
52 * publish and old server csets are seen as public by client
53
53
54 * Any secret changeset seens in another repository is lowered to at least draft
54 * Any secret changeset seens in another repository is lowered to at least draft
55
55
56
56
57 Here is the final table summing up the 49 possible usecase of phase exchange:
57 Here is the final table summing up the 49 possible usecase of phase exchange:
58
58
59 server
59 server
60 old publish non-publish
60 old publish non-publish
61 N X N D P N D P
61 N X N D P N D P
62 old client
62 old client
63 pull
63 pull
64 N - X/X - X/D X/P - X/D X/P
64 N - X/X - X/D X/P - X/D X/P
65 X - X/X - X/D X/P - X/D X/P
65 X - X/X - X/D X/P - X/D X/P
66 push
66 push
67 X X/X X/X X/P X/P X/P X/D X/D X/P
67 X X/X X/X X/P X/P X/P X/D X/D X/P
68 new client
68 new client
69 pull
69 pull
70 N - P/X - P/D P/P - D/D P/P
70 N - P/X - P/D P/P - D/D P/P
71 D - P/X - P/D P/P - D/D P/P
71 D - P/X - P/D P/P - D/D P/P
72 P - P/X - P/D P/P - P/D P/P
72 P - P/X - P/D P/P - P/D P/P
73 push
73 push
74 D P/X P/X P/P P/P P/P D/D D/D P/P
74 D P/X P/X P/P P/P P/P D/D D/D P/P
75 P P/X P/X P/P P/P P/P P/P P/P P/P
75 P P/X P/X P/P P/P P/P P/P P/P P/P
76
76
77 Legend:
77 Legend:
78
78
79 A/B = final state on client / state on server
79 A/B = final state on client / state on server
80
80
81 * N = new/not present,
81 * N = new/not present,
82 * P = public,
82 * P = public,
83 * D = draft,
83 * D = draft,
84 * X = not tracked (ie: the old client or server has no internal way of
84 * X = not tracked (ie: the old client or server has no internal way of
85 recording the phase.)
85 recording the phase.)
86
86
87 passive = only pushes
87 passive = only pushes
88
88
89
89
90 A cell here can be read like this:
90 A cell here can be read like this:
91
91
92 "When a new client pushes a draft changeset (D) to a publishing server
92 "When a new client pushes a draft changeset (D) to a publishing server
93 where it's not present (N), it's marked public on both sides (P/P)."
93 where it's not present (N), it's marked public on both sides (P/P)."
94
94
95 Note: old client behave as publish server with Draft only content
95 Note: old client behave as publish server with Draft only content
96 - other people see it as public
96 - other people see it as public
97 - content is pushed as draft
97 - content is pushed as draft
98
98
99 """
99 """
100
100
101 import errno
101 import errno
102 from node import nullid, bin, hex, short
102 from node import nullid, bin, hex, short
103 from i18n import _
103 from i18n import _
104
104
105 allphases = public, draft, secret = range(3)
105 allphases = public, draft, secret = range(3)
106 trackedphases = allphases[1:]
106 trackedphases = allphases[1:]
107 phasenames = ['public', 'draft', 'secret']
107 phasenames = ['public', 'draft', 'secret']
108
108
109 def _filterunknown(ui, changelog, phaseroots):
109 def _filterunknown(ui, changelog, phaseroots):
110 """remove unknown nodes from the phase boundary
110 """remove unknown nodes from the phase boundary
111
111
112 Nothing is lost as unknown nodes only hold data for their descendants
112 Nothing is lost as unknown nodes only hold data for their descendants
113 """
113 """
114 updated = False
114 updated = False
115 nodemap = changelog.nodemap # to filter unknown nodes
115 nodemap = changelog.nodemap # to filter unknown nodes
116 for phase, nodes in enumerate(phaseroots):
116 for phase, nodes in enumerate(phaseroots):
117 missing = [node for node in nodes if node not in nodemap]
117 missing = [node for node in nodes if node not in nodemap]
118 if missing:
118 if missing:
119 for mnode in missing:
119 for mnode in missing:
120 ui.debug(
120 ui.debug(
121 'removing unknown node %s from %i-phase boundary\n'
121 'removing unknown node %s from %i-phase boundary\n'
122 % (short(mnode), phase))
122 % (short(mnode), phase))
123 nodes.symmetric_difference_update(missing)
123 nodes.symmetric_difference_update(missing)
124 updated = True
124 updated = True
125 return updated
125 return updated
126
126
127 def readroots(repo, phasedefaults=None):
127 def readroots(repo, phasedefaults=None):
128 """Read phase roots from disk
128 """Read phase roots from disk
129
129
130 phasedefaults is a list of fn(repo, roots) callable, which are
130 phasedefaults is a list of fn(repo, roots) callable, which are
131 executed if the phase roots file does not exist. When phases are
131 executed if the phase roots file does not exist. When phases are
132 being initialized on an existing repository, this could be used to
132 being initialized on an existing repository, this could be used to
133 set selected changesets phase to something else than public.
133 set selected changesets phase to something else than public.
134
134
135 Return (roots, dirty) where dirty is true if roots differ from
135 Return (roots, dirty) where dirty is true if roots differ from
136 what is being stored.
136 what is being stored.
137 """
137 """
138 dirty = False
138 dirty = False
139 roots = [set() for i in allphases]
139 roots = [set() for i in allphases]
140 try:
140 try:
141 f = repo.sopener('phaseroots')
141 f = repo.sopener('phaseroots')
142 try:
142 try:
143 for line in f:
143 for line in f:
144 phase, nh = line.split()
144 phase, nh = line.split()
145 roots[int(phase)].add(bin(nh))
145 roots[int(phase)].add(bin(nh))
146 finally:
146 finally:
147 f.close()
147 f.close()
148 except IOError, inst:
148 except IOError, inst:
149 if inst.errno != errno.ENOENT:
149 if inst.errno != errno.ENOENT:
150 raise
150 raise
151 if phasedefaults:
151 if phasedefaults:
152 for f in phasedefaults:
152 for f in phasedefaults:
153 roots = f(repo, roots)
153 roots = f(repo, roots)
154 dirty = True
154 dirty = True
155 if _filterunknown(repo.ui, repo.changelog, roots):
155 if _filterunknown(repo.ui, repo.changelog, roots):
156 dirty = True
156 dirty = True
157 return roots, dirty
157 return roots, dirty
158
158
159 def writeroots(repo):
159 def writeroots(repo, phaseroots):
160 """Write phase roots from disk"""
160 """Write phase roots from disk"""
161 f = repo.sopener('phaseroots', 'w', atomictemp=True)
161 f = repo.sopener('phaseroots', 'w', atomictemp=True)
162 try:
162 try:
163 for phase, roots in enumerate(repo._phaseroots):
163 for phase, roots in enumerate(phaseroots):
164 for h in roots:
164 for h in roots:
165 f.write('%i %s\n' % (phase, hex(h)))
165 f.write('%i %s\n' % (phase, hex(h)))
166 repo._dirtyphases = False
167 finally:
166 finally:
168 f.close()
167 f.close()
169
168
def advanceboundary(repo, targetphase, nodes):
    """Add nodes to a phase changing other nodes phases if necessary.

    This function move boundary *forward* this means that all nodes are set
    in the target phase or kept in a *lower* phase.

    Simplify boundary to contains phase roots only.

    NOTE(review): this mutates the ``repo._phaseroots[phase]`` sets in
    place and flags ``repo._dirtyphases`` when anything changed, so the
    caller is expected to persist the roots afterwards -- confirm against
    the write path.
    """
    delroots = [] # set of root deleted by this path
    for phase in xrange(targetphase + 1, len(allphases)):
        # filter nodes that are not in a compatible phase already
        # XXX rev phase cache might have been invalidated by a previous loop
        # XXX we need to be smarter here
        nodes = [n for n in nodes if repo[n].phase() >= phase]
        if not nodes:
            break # no roots to move anymore
        roots = repo._phaseroots[phase]
        olds = roots.copy()
        # recompute this phase's roots once `nodes` and their descendants
        # are no longer counted in it
        ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
        roots.clear()
        roots.update(ctx.node() for ctx in ctxs)
        if olds != roots:
            # invalidate cache (we probably could be smarter here
            if '_phaserev' in vars(repo):
                del repo._phaserev
            repo._dirtyphases = True
            # some roots may need to be declared for lower phases
            delroots.extend(olds - roots)
    # declare deleted root in the target phase
    if targetphase != 0:
        retractboundary(repo, targetphase, delroots)
200
199
201
200
def retractboundary(repo, targetphase, nodes):
    """Set nodes back to a phase changing other nodes phases if necessary.

    This function move boundary *backward* this means that all nodes are set
    in the target phase or kept in a *higher* phase.

    Simplify boundary to contains phase roots only."""
    currentroots = repo._phaseroots[targetphase]
    # only nodes currently in a lower phase can become roots of targetphase
    newroots = [n for n in nodes if repo[n].phase() < targetphase]
    if newroots:
        # mutate the shared root set in place, then reduce it to actual
        # roots: drop any node that is a descendant of another root
        currentroots.update(newroots)
        ctxs = repo.set('roots(%ln::)', currentroots)
        currentroots.intersection_update(ctx.node() for ctx in ctxs)
        # the per-rev phase cache is now stale; drop it and mark the
        # in-memory roots dirty so they get written back
        if '_phaserev' in vars(repo):
            del repo._phaserev
        repo._dirtyphases = True
218
217
219
218
def listphases(repo):
    """List phases root for serialisation over pushkey"""
    value = '%i' % draft
    keys = dict((hex(root), value) for root in repo._phaseroots[draft])

    if repo.ui.configbool('phases', 'publish', True):
        # Advertise that we are a publishing repository.  A publishing
        # repo cannot simply pretend to be an old (pre-phase) repo: a
        # client pushing to it must still exchange phase boundaries.
        #
        # Push transfers phase data as well as changesets, and that phase
        # data may concern changesets that are already common and thus not
        # pushed.  For example:
        #
        # 1) repo A push changeset X as draft to repo B
        # 2) repo B make changeset X public
        # 3) repo B push to repo A. X is not pushed but the data that X as
        #    now public should
        #
        # The server cannot work this out alone, since it knows nothing
        # about the client's phase data.
        keys['publishing'] = 'True'
    return keys
245
244
def pushphase(repo, nhex, oldphasestr, newphasestr):
    """List phases root for serialisation over pushkey"""
    lock = repo.lock()
    try:
        phasenow = repo[nhex].phase()
        # abs() protects against negative-index surprises on hostile input
        phasenew = abs(int(newphasestr))
        phaseold = abs(int(oldphasestr))
        if phasenow == phaseold and phasenew < phaseold:
            # expected transition: move the boundary forward
            advanceboundary(repo, phasenew, [bin(nhex)])
            return 1
        if phasenow == phasenew:
            # raced, but got correct result
            return 1
        return 0
    finally:
        lock.release()
263
262
def visibleheads(repo):
    """return the set of visible head of this repo"""
    # XXX we want a cache on this
    sroots = repo._phaseroots[secret]
    if not sroots:
        # nothing is secret: every head is visible
        return repo.heads()
    # XXX very slow revset. storing heads or secret "boundary" would help.
    ctxs = repo.set('heads(not (%ln::))', sroots)
    vheads = [ctx.node() for ctx in ctxs]
    # an entirely-secret repo still exposes the null revision as a head
    return vheads or [nullid]
278
277
def visiblebranchmap(repo):
    """return a branchmap for the visible set"""
    # XXX Recomputing this data on the fly is very slow. We should build a
    # XXX cached version while computing the standard branchmap version.
    sroots = repo._phaseroots[secret]
    if not sroots:
        # no secret changesets: the plain branchmap is already visible-only
        return repo.branchmap()
    vbranchmap = {}
    for branch, nodes in repo.branchmap().iteritems():
        if any(repo[n].phase() >= secret for n in nodes):
            # secret heads found: recompute the visible heads of the branch
            s = repo.set('heads(branch(%s) - secret())', branch)
            nodes = [c.node() for c in s]
        vbranchmap[branch] = nodes
    return vbranchmap
300
299
def analyzeremotephases(repo, subset, roots):
    """Compute phases heads and root in a subset of node from root dict

    * subset is heads of the subset
    * roots is {<nodeid> => phase} mapping. key and value are string.

    Accept unknown element input
    """
    # turn the pushkey dictionary into a list of known draft roots
    draftroots = []
    nodemap = repo.changelog.nodemap # to filter unknown nodes
    for nhex, phasestr in roots.iteritems():
        if nhex == 'publishing':
            # metadata entry, not a root (see listphases)
            continue
        node = bin(nhex)
        phasenum = int(phasestr)
        if phasenum == 0 and node != nullid:
            repo.ui.warn(_('ignoring inconsistent public root'
                           ' from remote: %s\n') % nhex)
        elif phasenum == 1:
            if node in nodemap:
                draftroots.append(node)
        elif phasenum != 0:
            repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
                         % (phasenum, nhex))
    # compute heads
    publicheads = newheads(repo, subset, draftroots)
    return publicheads, draftroots
330
329
def newheads(repo, heads, roots):
    """compute new head of a subset minus another

    * `heads`: define the first subset
    * `roots`: define the second we substract to the first"""
    ctxs = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
                    heads, roots, roots, heads)
    return [c.node() for c in ctxs]
339
338
340
339
def newcommitphase(ui):
    """helper to get the target phase of new commit

    Handle all possible values for the phases.new-commit options.

    """
    v = ui.config('phases', 'new-commit', draft)
    # the option may be a phase name...
    try:
        return phasenames.index(v)
    except ValueError:
        pass
    # ... or a raw phase number
    try:
        return int(v)
    except ValueError:
        msg = _("phases.new-commit: not a valid phase name ('%s')")
        raise error.ConfigError(msg % v)
356
355
General Comments 0
You need to be logged in to leave comments. Login now