phases: call filterunknown() in readroots()...
Patrick Mezard
r16624:3f85cef6 default
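
In short, this changeset moves the filterunknown() call out of localrepository._phaseroots and into phases.readroots() itself, so phase roots are pruned of unknown nodes wherever they are read, rather than only by this one caller. The sketch below is a minimal, hypothetical reconstruction of that before/after flow. It assumes only the phases API visible in this diff (readroots(repo) returning the phase roots and filterunknown(repo, phaseroots) dropping roots whose nodes are missing from the changelog); the _readrootsfromdisk helper is an invented stand-in, not upstream code.

    # Before this changeset: the caller had to filter the roots itself.
    @storecache('phaseroots')
    def _phaseroots(self):
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        phases.filterunknown(self, phaseroots)  # every caller must remember this
        return phaseroots

    # After this changeset: readroots() filters internally, so the
    # explicit filterunknown() call disappears from the property body
    # (see the hunk below). A hypothetical readroots() incorporating
    # the step might look like:
    def readroots(repo):
        """Read phase roots and drop roots for nodes unknown to the repo."""
        phaseroots = _readrootsfromdisk(repo)  # invented helper, not upstream
        filterunknown(repo, phaseroots)        # filtering now happens here
        return phaseroots
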
@@ -1,2349 +1,2348 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup, subrepo, discovery, pushkey
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
 class storecache(filecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                         'known', 'getbundle'))
     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
 
     def __init__(self, baseui, path=None, create=False):
         repo.repository.__init__(self)
         self.root = os.path.realpath(util.expandpath(path))
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.opener = scmutil.opener(self.path)
         self.wopener = scmutil.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()
         self._dirtyphases = False
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
 
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     util.makedirs(path)
                 util.makedir(self.path, notindexed=True)
                 requirements = ["revlogv1"]
                 if self.ui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                     # create an invalid changelog
                     self.opener.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.opener, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()
 
         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         self.store = store.store(requirements, self.sharedpath, scmutil.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()
 
 
         self._branchcache = None
         self._branchcachetip = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
     def _applyrequirements(self, requirements):
         self.requirements = requirements
         openerreqs = set(('revlogv1', 'generaldelta'))
         self.sopener.options = dict((r, 1) for r in requirements
                                     if r in openerreqs)
 
     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)
 
         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False
 
     @filecache('bookmarks')
     def _bookmarks(self):
         return bookmarks.read(self)
 
     @filecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)
 
     def _writebookmarks(self, marks):
         bookmarks.write(self)
 
     @storecache('phaseroots')
     def _phaseroots(self):
         self._dirtyphases = False
         phaseroots = phases.readroots(self)
-        phases.filterunknown(self, phaseroots)
         return phaseroots
 
191 @propertycache
190 @propertycache
192 def _phaserev(self):
191 def _phaserev(self):
193 cache = [phases.public] * len(self)
192 cache = [phases.public] * len(self)
194 for phase in phases.trackedphases:
193 for phase in phases.trackedphases:
195 roots = map(self.changelog.rev, self._phaseroots[phase])
194 roots = map(self.changelog.rev, self._phaseroots[phase])
196 if roots:
195 if roots:
197 for rev in roots:
196 for rev in roots:
198 cache[rev] = phase
197 cache[rev] = phase
199 for rev in self.changelog.descendants(*roots):
198 for rev in self.changelog.descendants(*roots):
200 cache[rev] = phase
199 cache[rev] = phase
201 return cache
200 return cache
202
201
203 @storecache('00changelog.i')
202 @storecache('00changelog.i')
204 def changelog(self):
203 def changelog(self):
205 c = changelog.changelog(self.sopener)
204 c = changelog.changelog(self.sopener)
206 if 'HG_PENDING' in os.environ:
205 if 'HG_PENDING' in os.environ:
207 p = os.environ['HG_PENDING']
206 p = os.environ['HG_PENDING']
208 if p.startswith(self.root):
207 if p.startswith(self.root):
209 c.readpending('00changelog.i.a')
208 c.readpending('00changelog.i.a')
210 return c
209 return c
211
210
212 @storecache('00manifest.i')
211 @storecache('00manifest.i')
213 def manifest(self):
212 def manifest(self):
214 return manifest.manifest(self.sopener)
213 return manifest.manifest(self.sopener)
215
214
216 @filecache('dirstate')
215 @filecache('dirstate')
217 def dirstate(self):
216 def dirstate(self):
218 warned = [0]
217 warned = [0]
219 def validate(node):
218 def validate(node):
220 try:
219 try:
221 self.changelog.rev(node)
220 self.changelog.rev(node)
222 return node
221 return node
223 except error.LookupError:
222 except error.LookupError:
224 if not warned[0]:
223 if not warned[0]:
225 warned[0] = True
224 warned[0] = True
226 self.ui.warn(_("warning: ignoring unknown"
225 self.ui.warn(_("warning: ignoring unknown"
227 " working parent %s!\n") % short(node))
226 " working parent %s!\n") % short(node))
228 return nullid
227 return nullid
229
228
230 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
229 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231
230
232 def __getitem__(self, changeid):
231 def __getitem__(self, changeid):
233 if changeid is None:
232 if changeid is None:
234 return context.workingctx(self)
233 return context.workingctx(self)
235 return context.changectx(self, changeid)
234 return context.changectx(self, changeid)
236
235
237 def __contains__(self, changeid):
236 def __contains__(self, changeid):
238 try:
237 try:
239 return bool(self.lookup(changeid))
238 return bool(self.lookup(changeid))
240 except error.RepoLookupError:
239 except error.RepoLookupError:
241 return False
240 return False
242
241
243 def __nonzero__(self):
242 def __nonzero__(self):
244 return True
243 return True
245
244
246 def __len__(self):
245 def __len__(self):
247 return len(self.changelog)
246 return len(self.changelog)
248
247
249 def __iter__(self):
248 def __iter__(self):
250 for i in xrange(len(self)):
249 for i in xrange(len(self)):
251 yield i
250 yield i
252
251
253 def revs(self, expr, *args):
252 def revs(self, expr, *args):
254 '''Return a list of revisions matching the given revset'''
253 '''Return a list of revisions matching the given revset'''
255 expr = revset.formatspec(expr, *args)
254 expr = revset.formatspec(expr, *args)
256 m = revset.match(None, expr)
255 m = revset.match(None, expr)
257 return [r for r in m(self, range(len(self)))]
256 return [r for r in m(self, range(len(self)))]
258
257
259 def set(self, expr, *args):
258 def set(self, expr, *args):
260 '''
259 '''
261 Yield a context for each matching revision, after doing arg
260 Yield a context for each matching revision, after doing arg
262 replacement via revset.formatspec
261 replacement via revset.formatspec
263 '''
262 '''
264 for r in self.revs(expr, *args):
263 for r in self.revs(expr, *args):
265 yield self[r]
264 yield self[r]
266
265
267 def url(self):
266 def url(self):
268 return 'file:' + self.root
267 return 'file:' + self.root
269
268
270 def hook(self, name, throw=False, **args):
269 def hook(self, name, throw=False, **args):
271 return hook.hook(self.ui, self, name, throw, **args)
270 return hook.hook(self.ui, self, name, throw, **args)
272
271
273 tag_disallowed = ':\r\n'
272 tag_disallowed = ':\r\n'
274
273
275 def _tag(self, names, node, message, local, user, date, extra={}):
274 def _tag(self, names, node, message, local, user, date, extra={}):
276 if isinstance(names, str):
275 if isinstance(names, str):
277 allchars = names
276 allchars = names
278 names = (names,)
277 names = (names,)
279 else:
278 else:
280 allchars = ''.join(names)
279 allchars = ''.join(names)
281 for c in self.tag_disallowed:
280 for c in self.tag_disallowed:
282 if c in allchars:
281 if c in allchars:
283 raise util.Abort(_('%r cannot be used in a tag name') % c)
282 raise util.Abort(_('%r cannot be used in a tag name') % c)
284
283
285 branches = self.branchmap()
284 branches = self.branchmap()
286 for name in names:
285 for name in names:
287 self.hook('pretag', throw=True, node=hex(node), tag=name,
286 self.hook('pretag', throw=True, node=hex(node), tag=name,
288 local=local)
287 local=local)
289 if name in branches:
288 if name in branches:
290 self.ui.warn(_("warning: tag %s conflicts with existing"
289 self.ui.warn(_("warning: tag %s conflicts with existing"
291 " branch name\n") % name)
290 " branch name\n") % name)
292
291
293 def writetags(fp, names, munge, prevtags):
292 def writetags(fp, names, munge, prevtags):
294 fp.seek(0, 2)
293 fp.seek(0, 2)
295 if prevtags and prevtags[-1] != '\n':
294 if prevtags and prevtags[-1] != '\n':
296 fp.write('\n')
295 fp.write('\n')
297 for name in names:
296 for name in names:
298 m = munge and munge(name) or name
297 m = munge and munge(name) or name
299 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
298 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
300 old = self.tags().get(name, nullid)
299 old = self.tags().get(name, nullid)
301 fp.write('%s %s\n' % (hex(old), m))
300 fp.write('%s %s\n' % (hex(old), m))
302 fp.write('%s %s\n' % (hex(node), m))
301 fp.write('%s %s\n' % (hex(node), m))
303 fp.close()
302 fp.close()
304
303
305 prevtags = ''
304 prevtags = ''
306 if local:
305 if local:
307 try:
306 try:
308 fp = self.opener('localtags', 'r+')
307 fp = self.opener('localtags', 'r+')
309 except IOError:
308 except IOError:
310 fp = self.opener('localtags', 'a')
309 fp = self.opener('localtags', 'a')
311 else:
310 else:
312 prevtags = fp.read()
311 prevtags = fp.read()
313
312
314 # local tags are stored in the current charset
313 # local tags are stored in the current charset
315 writetags(fp, names, None, prevtags)
314 writetags(fp, names, None, prevtags)
316 for name in names:
315 for name in names:
317 self.hook('tag', node=hex(node), tag=name, local=local)
316 self.hook('tag', node=hex(node), tag=name, local=local)
318 return
317 return
319
318
320 try:
319 try:
321 fp = self.wfile('.hgtags', 'rb+')
320 fp = self.wfile('.hgtags', 'rb+')
322 except IOError, e:
321 except IOError, e:
323 if e.errno != errno.ENOENT:
322 if e.errno != errno.ENOENT:
324 raise
323 raise
325 fp = self.wfile('.hgtags', 'ab')
324 fp = self.wfile('.hgtags', 'ab')
326 else:
325 else:
327 prevtags = fp.read()
326 prevtags = fp.read()
328
327
329 # committed tags are stored in UTF-8
328 # committed tags are stored in UTF-8
330 writetags(fp, names, encoding.fromlocal, prevtags)
329 writetags(fp, names, encoding.fromlocal, prevtags)
331
330
332 fp.close()
331 fp.close()
333
332
334 self.invalidatecaches()
333 self.invalidatecaches()
335
334
336 if '.hgtags' not in self.dirstate:
335 if '.hgtags' not in self.dirstate:
337 self[None].add(['.hgtags'])
336 self[None].add(['.hgtags'])
338
337
339 m = matchmod.exact(self.root, '', ['.hgtags'])
338 m = matchmod.exact(self.root, '', ['.hgtags'])
340 tagnode = self.commit(message, user, date, extra=extra, match=m)
339 tagnode = self.commit(message, user, date, extra=extra, match=m)
341
340
342 for name in names:
341 for name in names:
343 self.hook('tag', node=hex(node), tag=name, local=local)
342 self.hook('tag', node=hex(node), tag=name, local=local)
344
343
345 return tagnode
344 return tagnode
346
345
347 def tag(self, names, node, message, local, user, date):
346 def tag(self, names, node, message, local, user, date):
348 '''tag a revision with one or more symbolic names.
347 '''tag a revision with one or more symbolic names.
349
348
350 names is a list of strings or, when adding a single tag, names may be a
349 names is a list of strings or, when adding a single tag, names may be a
351 string.
350 string.
352
351
353 if local is True, the tags are stored in a per-repository file.
352 if local is True, the tags are stored in a per-repository file.
354 otherwise, they are stored in the .hgtags file, and a new
353 otherwise, they are stored in the .hgtags file, and a new
355 changeset is committed with the change.
354 changeset is committed with the change.
356
355
357 keyword arguments:
356 keyword arguments:
358
357
359 local: whether to store tags in non-version-controlled file
358 local: whether to store tags in non-version-controlled file
360 (default False)
359 (default False)
361
360
362 message: commit message to use if committing
361 message: commit message to use if committing
363
362
364 user: name of user to use if committing
363 user: name of user to use if committing
365
364
366 date: date tuple to use if committing'''
365 date: date tuple to use if committing'''
367
366
368 if not local:
367 if not local:
369 for x in self.status()[:5]:
368 for x in self.status()[:5]:
370 if '.hgtags' in x:
369 if '.hgtags' in x:
371 raise util.Abort(_('working copy of .hgtags is changed '
370 raise util.Abort(_('working copy of .hgtags is changed '
372 '(please commit .hgtags manually)'))
371 '(please commit .hgtags manually)'))
373
372
374 self.tags() # instantiate the cache
373 self.tags() # instantiate the cache
375 self._tag(names, node, message, local, user, date)
374 self._tag(names, node, message, local, user, date)
376
375
377 @propertycache
376 @propertycache
378 def _tagscache(self):
377 def _tagscache(self):
379 '''Returns a tagscache object that contains various tags related caches.'''
378 '''Returns a tagscache object that contains various tags related caches.'''
380
379
381 # This simplifies its cache management by having one decorated
380 # This simplifies its cache management by having one decorated
382 # function (this one) and the rest simply fetch things from it.
381 # function (this one) and the rest simply fetch things from it.
383 class tagscache(object):
382 class tagscache(object):
384 def __init__(self):
383 def __init__(self):
385 # These two define the set of tags for this repository. tags
384 # These two define the set of tags for this repository. tags
386 # maps tag name to node; tagtypes maps tag name to 'global' or
385 # maps tag name to node; tagtypes maps tag name to 'global' or
387 # 'local'. (Global tags are defined by .hgtags across all
386 # 'local'. (Global tags are defined by .hgtags across all
388 # heads, and local tags are defined in .hg/localtags.)
387 # heads, and local tags are defined in .hg/localtags.)
389 # They constitute the in-memory cache of tags.
388 # They constitute the in-memory cache of tags.
390 self.tags = self.tagtypes = None
389 self.tags = self.tagtypes = None
391
390
392 self.nodetagscache = self.tagslist = None
391 self.nodetagscache = self.tagslist = None
393
392
394 cache = tagscache()
393 cache = tagscache()
395 cache.tags, cache.tagtypes = self._findtags()
394 cache.tags, cache.tagtypes = self._findtags()
396
395
397 return cache
396 return cache
398
397
399 def tags(self):
398 def tags(self):
400 '''return a mapping of tag to node'''
399 '''return a mapping of tag to node'''
401 t = {}
400 t = {}
402 for k, v in self._tagscache.tags.iteritems():
401 for k, v in self._tagscache.tags.iteritems():
403 try:
402 try:
404 # ignore tags to unknown nodes
403 # ignore tags to unknown nodes
405 self.changelog.rev(v)
404 self.changelog.rev(v)
406 t[k] = v
405 t[k] = v
407 except error.LookupError:
406 except error.LookupError:
408 pass
407 pass
409 return t
408 return t
410
409
411 def _findtags(self):
410 def _findtags(self):
412 '''Do the hard work of finding tags. Return a pair of dicts
411 '''Do the hard work of finding tags. Return a pair of dicts
413 (tags, tagtypes) where tags maps tag name to node, and tagtypes
412 (tags, tagtypes) where tags maps tag name to node, and tagtypes
414 maps tag name to a string like \'global\' or \'local\'.
413 maps tag name to a string like \'global\' or \'local\'.
415 Subclasses or extensions are free to add their own tags, but
414 Subclasses or extensions are free to add their own tags, but
416 should be aware that the returned dicts will be retained for the
415 should be aware that the returned dicts will be retained for the
417 duration of the localrepo object.'''
416 duration of the localrepo object.'''
418
417
419 # XXX what tagtype should subclasses/extensions use? Currently
418 # XXX what tagtype should subclasses/extensions use? Currently
420 # mq and bookmarks add tags, but do not set the tagtype at all.
419 # mq and bookmarks add tags, but do not set the tagtype at all.
421 # Should each extension invent its own tag type? Should there
420 # Should each extension invent its own tag type? Should there
422 # be one tagtype for all such "virtual" tags? Or is the status
421 # be one tagtype for all such "virtual" tags? Or is the status
423 # quo fine?
422 # quo fine?
424
423
425 alltags = {} # map tag name to (node, hist)
424 alltags = {} # map tag name to (node, hist)
426 tagtypes = {}
425 tagtypes = {}
427
426
428 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
427 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
429 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
428 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
430
429
431 # Build the return dicts. Have to re-encode tag names because
430 # Build the return dicts. Have to re-encode tag names because
432 # the tags module always uses UTF-8 (in order not to lose info
431 # the tags module always uses UTF-8 (in order not to lose info
433 # writing to the cache), but the rest of Mercurial wants them in
432 # writing to the cache), but the rest of Mercurial wants them in
434 # local encoding.
433 # local encoding.
435 tags = {}
434 tags = {}
436 for (name, (node, hist)) in alltags.iteritems():
435 for (name, (node, hist)) in alltags.iteritems():
437 if node != nullid:
436 if node != nullid:
438 tags[encoding.tolocal(name)] = node
437 tags[encoding.tolocal(name)] = node
439 tags['tip'] = self.changelog.tip()
438 tags['tip'] = self.changelog.tip()
440 tagtypes = dict([(encoding.tolocal(name), value)
439 tagtypes = dict([(encoding.tolocal(name), value)
441 for (name, value) in tagtypes.iteritems()])
440 for (name, value) in tagtypes.iteritems()])
442 return (tags, tagtypes)
441 return (tags, tagtypes)
443
442
444 def tagtype(self, tagname):
443 def tagtype(self, tagname):
445 '''
444 '''
446 return the type of the given tag. result can be:
445 return the type of the given tag. result can be:
447
446
448 'local' : a local tag
447 'local' : a local tag
449 'global' : a global tag
448 'global' : a global tag
450 None : tag does not exist
449 None : tag does not exist
451 '''
450 '''
452
451
453 return self._tagscache.tagtypes.get(tagname)
452 return self._tagscache.tagtypes.get(tagname)
454
453
455 def tagslist(self):
454 def tagslist(self):
456 '''return a list of tags ordered by revision'''
455 '''return a list of tags ordered by revision'''
457 if not self._tagscache.tagslist:
456 if not self._tagscache.tagslist:
458 l = []
457 l = []
459 for t, n in self.tags().iteritems():
458 for t, n in self.tags().iteritems():
460 r = self.changelog.rev(n)
459 r = self.changelog.rev(n)
461 l.append((r, t, n))
460 l.append((r, t, n))
462 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
461 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
463
462
464 return self._tagscache.tagslist
463 return self._tagscache.tagslist
465
464
466 def nodetags(self, node):
465 def nodetags(self, node):
467 '''return the tags associated with a node'''
466 '''return the tags associated with a node'''
468 if not self._tagscache.nodetagscache:
467 if not self._tagscache.nodetagscache:
469 nodetagscache = {}
468 nodetagscache = {}
470 for t, n in self._tagscache.tags.iteritems():
469 for t, n in self._tagscache.tags.iteritems():
471 nodetagscache.setdefault(n, []).append(t)
470 nodetagscache.setdefault(n, []).append(t)
472 for tags in nodetagscache.itervalues():
471 for tags in nodetagscache.itervalues():
473 tags.sort()
472 tags.sort()
474 self._tagscache.nodetagscache = nodetagscache
473 self._tagscache.nodetagscache = nodetagscache
475 return self._tagscache.nodetagscache.get(node, [])
474 return self._tagscache.nodetagscache.get(node, [])
476
475
477 def nodebookmarks(self, node):
476 def nodebookmarks(self, node):
478 marks = []
477 marks = []
479 for bookmark, n in self._bookmarks.iteritems():
478 for bookmark, n in self._bookmarks.iteritems():
480 if n == node:
479 if n == node:
481 marks.append(bookmark)
480 marks.append(bookmark)
482 return sorted(marks)
481 return sorted(marks)
483
482
484 def _branchtags(self, partial, lrev):
483 def _branchtags(self, partial, lrev):
485 # TODO: rename this function?
484 # TODO: rename this function?
486 tiprev = len(self) - 1
485 tiprev = len(self) - 1
487 if lrev != tiprev:
486 if lrev != tiprev:
488 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
487 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
489 self._updatebranchcache(partial, ctxgen)
488 self._updatebranchcache(partial, ctxgen)
490 self._writebranchcache(partial, self.changelog.tip(), tiprev)
489 self._writebranchcache(partial, self.changelog.tip(), tiprev)
491
490
492 return partial
491 return partial
493
492
494 def updatebranchcache(self):
493 def updatebranchcache(self):
495 tip = self.changelog.tip()
494 tip = self.changelog.tip()
496 if self._branchcache is not None and self._branchcachetip == tip:
495 if self._branchcache is not None and self._branchcachetip == tip:
497 return
496 return
498
497
499 oldtip = self._branchcachetip
498 oldtip = self._branchcachetip
500 self._branchcachetip = tip
499 self._branchcachetip = tip
501 if oldtip is None or oldtip not in self.changelog.nodemap:
500 if oldtip is None or oldtip not in self.changelog.nodemap:
502 partial, last, lrev = self._readbranchcache()
501 partial, last, lrev = self._readbranchcache()
503 else:
502 else:
504 lrev = self.changelog.rev(oldtip)
503 lrev = self.changelog.rev(oldtip)
505 partial = self._branchcache
504 partial = self._branchcache
506
505
507 self._branchtags(partial, lrev)
506 self._branchtags(partial, lrev)
508 # this private cache holds all heads (not just tips)
507 # this private cache holds all heads (not just tips)
509 self._branchcache = partial
508 self._branchcache = partial
510
509
511 def branchmap(self):
510 def branchmap(self):
512 '''returns a dictionary {branch: [branchheads]}'''
511 '''returns a dictionary {branch: [branchheads]}'''
513 self.updatebranchcache()
512 self.updatebranchcache()
514 return self._branchcache
513 return self._branchcache
515
514
516 def branchtags(self):
515 def branchtags(self):
517 '''return a dict where branch names map to the tipmost head of
516 '''return a dict where branch names map to the tipmost head of
518 the branch, open heads come before closed'''
517 the branch, open heads come before closed'''
519 bt = {}
518 bt = {}
520 for bn, heads in self.branchmap().iteritems():
519 for bn, heads in self.branchmap().iteritems():
521 tip = heads[-1]
520 tip = heads[-1]
522 for h in reversed(heads):
521 for h in reversed(heads):
523 if 'close' not in self.changelog.read(h)[5]:
522 if 'close' not in self.changelog.read(h)[5]:
524 tip = h
523 tip = h
525 break
524 break
526 bt[bn] = tip
525 bt[bn] = tip
527 return bt
526 return bt
528
527
529 def _readbranchcache(self):
528 def _readbranchcache(self):
530 partial = {}
529 partial = {}
531 try:
530 try:
532 f = self.opener("cache/branchheads")
531 f = self.opener("cache/branchheads")
533 lines = f.read().split('\n')
532 lines = f.read().split('\n')
534 f.close()
533 f.close()
535 except (IOError, OSError):
534 except (IOError, OSError):
536 return {}, nullid, nullrev
535 return {}, nullid, nullrev
537
536
538 try:
537 try:
539 last, lrev = lines.pop(0).split(" ", 1)
538 last, lrev = lines.pop(0).split(" ", 1)
540 last, lrev = bin(last), int(lrev)
539 last, lrev = bin(last), int(lrev)
541 if lrev >= len(self) or self[lrev].node() != last:
540 if lrev >= len(self) or self[lrev].node() != last:
542 # invalidate the cache
541 # invalidate the cache
543 raise ValueError('invalidating branch cache (tip differs)')
542 raise ValueError('invalidating branch cache (tip differs)')
544 for l in lines:
543 for l in lines:
545 if not l:
544 if not l:
546 continue
545 continue
547 node, label = l.split(" ", 1)
546 node, label = l.split(" ", 1)
548 label = encoding.tolocal(label.strip())
547 label = encoding.tolocal(label.strip())
549 partial.setdefault(label, []).append(bin(node))
548 partial.setdefault(label, []).append(bin(node))
550 except KeyboardInterrupt:
549 except KeyboardInterrupt:
551 raise
550 raise
552 except Exception, inst:
551 except Exception, inst:
553 if self.ui.debugflag:
552 if self.ui.debugflag:
554 self.ui.warn(str(inst), '\n')
553 self.ui.warn(str(inst), '\n')
555 partial, last, lrev = {}, nullid, nullrev
554 partial, last, lrev = {}, nullid, nullrev
556 return partial, last, lrev
555 return partial, last, lrev
557
556
558 def _writebranchcache(self, branches, tip, tiprev):
557 def _writebranchcache(self, branches, tip, tiprev):
559 try:
558 try:
560 f = self.opener("cache/branchheads", "w", atomictemp=True)
559 f = self.opener("cache/branchheads", "w", atomictemp=True)
561 f.write("%s %s\n" % (hex(tip), tiprev))
560 f.write("%s %s\n" % (hex(tip), tiprev))
562 for label, nodes in branches.iteritems():
561 for label, nodes in branches.iteritems():
563 for node in nodes:
562 for node in nodes:
564 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
563 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
565 f.close()
564 f.close()
566 except (IOError, OSError):
565 except (IOError, OSError):
567 pass
566 pass
568
567
569 def _updatebranchcache(self, partial, ctxgen):
568 def _updatebranchcache(self, partial, ctxgen):
570 # collect new branch entries
569 # collect new branch entries
571 newbranches = {}
570 newbranches = {}
572 for c in ctxgen:
571 for c in ctxgen:
573 newbranches.setdefault(c.branch(), []).append(c.node())
572 newbranches.setdefault(c.branch(), []).append(c.node())
574 # if older branchheads are reachable from new ones, they aren't
573 # if older branchheads are reachable from new ones, they aren't
575 # really branchheads. Note checking parents is insufficient:
574 # really branchheads. Note checking parents is insufficient:
576 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
575 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
577 for branch, newnodes in newbranches.iteritems():
576 for branch, newnodes in newbranches.iteritems():
578 bheads = partial.setdefault(branch, [])
577 bheads = partial.setdefault(branch, [])
579 bheads.extend(newnodes)
578 bheads.extend(newnodes)
580 if len(bheads) <= 1:
579 if len(bheads) <= 1:
581 continue
580 continue
582 bheads = sorted(bheads, key=lambda x: self[x].rev())
581 bheads = sorted(bheads, key=lambda x: self[x].rev())
583 # starting from tip means fewer passes over reachable
582 # starting from tip means fewer passes over reachable
584 while newnodes:
583 while newnodes:
585 latest = newnodes.pop()
584 latest = newnodes.pop()
586 if latest not in bheads:
585 if latest not in bheads:
587 continue
586 continue
588 minbhrev = self[bheads[0]].node()
587 minbhrev = self[bheads[0]].node()
589 reachable = self.changelog.reachable(latest, minbhrev)
588 reachable = self.changelog.reachable(latest, minbhrev)
590 reachable.remove(latest)
589 reachable.remove(latest)
591 if reachable:
590 if reachable:
592 bheads = [b for b in bheads if b not in reachable]
591 bheads = [b for b in bheads if b not in reachable]
593 partial[branch] = bheads
592 partial[branch] = bheads
594
593
595 def lookup(self, key):
594 def lookup(self, key):
596 return self[key].node()
595 return self[key].node()
597
596
598 def lookupbranch(self, key, remote=None):
597 def lookupbranch(self, key, remote=None):
599 repo = remote or self
598 repo = remote or self
600 if key in repo.branchmap():
599 if key in repo.branchmap():
601 return key
600 return key
602
601
603 repo = (remote and remote.local()) and remote or self
602 repo = (remote and remote.local()) and remote or self
604 return repo[key].branch()
603 return repo[key].branch()
605
604
606 def known(self, nodes):
605 def known(self, nodes):
607 nm = self.changelog.nodemap
606 nm = self.changelog.nodemap
608 result = []
607 result = []
609 for n in nodes:
608 for n in nodes:
610 r = nm.get(n)
609 r = nm.get(n)
611 resp = not (r is None or self._phaserev[r] >= phases.secret)
610 resp = not (r is None or self._phaserev[r] >= phases.secret)
612 result.append(resp)
611 result.append(resp)
613 return result
612 return result
614
613
615 def local(self):
614 def local(self):
616 return self
615 return self
617
616
618 def join(self, f):
617 def join(self, f):
619 return os.path.join(self.path, f)
618 return os.path.join(self.path, f)
620
619
621 def wjoin(self, f):
620 def wjoin(self, f):
622 return os.path.join(self.root, f)
621 return os.path.join(self.root, f)
623
622
624 def file(self, f):
623 def file(self, f):
625 if f[0] == '/':
624 if f[0] == '/':
626 f = f[1:]
625 f = f[1:]
627 return filelog.filelog(self.sopener, f)
626 return filelog.filelog(self.sopener, f)
628
627
629 def changectx(self, changeid):
628 def changectx(self, changeid):
630 return self[changeid]
629 return self[changeid]
631
630
632 def parents(self, changeid=None):
631 def parents(self, changeid=None):
633 '''get list of changectxs for parents of changeid'''
632 '''get list of changectxs for parents of changeid'''
634 return self[changeid].parents()
633 return self[changeid].parents()
635
634
636 def setparents(self, p1, p2=nullid):
635 def setparents(self, p1, p2=nullid):
637 copies = self.dirstate.setparents(p1, p2)
636 copies = self.dirstate.setparents(p1, p2)
638 if copies:
637 if copies:
639 # Adjust copy records, the dirstate cannot do it, it
638 # Adjust copy records, the dirstate cannot do it, it
640 # requires access to parents manifests. Preserve them
639 # requires access to parents manifests. Preserve them
641 # only for entries added to first parent.
640 # only for entries added to first parent.
642 pctx = self[p1]
641 pctx = self[p1]
643 for f in copies:
642 for f in copies:
644 if f not in pctx and copies[f] in pctx:
643 if f not in pctx and copies[f] in pctx:
645 self.dirstate.copy(copies[f], f)
644 self.dirstate.copy(copies[f], f)
646
645
647 def filectx(self, path, changeid=None, fileid=None):
646 def filectx(self, path, changeid=None, fileid=None):
648 """changeid can be a changeset revision, node, or tag.
647 """changeid can be a changeset revision, node, or tag.
649 fileid can be a file revision or node."""
648 fileid can be a file revision or node."""
650 return context.filectx(self, path, changeid, fileid)
649 return context.filectx(self, path, changeid, fileid)
651
650
652 def getcwd(self):
651 def getcwd(self):
653 return self.dirstate.getcwd()
652 return self.dirstate.getcwd()
654
653
655 def pathto(self, f, cwd=None):
654 def pathto(self, f, cwd=None):
656 return self.dirstate.pathto(f, cwd)
655 return self.dirstate.pathto(f, cwd)
657
656
658 def wfile(self, f, mode='r'):
657 def wfile(self, f, mode='r'):
659 return self.wopener(f, mode)
658 return self.wopener(f, mode)
660
659
661 def _link(self, f):
660 def _link(self, f):
662 return os.path.islink(self.wjoin(f))
661 return os.path.islink(self.wjoin(f))
663
662
664 def _loadfilter(self, filter):
663 def _loadfilter(self, filter):
665 if filter not in self.filterpats:
664 if filter not in self.filterpats:
666 l = []
665 l = []
667 for pat, cmd in self.ui.configitems(filter):
666 for pat, cmd in self.ui.configitems(filter):
668 if cmd == '!':
667 if cmd == '!':
669 continue
668 continue
670 mf = matchmod.match(self.root, '', [pat])
669 mf = matchmod.match(self.root, '', [pat])
671 fn = None
670 fn = None
672 params = cmd
671 params = cmd
673 for name, filterfn in self._datafilters.iteritems():
672 for name, filterfn in self._datafilters.iteritems():
674 if cmd.startswith(name):
673 if cmd.startswith(name):
675 fn = filterfn
674 fn = filterfn
676 params = cmd[len(name):].lstrip()
675 params = cmd[len(name):].lstrip()
677 break
676 break
678 if not fn:
677 if not fn:
679 fn = lambda s, c, **kwargs: util.filter(s, c)
678 fn = lambda s, c, **kwargs: util.filter(s, c)
680 # Wrap old filters not supporting keyword arguments
679 # Wrap old filters not supporting keyword arguments
681 if not inspect.getargspec(fn)[2]:
680 if not inspect.getargspec(fn)[2]:
682 oldfn = fn
681 oldfn = fn
683 fn = lambda s, c, **kwargs: oldfn(s, c)
682 fn = lambda s, c, **kwargs: oldfn(s, c)
684 l.append((mf, fn, params))
683 l.append((mf, fn, params))
685 self.filterpats[filter] = l
684 self.filterpats[filter] = l
686 return self.filterpats[filter]
685 return self.filterpats[filter]
687
686
688 def _filter(self, filterpats, filename, data):
687 def _filter(self, filterpats, filename, data):
689 for mf, fn, cmd in filterpats:
688 for mf, fn, cmd in filterpats:
690 if mf(filename):
689 if mf(filename):
691 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
690 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
692 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
691 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
693 break
692 break
694
693
695 return data
694 return data
696
695
697 @propertycache
696 @propertycache
698 def _encodefilterpats(self):
697 def _encodefilterpats(self):
699 return self._loadfilter('encode')
698 return self._loadfilter('encode')
700
699
701 @propertycache
700 @propertycache
702 def _decodefilterpats(self):
701 def _decodefilterpats(self):
703 return self._loadfilter('decode')
702 return self._loadfilter('decode')
704
703
705 def adddatafilter(self, name, filter):
704 def adddatafilter(self, name, filter):
706 self._datafilters[name] = filter
705 self._datafilters[name] = filter
707
706
708 def wread(self, filename):
707 def wread(self, filename):
709 if self._link(filename):
708 if self._link(filename):
710 data = os.readlink(self.wjoin(filename))
709 data = os.readlink(self.wjoin(filename))
711 else:
710 else:
712 data = self.wopener.read(filename)
711 data = self.wopener.read(filename)
713 return self._filter(self._encodefilterpats, filename, data)
712 return self._filter(self._encodefilterpats, filename, data)
714
713
715 def wwrite(self, filename, data, flags):
714 def wwrite(self, filename, data, flags):
716 data = self._filter(self._decodefilterpats, filename, data)
715 data = self._filter(self._decodefilterpats, filename, data)
717 if 'l' in flags:
716 if 'l' in flags:
718 self.wopener.symlink(data, filename)
717 self.wopener.symlink(data, filename)
719 else:
718 else:
720 self.wopener.write(filename, data)
719 self.wopener.write(filename, data)
721 if 'x' in flags:
720 if 'x' in flags:
722 util.setflags(self.wjoin(filename), False, True)
721 util.setflags(self.wjoin(filename), False, True)
723
722
724 def wwritedata(self, filename, data):
723 def wwritedata(self, filename, data):
725 return self._filter(self._decodefilterpats, filename, data)
724 return self._filter(self._decodefilterpats, filename, data)
726
725
727 def transaction(self, desc):
726 def transaction(self, desc):
728 tr = self._transref and self._transref() or None
727 tr = self._transref and self._transref() or None
729 if tr and tr.running():
728 if tr and tr.running():
730 return tr.nest()
729 return tr.nest()
731
730
732 # abort here if the journal already exists
731 # abort here if the journal already exists
733 if os.path.exists(self.sjoin("journal")):
732 if os.path.exists(self.sjoin("journal")):
734 raise error.RepoError(
733 raise error.RepoError(
735 _("abandoned transaction found - run hg recover"))
734 _("abandoned transaction found - run hg recover"))
736
735
737 self._writejournal(desc)
736 self._writejournal(desc)
738 renames = [(x, undoname(x)) for x in self._journalfiles()]
737 renames = [(x, undoname(x)) for x in self._journalfiles()]
739
738
740 tr = transaction.transaction(self.ui.warn, self.sopener,
739 tr = transaction.transaction(self.ui.warn, self.sopener,
741 self.sjoin("journal"),
740 self.sjoin("journal"),
742 aftertrans(renames),
741 aftertrans(renames),
743 self.store.createmode)
742 self.store.createmode)
744 self._transref = weakref.ref(tr)
743 self._transref = weakref.ref(tr)
745 return tr
744 return tr
746
745
747 def _journalfiles(self):
746 def _journalfiles(self):
748 return (self.sjoin('journal'), self.join('journal.dirstate'),
747 return (self.sjoin('journal'), self.join('journal.dirstate'),
749 self.join('journal.branch'), self.join('journal.desc'),
748 self.join('journal.branch'), self.join('journal.desc'),
750 self.join('journal.bookmarks'),
749 self.join('journal.bookmarks'),
751 self.sjoin('journal.phaseroots'))
750 self.sjoin('journal.phaseroots'))
752
751
753 def undofiles(self):
752 def undofiles(self):
754 return [undoname(x) for x in self._journalfiles()]
753 return [undoname(x) for x in self._journalfiles()]
755
754
756 def _writejournal(self, desc):
755 def _writejournal(self, desc):
757 self.opener.write("journal.dirstate",
756 self.opener.write("journal.dirstate",
758 self.opener.tryread("dirstate"))
757 self.opener.tryread("dirstate"))
759 self.opener.write("journal.branch",
758 self.opener.write("journal.branch",
760 encoding.fromlocal(self.dirstate.branch()))
759 encoding.fromlocal(self.dirstate.branch()))
761 self.opener.write("journal.desc",
760 self.opener.write("journal.desc",
762 "%d\n%s\n" % (len(self), desc))
761 "%d\n%s\n" % (len(self), desc))
763 self.opener.write("journal.bookmarks",
762 self.opener.write("journal.bookmarks",
764 self.opener.tryread("bookmarks"))
763 self.opener.tryread("bookmarks"))
765 self.sopener.write("journal.phaseroots",
764 self.sopener.write("journal.phaseroots",
766 self.sopener.tryread("phaseroots"))
765 self.sopener.tryread("phaseroots"))
767
766
768 def recover(self):
767 def recover(self):
769 lock = self.lock()
768 lock = self.lock()
770 try:
769 try:
771 if os.path.exists(self.sjoin("journal")):
770 if os.path.exists(self.sjoin("journal")):
772 self.ui.status(_("rolling back interrupted transaction\n"))
771 self.ui.status(_("rolling back interrupted transaction\n"))
773 transaction.rollback(self.sopener, self.sjoin("journal"),
772 transaction.rollback(self.sopener, self.sjoin("journal"),
774 self.ui.warn)
773 self.ui.warn)
775 self.invalidate()
774 self.invalidate()
776 return True
775 return True
777 else:
776 else:
778 self.ui.warn(_("no interrupted transaction available\n"))
777 self.ui.warn(_("no interrupted transaction available\n"))
779 return False
778 return False
780 finally:
779 finally:
781 lock.release()
780 lock.release()
782
781
783 def rollback(self, dryrun=False, force=False):
782 def rollback(self, dryrun=False, force=False):
784 wlock = lock = None
783 wlock = lock = None
785 try:
784 try:
786 wlock = self.wlock()
785 wlock = self.wlock()
787 lock = self.lock()
786 lock = self.lock()
788 if os.path.exists(self.sjoin("undo")):
787 if os.path.exists(self.sjoin("undo")):
789 return self._rollback(dryrun, force)
788 return self._rollback(dryrun, force)
790 else:
789 else:
791 self.ui.warn(_("no rollback information available\n"))
790 self.ui.warn(_("no rollback information available\n"))
792 return 1
791 return 1
793 finally:
792 finally:
794 release(lock, wlock)
793 release(lock, wlock)
795
794
796 def _rollback(self, dryrun, force):
795 def _rollback(self, dryrun, force):
797 ui = self.ui
796 ui = self.ui
798 try:
797 try:
799 args = self.opener.read('undo.desc').splitlines()
798 args = self.opener.read('undo.desc').splitlines()
800 (oldlen, desc, detail) = (int(args[0]), args[1], None)
799 (oldlen, desc, detail) = (int(args[0]), args[1], None)
801 if len(args) >= 3:
800 if len(args) >= 3:
802 detail = args[2]
801 detail = args[2]
803 oldtip = oldlen - 1
802 oldtip = oldlen - 1
804
803
805 if detail and ui.verbose:
804 if detail and ui.verbose:
806 msg = (_('repository tip rolled back to revision %s'
805 msg = (_('repository tip rolled back to revision %s'
807 ' (undo %s: %s)\n')
806 ' (undo %s: %s)\n')
808 % (oldtip, desc, detail))
807 % (oldtip, desc, detail))
809 else:
808 else:
810 msg = (_('repository tip rolled back to revision %s'
809 msg = (_('repository tip rolled back to revision %s'
811 ' (undo %s)\n')
810 ' (undo %s)\n')
812 % (oldtip, desc))
811 % (oldtip, desc))
813 except IOError:
812 except IOError:
814 msg = _('rolling back unknown transaction\n')
813 msg = _('rolling back unknown transaction\n')
815 desc = None
814 desc = None
816
815
817 if not force and self['.'] != self['tip'] and desc == 'commit':
816 if not force and self['.'] != self['tip'] and desc == 'commit':
818 raise util.Abort(
817 raise util.Abort(
819 _('rollback of last commit while not checked out '
818 _('rollback of last commit while not checked out '
820 'may lose data'), hint=_('use -f to force'))
819 'may lose data'), hint=_('use -f to force'))
821
820
822 ui.status(msg)
821 ui.status(msg)
823 if dryrun:
822 if dryrun:
824 return 0
823 return 0
825
824
826 parents = self.dirstate.parents()
825 parents = self.dirstate.parents()
827 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
826 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
828 if os.path.exists(self.join('undo.bookmarks')):
827 if os.path.exists(self.join('undo.bookmarks')):
829 util.rename(self.join('undo.bookmarks'),
828 util.rename(self.join('undo.bookmarks'),
830 self.join('bookmarks'))
829 self.join('bookmarks'))
831 if os.path.exists(self.sjoin('undo.phaseroots')):
830 if os.path.exists(self.sjoin('undo.phaseroots')):
832 util.rename(self.sjoin('undo.phaseroots'),
831 util.rename(self.sjoin('undo.phaseroots'),
833 self.sjoin('phaseroots'))
832 self.sjoin('phaseroots'))
834 self.invalidate()
833 self.invalidate()
835
834
836 parentgone = (parents[0] not in self.changelog.nodemap or
835 parentgone = (parents[0] not in self.changelog.nodemap or
837 parents[1] not in self.changelog.nodemap)
836 parents[1] not in self.changelog.nodemap)
838 if parentgone:
837 if parentgone:
839 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
838 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
840 try:
839 try:
841 branch = self.opener.read('undo.branch')
840 branch = self.opener.read('undo.branch')
842 self.dirstate.setbranch(branch)
841 self.dirstate.setbranch(branch)
843 except IOError:
842 except IOError:
844 ui.warn(_('named branch could not be reset: '
843 ui.warn(_('named branch could not be reset: '
845 'current branch is still \'%s\'\n')
844 'current branch is still \'%s\'\n')
846 % self.dirstate.branch())
845 % self.dirstate.branch())
847
846
848 self.dirstate.invalidate()
847 self.dirstate.invalidate()
849 parents = tuple([p.rev() for p in self.parents()])
848 parents = tuple([p.rev() for p in self.parents()])
850 if len(parents) > 1:
849 if len(parents) > 1:
851 ui.status(_('working directory now based on '
850 ui.status(_('working directory now based on '
852 'revisions %d and %d\n') % parents)
851 'revisions %d and %d\n') % parents)
853 else:
852 else:
854 ui.status(_('working directory now based on '
853 ui.status(_('working directory now based on '
855 'revision %d\n') % parents)
854 'revision %d\n') % parents)
856 self.destroyed()
855 self.destroyed()
857 return 0
856 return 0
858
857
859 def invalidatecaches(self):
858 def invalidatecaches(self):
860 def delcache(name):
859 def delcache(name):
861 try:
860 try:
862 delattr(self, name)
861 delattr(self, name)
863 except AttributeError:
862 except AttributeError:
864 pass
863 pass
865
864
866 delcache('_tagscache')
865 delcache('_tagscache')
867 delcache('_phaserev')
866 delcache('_phaserev')
868
867
869 self._branchcache = None # in UTF-8
868 self._branchcache = None # in UTF-8
870 self._branchcachetip = None
869 self._branchcachetip = None
871
870
872 def invalidatedirstate(self):
871 def invalidatedirstate(self):
873 '''Invalidates the dirstate, causing the next call to dirstate
872 '''Invalidates the dirstate, causing the next call to dirstate
874 to check if it was modified since the last time it was read,
873 to check if it was modified since the last time it was read,
875 rereading it if it has.
874 rereading it if it has.
876
875
877 This is different to dirstate.invalidate() that it doesn't always
876 This is different to dirstate.invalidate() that it doesn't always
878 rereads the dirstate. Use dirstate.invalidate() if you want to
877 rereads the dirstate. Use dirstate.invalidate() if you want to
879 explicitly read the dirstate again (i.e. restoring it to a previous
878 explicitly read the dirstate again (i.e. restoring it to a previous
880 known good state).'''
879 known good state).'''
881 if 'dirstate' in self.__dict__:
880 if 'dirstate' in self.__dict__:
882 for k in self.dirstate._filecache:
881 for k in self.dirstate._filecache:
883 try:
882 try:
884 delattr(self.dirstate, k)
883 delattr(self.dirstate, k)
885 except AttributeError:
884 except AttributeError:
886 pass
885 pass
887 delattr(self, 'dirstate')
886 delattr(self, 'dirstate')
888
887
889 def invalidate(self):
888 def invalidate(self):
890 for k in self._filecache:
889 for k in self._filecache:
891 # dirstate is invalidated separately in invalidatedirstate()
890 # dirstate is invalidated separately in invalidatedirstate()
892 if k == 'dirstate':
891 if k == 'dirstate':
893 continue
892 continue
894
893
895 try:
894 try:
896 delattr(self, k)
895 delattr(self, k)
897 except AttributeError:
896 except AttributeError:
898 pass
897 pass
899 self.invalidatecaches()
898 self.invalidatecaches()
900
899
901 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
902 try:
901 try:
903 l = lock.lock(lockname, 0, releasefn, desc=desc)
902 l = lock.lock(lockname, 0, releasefn, desc=desc)
904 except error.LockHeld, inst:
903 except error.LockHeld, inst:
905 if not wait:
904 if not wait:
906 raise
905 raise
907 self.ui.warn(_("waiting for lock on %s held by %r\n") %
906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
908 (desc, inst.locker))
907 (desc, inst.locker))
909 # default to 600 seconds timeout
908 # default to 600 seconds timeout
910 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
911 releasefn, desc=desc)
910 releasefn, desc=desc)
912 if acquirefn:
911 if acquirefn:
913 acquirefn()
912 acquirefn()
914 return l
913 return l
915
914
916 def _afterlock(self, callback):
915 def _afterlock(self, callback):
917 """add a callback to the current repository lock.
916 """add a callback to the current repository lock.
918
917
919 The callback will be executed on lock release."""
918 The callback will be executed on lock release."""
920 l = self._lockref and self._lockref()
919 l = self._lockref and self._lockref()
921 if l:
920 if l:
922 l.postrelease.append(callback)
921 l.postrelease.append(callback)

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
                self._dirtyphases = False
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
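
    # Sketch of the usual caller pattern (commitctx below follows it): take
    # the store lock, open a transaction, and release both in finally
    # blocks. 'example' is a hypothetical transaction name.
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction("example")
    #         try:
    #             # ... modify the store ...
    #             tr.close()
    #         finally:
    #             tr.release()
    #     finally:
    #         lock.release()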

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
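
    # For illustration (a sketch restating the code above, not new
    # behavior): after a rename of 'foo' to 'bar' is detected, the filelog
    # entry for 'bar' carries
    #
    #     meta = {'copy': cfname, 'copyrev': hex(crev)}
    #
    # and its first parent is set to nullid, which tells readers to look up
    # the copy source instead of following the first parent.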

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
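
    # Caller's-eye sketch (assumes 'repo' and 'ui' are in scope): commit()
    # returns None when nothing changed, otherwise the new changeset node.
    #
    #     node = repo.commit(text="fix encoding", user="someone@example.com")
    #     if node is None:
    #         ui.status("nothing changed\n")
    #     else:
    #         ui.status("committed %s\n" % short(node))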

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
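
    # Sketch of walking every tracked file in the working directory with an
    # always-matcher (matchmod is the module imported at the top of this
    # file):
    #
    #     m = matchmod.always(repo.root, '')
    #     for f in repo.walk(m):
    #         repo.ui.write("%s\n" % f)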
1325
1324
1326 def status(self, node1='.', node2=None, match=None,
1325 def status(self, node1='.', node2=None, match=None,
1327 ignored=False, clean=False, unknown=False,
1326 ignored=False, clean=False, unknown=False,
1328 listsubrepos=False):
1327 listsubrepos=False):
1329 """return status of files between two nodes or node and working directory
1328 """return status of files between two nodes or node and working directory
1330
1329
1331 If node1 is None, use the first dirstate parent instead.
1330 If node1 is None, use the first dirstate parent instead.
1332 If node2 is None, compare node1 with working directory.
1331 If node2 is None, compare node1 with working directory.
1333 """
1332 """
1334
1333
1335 def mfmatches(ctx):
1334 def mfmatches(ctx):
1336 mf = ctx.manifest().copy()
1335 mf = ctx.manifest().copy()
1337 for fn in mf.keys():
1336 for fn in mf.keys():
1338 if not match(fn):
1337 if not match(fn):
1339 del mf[fn]
1338 del mf[fn]
1340 return mf
1339 return mf
1341
1340
1342 if isinstance(node1, context.changectx):
1341 if isinstance(node1, context.changectx):
1343 ctx1 = node1
1342 ctx1 = node1
1344 else:
1343 else:
1345 ctx1 = self[node1]
1344 ctx1 = self[node1]
1346 if isinstance(node2, context.changectx):
1345 if isinstance(node2, context.changectx):
1347 ctx2 = node2
1346 ctx2 = node2
1348 else:
1347 else:
1349 ctx2 = self[node2]
1348 ctx2 = self[node2]
1350
1349
1351 working = ctx2.rev() is None
1350 working = ctx2.rev() is None
1352 parentworking = working and ctx1 == self['.']
1351 parentworking = working and ctx1 == self['.']
1353 match = match or matchmod.always(self.root, self.getcwd())
1352 match = match or matchmod.always(self.root, self.getcwd())
1354 listignored, listclean, listunknown = ignored, clean, unknown
1353 listignored, listclean, listunknown = ignored, clean, unknown
1355
1354
1356 # load earliest manifest first for caching reasons
1355 # load earliest manifest first for caching reasons
1357 if not working and ctx2.rev() < ctx1.rev():
1356 if not working and ctx2.rev() < ctx1.rev():
1358 ctx2.manifest()
1357 ctx2.manifest()
1359
1358
1360 if not parentworking:
1359 if not parentworking:
1361 def bad(f, msg):
1360 def bad(f, msg):
1362 # 'f' may be a directory pattern from 'match.files()',
1361 # 'f' may be a directory pattern from 'match.files()',
1363 # so 'f not in ctx1' is not enough
1362 # so 'f not in ctx1' is not enough
1364 if f not in ctx1 and f not in ctx1.dirs():
1363 if f not in ctx1 and f not in ctx1.dirs():
1365 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1364 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1366 match.bad = bad
1365 match.bad = bad
1367
1366
1368 if working: # we need to scan the working dir
1367 if working: # we need to scan the working dir
1369 subrepos = []
1368 subrepos = []
1370 if '.hgsub' in self.dirstate:
1369 if '.hgsub' in self.dirstate:
1371 subrepos = ctx2.substate.keys()
1370 subrepos = ctx2.substate.keys()
1372 s = self.dirstate.status(match, subrepos, listignored,
1371 s = self.dirstate.status(match, subrepos, listignored,
1373 listclean, listunknown)
1372 listclean, listunknown)
1374 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1373 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1375
1374
1376 # check for any possibly clean files
1375 # check for any possibly clean files
1377 if parentworking and cmp:
1376 if parentworking and cmp:
1378 fixup = []
1377 fixup = []
1379 # do a full compare of any files that might have changed
1378 # do a full compare of any files that might have changed
1380 for f in sorted(cmp):
1379 for f in sorted(cmp):
1381 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1380 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1382 or ctx1[f].cmp(ctx2[f])):
1381 or ctx1[f].cmp(ctx2[f])):
1383 modified.append(f)
1382 modified.append(f)
1384 else:
1383 else:
1385 fixup.append(f)
1384 fixup.append(f)
1386
1385
1387 # update dirstate for files that are actually clean
1386 # update dirstate for files that are actually clean
1388 if fixup:
1387 if fixup:
1389 if listclean:
1388 if listclean:
1390 clean += fixup
1389 clean += fixup
1391
1390
1392 try:
1391 try:
1393 # updating the dirstate is optional
1392 # updating the dirstate is optional
1394 # so we don't wait on the lock
1393 # so we don't wait on the lock
1395 wlock = self.wlock(False)
1394 wlock = self.wlock(False)
1396 try:
1395 try:
1397 for f in fixup:
1396 for f in fixup:
1398 self.dirstate.normal(f)
1397 self.dirstate.normal(f)
1399 finally:
1398 finally:
1400 wlock.release()
1399 wlock.release()
1401 except error.LockError:
1400 except error.LockError:
1402 pass
1401 pass
1403
1402
1404 if not parentworking:
1403 if not parentworking:
1405 mf1 = mfmatches(ctx1)
1404 mf1 = mfmatches(ctx1)
1406 if working:
1405 if working:
1407 # we are comparing working dir against non-parent
1406 # we are comparing working dir against non-parent
1408 # generate a pseudo-manifest for the working dir
1407 # generate a pseudo-manifest for the working dir
1409 mf2 = mfmatches(self['.'])
1408 mf2 = mfmatches(self['.'])
1410 for f in cmp + modified + added:
1409 for f in cmp + modified + added:
1411 mf2[f] = None
1410 mf2[f] = None
1412 mf2.set(f, ctx2.flags(f))
1411 mf2.set(f, ctx2.flags(f))
1413 for f in removed:
1412 for f in removed:
1414 if f in mf2:
1413 if f in mf2:
1415 del mf2[f]
1414 del mf2[f]
1416 else:
1415 else:
1417 # we are comparing two revisions
1416 # we are comparing two revisions
1418 deleted, unknown, ignored = [], [], []
1417 deleted, unknown, ignored = [], [], []
1419 mf2 = mfmatches(ctx2)
1418 mf2 = mfmatches(ctx2)
1420
1419
1421 modified, added, clean = [], [], []
1420 modified, added, clean = [], [], []
1422 for fn in mf2:
1421 for fn in mf2:
1423 if fn in mf1:
1422 if fn in mf1:
1424 if (fn not in deleted and
1423 if (fn not in deleted and
1425 (mf1.flags(fn) != mf2.flags(fn) or
1424 (mf1.flags(fn) != mf2.flags(fn) or
1426 (mf1[fn] != mf2[fn] and
1425 (mf1[fn] != mf2[fn] and
1427 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1426 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1428 modified.append(fn)
1427 modified.append(fn)
1429 elif listclean:
1428 elif listclean:
1430 clean.append(fn)
1429 clean.append(fn)
1431 del mf1[fn]
1430 del mf1[fn]
1432 elif fn not in deleted:
1431 elif fn not in deleted:
1433 added.append(fn)
1432 added.append(fn)
1434 removed = mf1.keys()
1433 removed = mf1.keys()
1435
1434
1436 if working and modified and not self.dirstate._checklink:
1435 if working and modified and not self.dirstate._checklink:
1437 # Symlink placeholders may get non-symlink-like contents
1436 # Symlink placeholders may get non-symlink-like contents
1438 # via user error or dereferencing by NFS or Samba servers,
1437 # via user error or dereferencing by NFS or Samba servers,
1439 # so we filter out any placeholders that don't look like a
1438 # so we filter out any placeholders that don't look like a
1440 # symlink
1439 # symlink
1441 sane = []
1440 sane = []
1442 for f in modified:
1441 for f in modified:
1443 if ctx2.flags(f) == 'l':
1442 if ctx2.flags(f) == 'l':
1444 d = ctx2[f].data()
1443 d = ctx2[f].data()
1445 if len(d) >= 1024 or '\n' in d or util.binary(d):
1444 if len(d) >= 1024 or '\n' in d or util.binary(d):
1446 self.ui.debug('ignoring suspect symlink placeholder'
1445 self.ui.debug('ignoring suspect symlink placeholder'
1447 ' "%s"\n' % f)
1446 ' "%s"\n' % f)
1448 continue
1447 continue
1449 sane.append(f)
1448 sane.append(f)
1450 modified = sane
1449 modified = sane
1451
1450
1452 r = modified, added, removed, deleted, unknown, ignored, clean
1451 r = modified, added, removed, deleted, unknown, ignored, clean
1453
1452
1454 if listsubrepos:
1453 if listsubrepos:
1455 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1454 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1456 if working:
1455 if working:
1457 rev2 = None
1456 rev2 = None
1458 else:
1457 else:
1459 rev2 = ctx2.substate[subpath][1]
1458 rev2 = ctx2.substate[subpath][1]
1460 try:
1459 try:
1461 submatch = matchmod.narrowmatcher(subpath, match)
1460 submatch = matchmod.narrowmatcher(subpath, match)
1462 s = sub.status(rev2, match=submatch, ignored=listignored,
1461 s = sub.status(rev2, match=submatch, ignored=listignored,
1463 clean=listclean, unknown=listunknown,
1462 clean=listclean, unknown=listunknown,
1464 listsubrepos=True)
1463 listsubrepos=True)
1465 for rfiles, sfiles in zip(r, s):
1464 for rfiles, sfiles in zip(r, s):
1466 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1465 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1467 except error.LookupError:
1466 except error.LookupError:
1468 self.ui.status(_("skipping missing subrepository: %s\n")
1467 self.ui.status(_("skipping missing subrepository: %s\n")
1469 % subpath)
1468 % subpath)
1470
1469
1471 for l in r:
1470 for l in r:
1472 l.sort()
1471 l.sort()
1473 return r
1472 return r
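
    # Sketch of consuming the result; the seven lists are always returned
    # in this order, with unknown/ignored/clean populated only when
    # requested:
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(unknown=True,
    #                                             ignored=True, clean=True)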

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
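
    # Sketch: heads that close a branch are filtered out by default, so to
    # list every head of the 'default' branch including closed ones:
    #
    #     for h in repo.branchheads('default', closed=True):
    #         repo.ui.write("%s\n" % short(h))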

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
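
    # A worked example of the sampling above (names hypothetical): with i
    # counting steps below 'top' and f doubling after each hit, the nodes
    # kept are those at distances 1, 2, 4, 8, ... from top.  Walking ten
    # ancestors toward 'bottom' therefore yields
    #
    #     l = [a1, a2, a4, a8]     # a_k = ancestor k steps below top
    #
    # giving the old wire protocol an exponentially spaced sample of the
    # chain instead of the whole chain.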

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible,
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset,
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result
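
    # Caller sketch, assuming 'other' is a peer repository object obtained
    # elsewhere (for instance via hg.repository, an assumption; the hg
    # module is not imported in this file):
    #
    #     result = repo.pull(other)                  # pull everything
    #     result = repo.pull(other, heads=[somenode])  # pull a subset
    #
    # Note that phase data is exchanged even when no changesets are
    # transferred.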

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed, synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ((missingheads and ::commonheads)
                    #               + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads,
                                                     remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public
                    # here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a set of roots; we may want to ensure it
                    # XXX is, but that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
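
    # Sketch of interpreting the return value documented in the docstring
    # above:
    #
    #     ret = repo.push(other)
    #     if ret is None:
    #         pass    # nothing to push
    #     elif ret == 0:
    #         pass    # HTTP error
    #     elif ret == 1:
    #         pass    # pushed, remote head count unchanged (or push refused)
    #     # other values: see addchangegroup()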

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)
1796
1795
1797 def getbundle(self, source, heads=None, common=None):
1796 def getbundle(self, source, heads=None, common=None):
1798 """Like changegroupsubset, but returns the set difference between the
1797 """Like changegroupsubset, but returns the set difference between the
1799 ancestors of heads and the ancestors of common.
1798 ancestors of heads and the ancestors of common.
1800
1799
1801 If heads is None, use the local heads. If common is None, use [nullid].
1800 If heads is None, use the local heads. If common is None, use [nullid].
1802
1801
1803 The nodes in common might not all be known locally due to the way the
1802 The nodes in common might not all be known locally due to the way the
1804 current discovery protocol works.
1803 current discovery protocol works.
1805 """
1804 """
1806 cl = self.changelog
1805 cl = self.changelog
1807 if common:
1806 if common:
1808 nm = cl.nodemap
1807 nm = cl.nodemap
1809 common = [n for n in common if n in nm]
1808 common = [n for n in common if n in nm]
1810 else:
1809 else:
1811 common = [nullid]
1810 common = [nullid]
1812 if not heads:
1811 if not heads:
1813 heads = cl.heads()
1812 heads = cl.heads()
1814 return self.getlocalbundle(source,
1813 return self.getlocalbundle(source,
1815 discovery.outgoing(cl, common, heads))
1814 discovery.outgoing(cl, common, heads))
1816
1815
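In revset terms the result corresponds roughly to `::heads - ::common`. A hypothetical pull-style invocation (`repo` and `c` are illustrative):

    # bundle all changesets not reachable from the common node c
    bundle = repo.getbundle('pull', heads=None, common=[c])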
1817 def _changegroupsubset(self, commonrevs, csets, heads, source):
1816 def _changegroupsubset(self, commonrevs, csets, heads, source):
1818
1817
1819 cl = self.changelog
1818 cl = self.changelog
1820 mf = self.manifest
1819 mf = self.manifest
1821 mfs = {} # needed manifests
1820 mfs = {} # needed manifests
1822 fnodes = {} # needed file nodes
1821 fnodes = {} # needed file nodes
1823 changedfiles = set()
1822 changedfiles = set()
1824 fstate = ['', {}]
1823 fstate = ['', {}]
1825 count = [0, 0]
1824 count = [0, 0]
1826
1825
1827 # can we go through the fast path?
1826 # can we go through the fast path?
1828 heads.sort()
1827 heads.sort()
1829 if heads == sorted(self.heads()):
1828 if heads == sorted(self.heads()):
1830 return self._changegroup(csets, source)
1829 return self._changegroup(csets, source)
1831
1830
1832 # slow path
1831 # slow path
1833 self.hook('preoutgoing', throw=True, source=source)
1832 self.hook('preoutgoing', throw=True, source=source)
1834 self.changegroupinfo(csets, source)
1833 self.changegroupinfo(csets, source)
1835
1834
1836 # filter any nodes that claim to be part of the known set
1835 # filter any nodes that claim to be part of the known set
1837 def prune(revlog, missing):
1836 def prune(revlog, missing):
1838 rr, rl = revlog.rev, revlog.linkrev
1837 rr, rl = revlog.rev, revlog.linkrev
1839 return [n for n in missing
1838 return [n for n in missing
1840 if rl(rr(n)) not in commonrevs]
1839 if rl(rr(n)) not in commonrevs]
1841
1840
1842 progress = self.ui.progress
1841 progress = self.ui.progress
1843 _bundling = _('bundling')
1842 _bundling = _('bundling')
1844 _changesets = _('changesets')
1843 _changesets = _('changesets')
1845 _manifests = _('manifests')
1844 _manifests = _('manifests')
1846 _files = _('files')
1845 _files = _('files')
1847
1846
1848 def lookup(revlog, x):
1847 def lookup(revlog, x):
1849 if revlog == cl:
1848 if revlog == cl:
1850 c = cl.read(x)
1849 c = cl.read(x)
1851 changedfiles.update(c[3])
1850 changedfiles.update(c[3])
1852 mfs.setdefault(c[0], x)
1851 mfs.setdefault(c[0], x)
1853 count[0] += 1
1852 count[0] += 1
1854 progress(_bundling, count[0],
1853 progress(_bundling, count[0],
1855 unit=_changesets, total=count[1])
1854 unit=_changesets, total=count[1])
1856 return x
1855 return x
1857 elif revlog == mf:
1856 elif revlog == mf:
1858 clnode = mfs[x]
1857 clnode = mfs[x]
1859 mdata = mf.readfast(x)
1858 mdata = mf.readfast(x)
1860 for f, n in mdata.iteritems():
1859 for f, n in mdata.iteritems():
1861 if f in changedfiles:
1860 if f in changedfiles:
1862 fnodes[f].setdefault(n, clnode)
1861 fnodes[f].setdefault(n, clnode)
1863 count[0] += 1
1862 count[0] += 1
1864 progress(_bundling, count[0],
1863 progress(_bundling, count[0],
1865 unit=_manifests, total=count[1])
1864 unit=_manifests, total=count[1])
1866 return clnode
1865 return clnode
1867 else:
1866 else:
1868 progress(_bundling, count[0], item=fstate[0],
1867 progress(_bundling, count[0], item=fstate[0],
1869 unit=_files, total=count[1])
1868 unit=_files, total=count[1])
1870 return fstate[1][x]
1869 return fstate[1][x]
1871
1870
1872 bundler = changegroup.bundle10(lookup)
1871 bundler = changegroup.bundle10(lookup)
1873 reorder = self.ui.config('bundle', 'reorder', 'auto')
1872 reorder = self.ui.config('bundle', 'reorder', 'auto')
1874 if reorder == 'auto':
1873 if reorder == 'auto':
1875 reorder = None
1874 reorder = None
1876 else:
1875 else:
1877 reorder = util.parsebool(reorder)
1876 reorder = util.parsebool(reorder)
1878
1877
1879 def gengroup():
1878 def gengroup():
1880 # Create a changenode group generator that will call our functions
1879 # Create a changenode group generator that will call our functions
1881 # back to lookup the owning changenode and collect information.
1880 # back to lookup the owning changenode and collect information.
1882 count[:] = [0, len(csets)]
1881 count[:] = [0, len(csets)]
1883 for chunk in cl.group(csets, bundler, reorder=reorder):
1882 for chunk in cl.group(csets, bundler, reorder=reorder):
1884 yield chunk
1883 yield chunk
1885 progress(_bundling, None)
1884 progress(_bundling, None)
1886
1885
1887 # Create a generator for the manifestnodes that calls our lookup
1886 # Create a generator for the manifestnodes that calls our lookup
1888 # and data collection functions back.
1887 # and data collection functions back.
1889 for f in changedfiles:
1888 for f in changedfiles:
1890 fnodes[f] = {}
1889 fnodes[f] = {}
1891 count[:] = [0, len(mfs)]
1890 count[:] = [0, len(mfs)]
1892 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1891 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1893 yield chunk
1892 yield chunk
1894 progress(_bundling, None)
1893 progress(_bundling, None)
1895
1894
1896 mfs.clear()
1895 mfs.clear()
1897
1896
1898 # Go through all our files in order sorted by name.
1897 # Go through all our files in order sorted by name.
1899 count[:] = [0, len(changedfiles)]
1898 count[:] = [0, len(changedfiles)]
1900 for fname in sorted(changedfiles):
1899 for fname in sorted(changedfiles):
1901 filerevlog = self.file(fname)
1900 filerevlog = self.file(fname)
1902 if not len(filerevlog):
1901 if not len(filerevlog):
1903 raise util.Abort(_("empty or missing revlog for %s") % fname)
1902 raise util.Abort(_("empty or missing revlog for %s") % fname)
1904 fstate[0] = fname
1903 fstate[0] = fname
1905 fstate[1] = fnodes.pop(fname, {})
1904 fstate[1] = fnodes.pop(fname, {})
1906
1905
1907 nodelist = prune(filerevlog, fstate[1])
1906 nodelist = prune(filerevlog, fstate[1])
1908 if nodelist:
1907 if nodelist:
1909 count[0] += 1
1908 count[0] += 1
1910 yield bundler.fileheader(fname)
1909 yield bundler.fileheader(fname)
1911 for chunk in filerevlog.group(nodelist, bundler, reorder):
1910 for chunk in filerevlog.group(nodelist, bundler, reorder):
1912 yield chunk
1911 yield chunk
1913
1912
1914 # Signal that no more groups are left.
1913 # Signal that no more groups are left.
1915 yield bundler.close()
1914 yield bundler.close()
1916 progress(_bundling, None)
1915 progress(_bundling, None)
1917
1916
1918 if csets:
1917 if csets:
1919 self.hook('outgoing', node=hex(csets[0]), source=source)
1918 self.hook('outgoing', node=hex(csets[0]), source=source)
1920
1919
1921 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1920 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1922
1921
1923 def changegroup(self, basenodes, source):
1922 def changegroup(self, basenodes, source):
1924 # to avoid a race we use changegroupsubset() (issue1320)
1923 # to avoid a race we use changegroupsubset() (issue1320)
1925 return self.changegroupsubset(basenodes, self.heads(), source)
1924 return self.changegroupsubset(basenodes, self.heads(), source)
1926
1925
1927 def _changegroup(self, nodes, source):
1926 def _changegroup(self, nodes, source):
1928 """Compute the changegroup of all nodes that we have that a recipient
1927 """Compute the changegroup of all nodes that we have that a recipient
1929 doesn't. Return a chunkbuffer object whose read() method will return
1928 doesn't. Return a chunkbuffer object whose read() method will return
1930 successive changegroup chunks.
1929 successive changegroup chunks.
1931
1930
1932 This is much easier than the previous function as we can assume that
1931 This is much easier than the previous function as we can assume that
1933 the recipient has any changenode we aren't sending them.
1932 the recipient has any changenode we aren't sending them.
1934
1933
1935 nodes is the set of nodes to send"""
1934 nodes is the set of nodes to send"""
1936
1935
1937 cl = self.changelog
1936 cl = self.changelog
1938 mf = self.manifest
1937 mf = self.manifest
1939 mfs = {}
1938 mfs = {}
1940 changedfiles = set()
1939 changedfiles = set()
1941 fstate = ['']
1940 fstate = ['']
1942 count = [0, 0]
1941 count = [0, 0]
1943
1942
1944 self.hook('preoutgoing', throw=True, source=source)
1943 self.hook('preoutgoing', throw=True, source=source)
1945 self.changegroupinfo(nodes, source)
1944 self.changegroupinfo(nodes, source)
1946
1945
1947 revset = set([cl.rev(n) for n in nodes])
1946 revset = set([cl.rev(n) for n in nodes])
1948
1947
1949 def gennodelst(log):
1948 def gennodelst(log):
1950 ln, llr = log.node, log.linkrev
1949 ln, llr = log.node, log.linkrev
1951 return [ln(r) for r in log if llr(r) in revset]
1950 return [ln(r) for r in log if llr(r) in revset]
1952
1951
1953 progress = self.ui.progress
1952 progress = self.ui.progress
1954 _bundling = _('bundling')
1953 _bundling = _('bundling')
1955 _changesets = _('changesets')
1954 _changesets = _('changesets')
1956 _manifests = _('manifests')
1955 _manifests = _('manifests')
1957 _files = _('files')
1956 _files = _('files')
1958
1957
1959 def lookup(revlog, x):
1958 def lookup(revlog, x):
1960 if revlog == cl:
1959 if revlog == cl:
1961 c = cl.read(x)
1960 c = cl.read(x)
1962 changedfiles.update(c[3])
1961 changedfiles.update(c[3])
1963 mfs.setdefault(c[0], x)
1962 mfs.setdefault(c[0], x)
1964 count[0] += 1
1963 count[0] += 1
1965 progress(_bundling, count[0],
1964 progress(_bundling, count[0],
1966 unit=_changesets, total=count[1])
1965 unit=_changesets, total=count[1])
1967 return x
1966 return x
1968 elif revlog == mf:
1967 elif revlog == mf:
1969 count[0] += 1
1968 count[0] += 1
1970 progress(_bundling, count[0],
1969 progress(_bundling, count[0],
1971 unit=_manifests, total=count[1])
1970 unit=_manifests, total=count[1])
1972 return cl.node(revlog.linkrev(revlog.rev(x)))
1971 return cl.node(revlog.linkrev(revlog.rev(x)))
1973 else:
1972 else:
1974 progress(_bundling, count[0], item=fstate[0],
1973 progress(_bundling, count[0], item=fstate[0],
1975 total=count[1], unit=_files)
1974 total=count[1], unit=_files)
1976 return cl.node(revlog.linkrev(revlog.rev(x)))
1975 return cl.node(revlog.linkrev(revlog.rev(x)))
1977
1976
1978 bundler = changegroup.bundle10(lookup)
1977 bundler = changegroup.bundle10(lookup)
1979 reorder = self.ui.config('bundle', 'reorder', 'auto')
1978 reorder = self.ui.config('bundle', 'reorder', 'auto')
1980 if reorder == 'auto':
1979 if reorder == 'auto':
1981 reorder = None
1980 reorder = None
1982 else:
1981 else:
1983 reorder = util.parsebool(reorder)
1982 reorder = util.parsebool(reorder)
1984
1983
1985 def gengroup():
1984 def gengroup():
1986 '''yield a sequence of changegroup chunks (strings)'''
1985 '''yield a sequence of changegroup chunks (strings)'''
1987 # construct a list of all changed files
1986 # construct a list of all changed files
1988
1987
1989 count[:] = [0, len(nodes)]
1988 count[:] = [0, len(nodes)]
1990 for chunk in cl.group(nodes, bundler, reorder=reorder):
1989 for chunk in cl.group(nodes, bundler, reorder=reorder):
1991 yield chunk
1990 yield chunk
1992 progress(_bundling, None)
1991 progress(_bundling, None)
1993
1992
1994 count[:] = [0, len(mfs)]
1993 count[:] = [0, len(mfs)]
1995 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1994 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1996 yield chunk
1995 yield chunk
1997 progress(_bundling, None)
1996 progress(_bundling, None)
1998
1997
1999 count[:] = [0, len(changedfiles)]
1998 count[:] = [0, len(changedfiles)]
2000 for fname in sorted(changedfiles):
1999 for fname in sorted(changedfiles):
2001 filerevlog = self.file(fname)
2000 filerevlog = self.file(fname)
2002 if not len(filerevlog):
2001 if not len(filerevlog):
2003 raise util.Abort(_("empty or missing revlog for %s") % fname)
2002 raise util.Abort(_("empty or missing revlog for %s") % fname)
2004 fstate[0] = fname
2003 fstate[0] = fname
2005 nodelist = gennodelst(filerevlog)
2004 nodelist = gennodelst(filerevlog)
2006 if nodelist:
2005 if nodelist:
2007 count[0] += 1
2006 count[0] += 1
2008 yield bundler.fileheader(fname)
2007 yield bundler.fileheader(fname)
2009 for chunk in filerevlog.group(nodelist, bundler, reorder):
2008 for chunk in filerevlog.group(nodelist, bundler, reorder):
2010 yield chunk
2009 yield chunk
2011 yield bundler.close()
2010 yield bundler.close()
2012 progress(_bundling, None)
2011 progress(_bundling, None)
2013
2012
2014 if nodes:
2013 if nodes:
2015 self.hook('outgoing', node=hex(nodes[0]), source=source)
2014 self.hook('outgoing', node=hex(nodes[0]), source=source)
2016
2015
2017 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2016 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2018
2017
2019 def addchangegroup(self, source, srctype, url, emptyok=False):
2018 def addchangegroup(self, source, srctype, url, emptyok=False):
2020 """Add the changegroup returned by source.read() to this repo.
2019 """Add the changegroup returned by source.read() to this repo.
2021 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2020 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2022 the URL of the repo where this changegroup is coming from.
2021 the URL of the repo where this changegroup is coming from.
2023
2022
2024 Return an integer summarizing the change to this repo:
2023 Return an integer summarizing the change to this repo:
2025 - nothing changed or no source: 0
2024 - nothing changed or no source: 0
2026 - more heads than before: 1+added heads (2..n)
2025 - more heads than before: 1+added heads (2..n)
2027 - fewer heads than before: -1-removed heads (-2..-n)
2026 - fewer heads than before: -1-removed heads (-2..-n)
2028 - number of heads stays the same: 1
2027 - number of heads stays the same: 1
2029 """
2028 """
2030 def csmap(x):
2029 def csmap(x):
2031 self.ui.debug("add changeset %s\n" % short(x))
2030 self.ui.debug("add changeset %s\n" % short(x))
2032 return len(cl)
2031 return len(cl)
2033
2032
2034 def revmap(x):
2033 def revmap(x):
2035 return cl.rev(x)
2034 return cl.rev(x)
2036
2035
2037 if not source:
2036 if not source:
2038 return 0
2037 return 0
2039
2038
2040 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2039 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2041
2040
2042 changesets = files = revisions = 0
2041 changesets = files = revisions = 0
2043 efiles = set()
2042 efiles = set()
2044
2043
2045 # write changelog data to temp files so concurrent readers will not see
2044 # write changelog data to temp files so concurrent readers will not see
2046 # an inconsistent view
2045 # an inconsistent view
2047 cl = self.changelog
2046 cl = self.changelog
2048 cl.delayupdate()
2047 cl.delayupdate()
2049 oldheads = cl.heads()
2048 oldheads = cl.heads()
2050
2049
2051 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2050 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2052 try:
2051 try:
2053 trp = weakref.proxy(tr)
2052 trp = weakref.proxy(tr)
2054 # pull off the changeset group
2053 # pull off the changeset group
2055 self.ui.status(_("adding changesets\n"))
2054 self.ui.status(_("adding changesets\n"))
2056 clstart = len(cl)
2055 clstart = len(cl)
2057 class prog(object):
2056 class prog(object):
2058 step = _('changesets')
2057 step = _('changesets')
2059 count = 1
2058 count = 1
2060 ui = self.ui
2059 ui = self.ui
2061 total = None
2060 total = None
2062 def __call__(self):
2061 def __call__(self):
2063 self.ui.progress(self.step, self.count, unit=_('chunks'),
2062 self.ui.progress(self.step, self.count, unit=_('chunks'),
2064 total=self.total)
2063 total=self.total)
2065 self.count += 1
2064 self.count += 1
2066 pr = prog()
2065 pr = prog()
2067 source.callback = pr
2066 source.callback = pr
2068
2067
2069 source.changelogheader()
2068 source.changelogheader()
2070 srccontent = cl.addgroup(source, csmap, trp)
2069 srccontent = cl.addgroup(source, csmap, trp)
2071 if not (srccontent or emptyok):
2070 if not (srccontent or emptyok):
2072 raise util.Abort(_("received changelog group is empty"))
2071 raise util.Abort(_("received changelog group is empty"))
2073 clend = len(cl)
2072 clend = len(cl)
2074 changesets = clend - clstart
2073 changesets = clend - clstart
2075 for c in xrange(clstart, clend):
2074 for c in xrange(clstart, clend):
2076 efiles.update(self[c].files())
2075 efiles.update(self[c].files())
2077 efiles = len(efiles)
2076 efiles = len(efiles)
2078 self.ui.progress(_('changesets'), None)
2077 self.ui.progress(_('changesets'), None)
2079
2078
2080 # pull off the manifest group
2079 # pull off the manifest group
2081 self.ui.status(_("adding manifests\n"))
2080 self.ui.status(_("adding manifests\n"))
2082 pr.step = _('manifests')
2081 pr.step = _('manifests')
2083 pr.count = 1
2082 pr.count = 1
2084 pr.total = changesets # manifests <= changesets
2083 pr.total = changesets # manifests <= changesets
2085 # no need to check for empty manifest group here:
2084 # no need to check for empty manifest group here:
2086 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2085 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2087 # no new manifest will be created and the manifest group will
2086 # no new manifest will be created and the manifest group will
2088 # be empty during the pull
2087 # be empty during the pull
2089 source.manifestheader()
2088 source.manifestheader()
2090 self.manifest.addgroup(source, revmap, trp)
2089 self.manifest.addgroup(source, revmap, trp)
2091 self.ui.progress(_('manifests'), None)
2090 self.ui.progress(_('manifests'), None)
2092
2091
2093 needfiles = {}
2092 needfiles = {}
2094 if self.ui.configbool('server', 'validate', default=False):
2093 if self.ui.configbool('server', 'validate', default=False):
2095 # validate incoming csets have their manifests
2094 # validate incoming csets have their manifests
2096 for cset in xrange(clstart, clend):
2095 for cset in xrange(clstart, clend):
2097 mfest = self.changelog.read(self.changelog.node(cset))[0]
2096 mfest = self.changelog.read(self.changelog.node(cset))[0]
2098 mfest = self.manifest.readdelta(mfest)
2097 mfest = self.manifest.readdelta(mfest)
2099 # store file nodes we must see
2098 # store file nodes we must see
2100 for f, n in mfest.iteritems():
2099 for f, n in mfest.iteritems():
2101 needfiles.setdefault(f, set()).add(n)
2100 needfiles.setdefault(f, set()).add(n)
2102
2101
2103 # process the files
2102 # process the files
2104 self.ui.status(_("adding file changes\n"))
2103 self.ui.status(_("adding file changes\n"))
2105 pr.step = _('files')
2104 pr.step = _('files')
2106 pr.count = 1
2105 pr.count = 1
2107 pr.total = efiles
2106 pr.total = efiles
2108 source.callback = None
2107 source.callback = None
2109
2108
2110 while True:
2109 while True:
2111 chunkdata = source.filelogheader()
2110 chunkdata = source.filelogheader()
2112 if not chunkdata:
2111 if not chunkdata:
2113 break
2112 break
2114 f = chunkdata["filename"]
2113 f = chunkdata["filename"]
2115 self.ui.debug("adding %s revisions\n" % f)
2114 self.ui.debug("adding %s revisions\n" % f)
2116 pr()
2115 pr()
2117 fl = self.file(f)
2116 fl = self.file(f)
2118 o = len(fl)
2117 o = len(fl)
2119 if not fl.addgroup(source, revmap, trp):
2118 if not fl.addgroup(source, revmap, trp):
2120 raise util.Abort(_("received file revlog group is empty"))
2119 raise util.Abort(_("received file revlog group is empty"))
2121 revisions += len(fl) - o
2120 revisions += len(fl) - o
2122 files += 1
2121 files += 1
2123 if f in needfiles:
2122 if f in needfiles:
2124 needs = needfiles[f]
2123 needs = needfiles[f]
2125 for new in xrange(o, len(fl)):
2124 for new in xrange(o, len(fl)):
2126 n = fl.node(new)
2125 n = fl.node(new)
2127 if n in needs:
2126 if n in needs:
2128 needs.remove(n)
2127 needs.remove(n)
2129 if not needs:
2128 if not needs:
2130 del needfiles[f]
2129 del needfiles[f]
2131 self.ui.progress(_('files'), None)
2130 self.ui.progress(_('files'), None)
2132
2131
2133 for f, needs in needfiles.iteritems():
2132 for f, needs in needfiles.iteritems():
2134 fl = self.file(f)
2133 fl = self.file(f)
2135 for n in needs:
2134 for n in needs:
2136 try:
2135 try:
2137 fl.rev(n)
2136 fl.rev(n)
2138 except error.LookupError:
2137 except error.LookupError:
2139 raise util.Abort(
2138 raise util.Abort(
2140 _('missing file data for %s:%s - run hg verify') %
2139 _('missing file data for %s:%s - run hg verify') %
2141 (f, hex(n)))
2140 (f, hex(n)))
2142
2141
2143 dh = 0
2142 dh = 0
2144 if oldheads:
2143 if oldheads:
2145 heads = cl.heads()
2144 heads = cl.heads()
2146 dh = len(heads) - len(oldheads)
2145 dh = len(heads) - len(oldheads)
2147 for h in heads:
2146 for h in heads:
2148 if h not in oldheads and 'close' in self[h].extra():
2147 if h not in oldheads and 'close' in self[h].extra():
2149 dh -= 1
2148 dh -= 1
2150 htext = ""
2149 htext = ""
2151 if dh:
2150 if dh:
2152 htext = _(" (%+d heads)") % dh
2151 htext = _(" (%+d heads)") % dh
2153
2152
2154 self.ui.status(_("added %d changesets"
2153 self.ui.status(_("added %d changesets"
2155 " with %d changes to %d files%s\n")
2154 " with %d changes to %d files%s\n")
2156 % (changesets, revisions, files, htext))
2155 % (changesets, revisions, files, htext))
2157
2156
2158 if changesets > 0:
2157 if changesets > 0:
2159 p = lambda: cl.writepending() and self.root or ""
2158 p = lambda: cl.writepending() and self.root or ""
2160 self.hook('pretxnchangegroup', throw=True,
2159 self.hook('pretxnchangegroup', throw=True,
2161 node=hex(cl.node(clstart)), source=srctype,
2160 node=hex(cl.node(clstart)), source=srctype,
2162 url=url, pending=p)
2161 url=url, pending=p)
2163
2162
2164 added = [cl.node(r) for r in xrange(clstart, clend)]
2163 added = [cl.node(r) for r in xrange(clstart, clend)]
2165 publishing = self.ui.configbool('phases', 'publish', True)
2164 publishing = self.ui.configbool('phases', 'publish', True)
2166 if srctype == 'push':
2165 if srctype == 'push':
2167 # Old servers cannot push the boundary themselves.
2166 # Old servers cannot push the boundary themselves.
2168 # New servers won't push the boundary if the changeset already
2167 # New servers won't push the boundary if the changeset already
2169 # existed locally as secret
2168 # existed locally as secret
2170 #
2169 #
2171 # We should not use added here but the list of all changes in
2170 # We should not use added here but the list of all changes in
2172 # the bundle
2171 # the bundle
2173 if publishing:
2172 if publishing:
2174 phases.advanceboundary(self, phases.public, srccontent)
2173 phases.advanceboundary(self, phases.public, srccontent)
2175 else:
2174 else:
2176 phases.advanceboundary(self, phases.draft, srccontent)
2175 phases.advanceboundary(self, phases.draft, srccontent)
2177 phases.retractboundary(self, phases.draft, added)
2176 phases.retractboundary(self, phases.draft, added)
2178 elif srctype != 'strip':
2177 elif srctype != 'strip':
2179 # publishing only alters behavior during push
2178 # publishing only alters behavior during push
2180 #
2179 #
2181 # strip should not touch boundary at all
2180 # strip should not touch boundary at all
2182 phases.retractboundary(self, phases.draft, added)
2181 phases.retractboundary(self, phases.draft, added)
2183
2182
2184 # make changelog see real files again
2183 # make changelog see real files again
2185 cl.finalize(trp)
2184 cl.finalize(trp)
2186
2185
2187 tr.close()
2186 tr.close()
2188
2187
2189 if changesets > 0:
2188 if changesets > 0:
2190 def runhooks():
2189 def runhooks():
2191 # forcefully update the on-disk branch cache
2190 # forcefully update the on-disk branch cache
2192 self.ui.debug("updating the branch cache\n")
2191 self.ui.debug("updating the branch cache\n")
2193 self.updatebranchcache()
2192 self.updatebranchcache()
2194 self.hook("changegroup", node=hex(cl.node(clstart)),
2193 self.hook("changegroup", node=hex(cl.node(clstart)),
2195 source=srctype, url=url)
2194 source=srctype, url=url)
2196
2195
2197 for n in added:
2196 for n in added:
2198 self.hook("incoming", node=hex(n), source=srctype,
2197 self.hook("incoming", node=hex(n), source=srctype,
2199 url=url)
2198 url=url)
2200 self._afterlock(runhooks)
2199 self._afterlock(runhooks)
2201
2200
2202 finally:
2201 finally:
2203 tr.release()
2202 tr.release()
2204 # never return 0 here:
2203 # never return 0 here:
2205 if dh < 0:
2204 if dh < 0:
2206 return dh - 1
2205 return dh - 1
2207 else:
2206 else:
2208 return dh + 1
2207 return dh + 1
2209
2208
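A sketch of decoding the return value documented above (assuming `source` is an unbundler such as the one returned by changegroup.unbundle10):

    ret = repo.addchangegroup(source, 'pull', url)
    if ret == 0:
        pass                     # nothing changed or no source
    elif ret > 1:
        addedheads = ret - 1     # head count grew
    elif ret < 0:
        removedheads = -ret - 1  # head count shrank
    # ret == 1 means the head count stayed the same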
2210 def stream_in(self, remote, requirements):
2209 def stream_in(self, remote, requirements):
2211 lock = self.lock()
2210 lock = self.lock()
2212 try:
2211 try:
2213 fp = remote.stream_out()
2212 fp = remote.stream_out()
2214 l = fp.readline()
2213 l = fp.readline()
2215 try:
2214 try:
2216 resp = int(l)
2215 resp = int(l)
2217 except ValueError:
2216 except ValueError:
2218 raise error.ResponseError(
2217 raise error.ResponseError(
2219 _('Unexpected response from remote server:'), l)
2218 _('Unexpected response from remote server:'), l)
2220 if resp == 1:
2219 if resp == 1:
2221 raise util.Abort(_('operation forbidden by server'))
2220 raise util.Abort(_('operation forbidden by server'))
2222 elif resp == 2:
2221 elif resp == 2:
2223 raise util.Abort(_('locking the remote repository failed'))
2222 raise util.Abort(_('locking the remote repository failed'))
2224 elif resp != 0:
2223 elif resp != 0:
2225 raise util.Abort(_('the server sent an unknown error code'))
2224 raise util.Abort(_('the server sent an unknown error code'))
2226 self.ui.status(_('streaming all changes\n'))
2225 self.ui.status(_('streaming all changes\n'))
2227 l = fp.readline()
2226 l = fp.readline()
2228 try:
2227 try:
2229 total_files, total_bytes = map(int, l.split(' ', 1))
2228 total_files, total_bytes = map(int, l.split(' ', 1))
2230 except (ValueError, TypeError):
2229 except (ValueError, TypeError):
2231 raise error.ResponseError(
2230 raise error.ResponseError(
2232 _('Unexpected response from remote server:'), l)
2231 _('Unexpected response from remote server:'), l)
2233 self.ui.status(_('%d files to transfer, %s of data\n') %
2232 self.ui.status(_('%d files to transfer, %s of data\n') %
2234 (total_files, util.bytecount(total_bytes)))
2233 (total_files, util.bytecount(total_bytes)))
2235 start = time.time()
2234 start = time.time()
2236 for i in xrange(total_files):
2235 for i in xrange(total_files):
2237 # XXX doesn't support '\n' or '\r' in filenames
2236 # XXX doesn't support '\n' or '\r' in filenames
2238 l = fp.readline()
2237 l = fp.readline()
2239 try:
2238 try:
2240 name, size = l.split('\0', 1)
2239 name, size = l.split('\0', 1)
2241 size = int(size)
2240 size = int(size)
2242 except (ValueError, TypeError):
2241 except (ValueError, TypeError):
2243 raise error.ResponseError(
2242 raise error.ResponseError(
2244 _('Unexpected response from remote server:'), l)
2243 _('Unexpected response from remote server:'), l)
2245 if self.ui.debugflag:
2244 if self.ui.debugflag:
2246 self.ui.debug('adding %s (%s)\n' %
2245 self.ui.debug('adding %s (%s)\n' %
2247 (name, util.bytecount(size)))
2246 (name, util.bytecount(size)))
2248 # for backwards compat, name was partially encoded
2247 # for backwards compat, name was partially encoded
2249 ofp = self.sopener(store.decodedir(name), 'w')
2248 ofp = self.sopener(store.decodedir(name), 'w')
2250 for chunk in util.filechunkiter(fp, limit=size):
2249 for chunk in util.filechunkiter(fp, limit=size):
2251 ofp.write(chunk)
2250 ofp.write(chunk)
2252 ofp.close()
2251 ofp.close()
2253 elapsed = time.time() - start
2252 elapsed = time.time() - start
2254 if elapsed <= 0:
2253 if elapsed <= 0:
2255 elapsed = 0.001
2254 elapsed = 0.001
2256 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2255 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2257 (util.bytecount(total_bytes), elapsed,
2256 (util.bytecount(total_bytes), elapsed,
2258 util.bytecount(total_bytes / elapsed)))
2257 util.bytecount(total_bytes / elapsed)))
2259
2258
2260 # new requirements = old non-format requirements + new format-related
2259 # new requirements = old non-format requirements + new format-related
2261 # requirements from the streamed-in repository
2260 # requirements from the streamed-in repository
2262 requirements.update(set(self.requirements) - self.supportedformats)
2261 requirements.update(set(self.requirements) - self.supportedformats)
2263 self._applyrequirements(requirements)
2262 self._applyrequirements(requirements)
2264 self._writerequirements()
2263 self._writerequirements()
2265
2264
2266 self.invalidate()
2265 self.invalidate()
2267 return len(self.heads()) + 1
2266 return len(self.heads()) + 1
2268 finally:
2267 finally:
2269 lock.release()
2268 lock.release()
2270
2269
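For reference, the stream payload consumed above has roughly this shape (reconstructed from the parsing code; names and sizes are illustrative, not a normative spec):

    0                    <- response code line, 0 means OK
    2 640                <- total_files and total_bytes
    data/foo.i\0512      <- '<name>\0<size>' header per file...
    <512 raw bytes>      <- ...followed by that many raw bytes
    data/bar.i\0128
    <128 raw bytes>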
2271 def clone(self, remote, heads=[], stream=False):
2270 def clone(self, remote, heads=[], stream=False):
2272 '''clone remote repository.
2271 '''clone remote repository.
2273
2272
2274 keyword arguments:
2273 keyword arguments:
2275 heads: list of revs to clone (forces use of pull)
2274 heads: list of revs to clone (forces use of pull)
2276 stream: use streaming clone if possible'''
2275 stream: use streaming clone if possible'''
2277
2276
2278 # now, all clients that can request uncompressed clones can
2277 # now, all clients that can request uncompressed clones can
2279 # read repo formats supported by all servers that can serve
2278 # read repo formats supported by all servers that can serve
2280 # them.
2279 # them.
2281
2280
2282 # if revlog format changes, client will have to check version
2281 # if revlog format changes, client will have to check version
2283 # and format flags on "stream" capability, and use
2282 # and format flags on "stream" capability, and use
2284 # uncompressed only if compatible.
2283 # uncompressed only if compatible.
2285
2284
2286 if not stream:
2285 if not stream:
2287 # if the server explicitly prefers to stream (for fast LANs)
2286 # if the server explicitly prefers to stream (for fast LANs)
2288 stream = remote.capable('stream-preferred')
2287 stream = remote.capable('stream-preferred')
2289
2288
2290 if stream and not heads:
2289 if stream and not heads:
2291 # 'stream' means remote revlog format is revlogv1 only
2290 # 'stream' means remote revlog format is revlogv1 only
2292 if remote.capable('stream'):
2291 if remote.capable('stream'):
2293 return self.stream_in(remote, set(('revlogv1',)))
2292 return self.stream_in(remote, set(('revlogv1',)))
2294 # otherwise, 'streamreqs' contains the remote revlog format
2293 # otherwise, 'streamreqs' contains the remote revlog format
2295 streamreqs = remote.capable('streamreqs')
2294 streamreqs = remote.capable('streamreqs')
2296 if streamreqs:
2295 if streamreqs:
2297 streamreqs = set(streamreqs.split(','))
2296 streamreqs = set(streamreqs.split(','))
2298 # if we support it, stream in and adjust our requirements
2297 # if we support it, stream in and adjust our requirements
2299 if not streamreqs - self.supportedformats:
2298 if not streamreqs - self.supportedformats:
2300 return self.stream_in(remote, streamreqs)
2299 return self.stream_in(remote, streamreqs)
2301 return self.pull(remote, heads)
2300 return self.pull(remote, heads)
2302
2301
2303 def pushkey(self, namespace, key, old, new):
2302 def pushkey(self, namespace, key, old, new):
2304 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2303 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2305 old=old, new=new)
2304 old=old, new=new)
2306 ret = pushkey.push(self, namespace, key, old, new)
2305 ret = pushkey.push(self, namespace, key, old, new)
2307 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2306 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2308 ret=ret)
2307 ret=ret)
2309 return ret
2308 return ret
2310
2309
2311 def listkeys(self, namespace):
2310 def listkeys(self, namespace):
2312 self.hook('prelistkeys', throw=True, namespace=namespace)
2311 self.hook('prelistkeys', throw=True, namespace=namespace)
2313 values = pushkey.list(self, namespace)
2312 values = pushkey.list(self, namespace)
2314 self.hook('listkeys', namespace=namespace, values=values)
2313 self.hook('listkeys', namespace=namespace, values=values)
2315 return values
2314 return values
2316
2315
2317 def debugwireargs(self, one, two, three=None, four=None, five=None):
2316 def debugwireargs(self, one, two, three=None, four=None, five=None):
2318 '''used to test argument passing over the wire'''
2317 '''used to test argument passing over the wire'''
2319 return "%s %s %s %s %s" % (one, two, three, four, five)
2318 return "%s %s %s %s %s" % (one, two, three, four, five)
2320
2319
2321 def savecommitmessage(self, text):
2320 def savecommitmessage(self, text):
2322 fp = self.opener('last-message.txt', 'wb')
2321 fp = self.opener('last-message.txt', 'wb')
2323 try:
2322 try:
2324 fp.write(text)
2323 fp.write(text)
2325 finally:
2324 finally:
2326 fp.close()
2325 fp.close()
2327 return self.pathto(fp.name[len(self.root)+1:])
2326 return self.pathto(fp.name[len(self.root)+1:])
2328
2327
2329 # used to avoid circular references so destructors work
2328 # used to avoid circular references so destructors work
2330 def aftertrans(files):
2329 def aftertrans(files):
2331 renamefiles = [tuple(t) for t in files]
2330 renamefiles = [tuple(t) for t in files]
2332 def a():
2331 def a():
2333 for src, dest in renamefiles:
2332 for src, dest in renamefiles:
2334 try:
2333 try:
2335 util.rename(src, dest)
2334 util.rename(src, dest)
2336 except OSError: # journal file does not yet exist
2335 except OSError: # journal file does not yet exist
2337 pass
2336 pass
2338 return a
2337 return a
2339
2338
2340 def undoname(fn):
2339 def undoname(fn):
2341 base, name = os.path.split(fn)
2340 base, name = os.path.split(fn)
2342 assert name.startswith('journal')
2341 assert name.startswith('journal')
2343 return os.path.join(base, name.replace('journal', 'undo', 1))
2342 return os.path.join(base, name.replace('journal', 'undo', 1))
2344
2343
2345 def instance(ui, path, create):
2344 def instance(ui, path, create):
2346 return localrepository(ui, util.urllocalpath(path), create)
2345 return localrepository(ui, util.urllocalpath(path), create)
2347
2346
2348 def islocal(path):
2347 def islocal(path):
2349 return True
2348 return True
@@ -1,343 +1,345 b''
1 """ Mercurial phases support code
1 """ Mercurial phases support code
2
2
3 ---
3 ---
4
4
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 Logilab SA <contact@logilab.fr>
6 Logilab SA <contact@logilab.fr>
7 Augie Fackler <durin42@gmail.com>
7 Augie Fackler <durin42@gmail.com>
8
8
9 This software may be used and distributed according to the terms of the
9 This software may be used and distributed according to the terms of the
10 GNU General Public License version 2 or any later version.
10 GNU General Public License version 2 or any later version.
11
11
12 ---
12 ---
13
13
14 This module implements most phase logic in mercurial.
14 This module implements most phase logic in mercurial.
15
15
16
16
17 Basic Concept
17 Basic Concept
18 =============
18 =============
19
19
20 A 'changeset phase' is an indicator that tells us how a changeset is
20 A 'changeset phase' is an indicator that tells us how a changeset is
21 manipulated and communicated. The details of each phase are described below;
21 manipulated and communicated. The details of each phase are described below;
22 here we describe the properties they have in common.
22 here we describe the properties they have in common.
23
23
24 Like bookmarks, phases are not stored in history and thus are not permanent and
24 Like bookmarks, phases are not stored in history and thus are not permanent and
25 leave no audit trail.
25 leave no audit trail.
26
26
27 First, no changeset can be in two phases at once. Phases are ordered, so they
27 First, no changeset can be in two phases at once. Phases are ordered, so they
28 can be considered from lowest to highest. The default, lowest phase is 'public'
28 can be considered from lowest to highest. The default, lowest phase is 'public'
29 - this is the normal phase of existing changesets. A child changeset cannot be
29 - this is the normal phase of existing changesets. A child changeset cannot be
30 in a lower phase than its parents.
30 in a lower phase than its parents.
31
31
32 These phases share a hierarchy of traits:
32 These phases share a hierarchy of traits:
33
33
34 immutable shared
34 immutable shared
35 public: X X
35 public: X X
36 draft: X
36 draft: X
37 secret:
37 secret:
38
38
39 local commits are draft by default
39 local commits are draft by default
40
40
41 Phase movement and exchange
41 Phase movement and exchange
42 ============================
42 ============================
43
43
44 Phase data are exchanged by pushkey on pull and push. Some servers have a
44 Phase data are exchanged by pushkey on pull and push. Some servers have a
45 publish option set; we call them publishing servers. Pushing to such a server
45 publish option set; we call them publishing servers. Pushing to such a server
46 makes draft changesets public.
46 makes draft changesets public.
47
47
48 A small list of facts/rules defines the exchange of phases:
48 A small list of facts/rules defines the exchange of phases:
49
49
50 * old client never changes server states
50 * old client never changes server states
51 * pull never changes server states
51 * pull never changes server states
52 * publish and old server csets are seen as public by client
52 * publish and old server csets are seen as public by client
53
53
54 * Any secret changeset seen in another repository is lowered to at least draft
54 * Any secret changeset seen in another repository is lowered to at least draft
55
55
56
56
57 Here is the final table summing up the 49 possible use cases of phase exchange:
57 Here is the final table summing up the 49 possible use cases of phase exchange:
58
58
59 server
59 server
60 old publish non-publish
60 old publish non-publish
61 N X N D P N D P
61 N X N D P N D P
62 old client
62 old client
63 pull
63 pull
64 N - X/X - X/D X/P - X/D X/P
64 N - X/X - X/D X/P - X/D X/P
65 X - X/X - X/D X/P - X/D X/P
65 X - X/X - X/D X/P - X/D X/P
66 push
66 push
67 X X/X X/X X/P X/P X/P X/D X/D X/P
67 X X/X X/X X/P X/P X/P X/D X/D X/P
68 new client
68 new client
69 pull
69 pull
70 N - P/X - P/D P/P - D/D P/P
70 N - P/X - P/D P/P - D/D P/P
71 D - P/X - P/D P/P - D/D P/P
71 D - P/X - P/D P/P - D/D P/P
72 P - P/X - P/D P/P - P/D P/P
72 P - P/X - P/D P/P - P/D P/P
73 push
73 push
74 D P/X P/X P/P P/P P/P D/D D/D P/P
74 D P/X P/X P/P P/P P/P D/D D/D P/P
75 P P/X P/X P/P P/P P/P P/P P/P P/P
75 P P/X P/X P/P P/P P/P P/P P/P P/P
76
76
77 Legend:
77 Legend:
78
78
79 A/B = final state on client / state on server
79 A/B = final state on client / state on server
80
80
81 * N = new/not present,
81 * N = new/not present,
82 * P = public,
82 * P = public,
83 * D = draft,
83 * D = draft,
84 * X = not tracked (i.e. the old client or server has no internal way of
84 * X = not tracked (i.e. the old client or server has no internal way of
85 recording the phase)
85 recording the phase)
86
86
87 passive = only pushes
87 passive = only pushes
88
88
89
89
90 A cell here can be read like this:
90 A cell here can be read like this:
91
91
92 "When a new client pushes a draft changeset (D) to a publishing server
92 "When a new client pushes a draft changeset (D) to a publishing server
93 where it's not present (N), it's marked public on both sides (P/P)."
93 where it's not present (N), it's marked public on both sides (P/P)."
94
94
95 Note: old clients behave as a publishing server with draft-only content
95 Note: old clients behave as a publishing server with draft-only content
96 - other people see it as public
96 - other people see it as public
97 - content is pushed as draft
97 - content is pushed as draft
98
98
99 """
99 """
100
100
101 import errno
101 import errno
102 from node import nullid, bin, hex, short
102 from node import nullid, bin, hex, short
103 from i18n import _
103 from i18n import _
104
104
105 allphases = public, draft, secret = range(3)
105 allphases = public, draft, secret = range(3)
106 trackedphases = allphases[1:]
106 trackedphases = allphases[1:]
107 phasenames = ['public', 'draft', 'secret']
107 phasenames = ['public', 'draft', 'secret']
108
108
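A quick illustration of these constants (a sketch; `repo` is assumed to be an existing localrepository):

    ctx = repo['tip']
    ctx.phase()                  # 0, 1 or 2
    phasenames[ctx.phase()]      # 'public', 'draft' or 'secret'
    public in trackedphases      # False: only draft and secret are tracked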
109 def _filterunknown(ui, changelog, phaseroots):
110 """remove unknown nodes from the phase boundary
111
112 Nothing is lost as unknown nodes only hold data for their descendants
113 """
114 updated = False
115 nodemap = changelog.nodemap # to filter unknown nodes
116 for phase, nodes in enumerate(phaseroots):
117 missing = [node for node in nodes if node not in nodemap]
118 if missing:
119 for mnode in missing:
120 ui.debug(
121 'removing unknown node %s from %i-phase boundary\n'
122 % (short(mnode), phase))
123 nodes.symmetric_difference_update(missing)
124 updated = True
125 return updated
126
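A sketch of the intended call pattern, mirroring the readroots() hunk below (the strip scenario is illustrative):

    # e.g. after a strip removed nodes that served as phase roots
    if _filterunknown(repo.ui, repo.changelog, repo._phaseroots):
        repo._dirtyphases = True   # roots changed, schedule a write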
109 def readroots(repo):
127 def readroots(repo):
110 """Read phase roots from disk"""
128 """Read phase roots from disk"""
111 roots = [set() for i in allphases]
129 roots = [set() for i in allphases]
112 try:
130 try:
113 f = repo.sopener('phaseroots')
131 f = repo.sopener('phaseroots')
114 try:
132 try:
115 for line in f:
133 for line in f:
116 phase, nh = line.split()
134 phase, nh = line.split()
117 roots[int(phase)].add(bin(nh))
135 roots[int(phase)].add(bin(nh))
118 finally:
136 finally:
119 f.close()
137 f.close()
120 except IOError, inst:
138 except IOError, inst:
121 if inst.errno != errno.ENOENT:
139 if inst.errno != errno.ENOENT:
122 raise
140 raise
123 for f in repo._phasedefaults:
141 for f in repo._phasedefaults:
124 roots = f(repo, roots)
142 roots = f(repo, roots)
125 repo._dirtyphases = True
143 repo._dirtyphases = True
144 if _filterunknown(repo.ui, repo.changelog, roots):
145 repo._dirtyphases = True
126 return roots
146 return roots
127
147
128 def writeroots(repo):
148 def writeroots(repo):
129 """Write phase roots from disk"""
149 """Write phase roots from disk"""
130 f = repo.sopener('phaseroots', 'w', atomictemp=True)
150 f = repo.sopener('phaseroots', 'w', atomictemp=True)
131 try:
151 try:
132 for phase, roots in enumerate(repo._phaseroots):
152 for phase, roots in enumerate(repo._phaseroots):
133 for h in roots:
153 for h in roots:
134 f.write('%i %s\n' % (phase, hex(h)))
154 f.write('%i %s\n' % (phase, hex(h)))
135 repo._dirtyphases = False
155 repo._dirtyphases = False
136 finally:
156 finally:
137 f.close()
157 f.close()
138
158
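The resulting .hg/store/phaseroots file is plain text, one '<phase> <hex node>' pair per line; the hashes below are illustrative:

    1 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b
    2 32af7686d403cf45b5d95f2d70cebea587ac806a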
139 def filterunknown(repo, phaseroots=None):
140 """remove unknown nodes from the phase boundary
141
142 no data is lost as unknown nodes only hold data for their descendants
143 """
144 if phaseroots is None:
145 phaseroots = repo._phaseroots
146 nodemap = repo.changelog.nodemap # to filter unknown nodes
147 for phase, nodes in enumerate(phaseroots):
148 missing = [node for node in nodes if node not in nodemap]
149 if missing:
150 for mnode in missing:
151 repo.ui.debug(
152 'removing unknown node %s from %i-phase boundary\n'
153 % (short(mnode), phase))
154 nodes.symmetric_difference_update(missing)
155 repo._dirtyphases = True
156
157 def advanceboundary(repo, targetphase, nodes):
159 def advanceboundary(repo, targetphase, nodes):
158 """Add nodes to a phase changing other nodes phases if necessary.
160 """Add nodes to a phase changing other nodes phases if necessary.
159
161
160 This function move boundary *forward* this means that all nodes are set
162 This function move boundary *forward* this means that all nodes are set
161 in the target phase or kept in a *lower* phase.
163 in the target phase or kept in a *lower* phase.
162
164
163 Simplify boundary to contains phase roots only."""
165 Simplify boundary to contains phase roots only."""
164 delroots = [] # set of root deleted by this path
166 delroots = [] # set of root deleted by this path
165 for phase in xrange(targetphase + 1, len(allphases)):
167 for phase in xrange(targetphase + 1, len(allphases)):
166 # filter nodes that are not in a compatible phase already
168 # filter nodes that are not in a compatible phase already
167 # XXX rev phase cache might have been invalidated by a previous loop
169 # XXX rev phase cache might have been invalidated by a previous loop
168 # XXX we need to be smarter here
170 # XXX we need to be smarter here
169 nodes = [n for n in nodes if repo[n].phase() >= phase]
171 nodes = [n for n in nodes if repo[n].phase() >= phase]
170 if not nodes:
172 if not nodes:
171 break # no roots to move anymore
173 break # no roots to move anymore
172 roots = repo._phaseroots[phase]
174 roots = repo._phaseroots[phase]
173 olds = roots.copy()
175 olds = roots.copy()
174 ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
176 ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
175 roots.clear()
177 roots.clear()
176 roots.update(ctx.node() for ctx in ctxs)
178 roots.update(ctx.node() for ctx in ctxs)
177 if olds != roots:
179 if olds != roots:
178 # invalidate cache (we could probably be smarter here)
180 # invalidate cache (we could probably be smarter here)
179 if '_phaserev' in vars(repo):
181 if '_phaserev' in vars(repo):
180 del repo._phaserev
182 del repo._phaserev
181 repo._dirtyphases = True
183 repo._dirtyphases = True
182 # some roots may need to be declared for lower phases
184 # some roots may need to be declared for lower phases
183 delroots.extend(olds - roots)
185 delroots.extend(olds - roots)
184 # declare deleted root in the target phase
186 # declare deleted root in the target phase
185 if targetphase != 0:
187 if targetphase != 0:
186 retractboundary(repo, targetphase, delroots)
188 retractboundary(repo, targetphase, delroots)
187
189
188
190
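A usage sketch, mirroring the addchangegroup() hunk in localrepo.py above (`nodes` stands for a list of binary node ids):

    # publish nodes, e.g. after a push to a publishing server
    advanceboundary(repo, public, nodes)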
189 def retractboundary(repo, targetphase, nodes):
191 def retractboundary(repo, targetphase, nodes):
190 """Set nodes back to a phase changing other nodes phases if necessary.
192 """Set nodes back to a phase changing other nodes phases if necessary.
191
193
192 This function move boundary *backward* this means that all nodes are set
194 This function move boundary *backward* this means that all nodes are set
193 in the target phase or kept in a *higher* phase.
195 in the target phase or kept in a *higher* phase.
194
196
195 Simplify boundary to contains phase roots only."""
197 Simplify boundary to contains phase roots only."""
196 currentroots = repo._phaseroots[targetphase]
198 currentroots = repo._phaseroots[targetphase]
197 newroots = [n for n in nodes if repo[n].phase() < targetphase]
199 newroots = [n for n in nodes if repo[n].phase() < targetphase]
198 if newroots:
200 if newroots:
199 currentroots.update(newroots)
201 currentroots.update(newroots)
200 ctxs = repo.set('roots(%ln::)', currentroots)
202 ctxs = repo.set('roots(%ln::)', currentroots)
201 currentroots.intersection_update(ctx.node() for ctx in ctxs)
203 currentroots.intersection_update(ctx.node() for ctx in ctxs)
202 if '_phaserev' in vars(repo):
204 if '_phaserev' in vars(repo):
203 del repo._phaserev
205 del repo._phaserev
204 repo._dirtyphases = True
206 repo._dirtyphases = True
205
207
206
208
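And the converse operation, also as used in addchangegroup() above (`added` is illustrative):

    # force freshly added changesets back down to draft
    retractboundary(repo, draft, added)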
207 def listphases(repo):
209 def listphases(repo):
208 """List phases root for serialisation over pushkey"""
210 """List phases root for serialisation over pushkey"""
209 keys = {}
211 keys = {}
210 value = '%i' % draft
212 value = '%i' % draft
211 for root in repo._phaseroots[draft]:
213 for root in repo._phaseroots[draft]:
212 keys[hex(root)] = value
214 keys[hex(root)] = value
213
215
214 if repo.ui.configbool('phases', 'publish', True):
216 if repo.ui.configbool('phases', 'publish', True):
215 # Add extra data to let the remote know we are a publishing repo.
217 # Add extra data to let the remote know we are a publishing repo.
216 # A publishing repo can't just pretend to be an old repo. When pushing to
218 # A publishing repo can't just pretend to be an old repo. When pushing to
217 # a publishing repo, the client still needs to push the phase boundary.
219 # a publishing repo, the client still needs to push the phase boundary.
218 #
220 #
219 # A push does not only push changesets. It also pushes phase data. New
221 # A push does not only push changesets. It also pushes phase data. New
220 # phase data may apply to common changesets which won't be pushed (as
222 # phase data may apply to common changesets which won't be pushed (as
221 # they are common). Here is a very simple example:
223 # they are common). Here is a very simple example:
222 #
224 #
223 # 1) repo A pushes changeset X as draft to repo B
225 # 1) repo A pushes changeset X as draft to repo B
224 # 2) repo B makes changeset X public
226 # 2) repo B makes changeset X public
225 # 3) repo B pushes to repo A. X is not pushed, but the data that X is
227 # 3) repo B pushes to repo A. X is not pushed, but the data that X is
226 # now public should be
228 # now public should be
227 #
229 #
228 # The server can't handle this on its own as it has no idea of the
230 # The server can't handle this on its own as it has no idea of the
229 # client's phase data.
231 # client's phase data.
230 keys['publishing'] = 'True'
232 keys['publishing'] = 'True'
231 return keys
233 return keys
232
234
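A possible return value for a publishing repo with a single draft root (the hash is illustrative):

    {'5fddd98957c8a54a4d436dfe1da9d87f21a1b97b': '1',
     'publishing': 'True'}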
233 def pushphase(repo, nhex, oldphasestr, newphasestr):
235 def pushphase(repo, nhex, oldphasestr, newphasestr):
234 """List phases root for serialisation over pushkey"""
236 """List phases root for serialisation over pushkey"""
235 lock = repo.lock()
237 lock = repo.lock()
236 try:
238 try:
237 currentphase = repo[nhex].phase()
239 currentphase = repo[nhex].phase()
238 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
240 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
239 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
241 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
240 if currentphase == oldphase and newphase < oldphase:
242 if currentphase == oldphase and newphase < oldphase:
241 advanceboundary(repo, newphase, [bin(nhex)])
243 advanceboundary(repo, newphase, [bin(nhex)])
242 return 1
244 return 1
243 elif currentphase == newphase:
245 elif currentphase == newphase:
244 # raced, but got correct result
246 # raced, but got correct result
245 return 1
247 return 1
246 else:
248 else:
247 return 0
249 return 0
248 finally:
250 finally:
249 lock.release()
251 lock.release()
250
252
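A sketch of a pushkey-driven phase update (`nhex` is an illustrative hex node id):

    # a client asks the server to publish nhex: draft ('1') -> public ('0')
    ok = pushphase(repo, nhex, '1', '0')   # 1 on success or benign race, else 0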
251 def visibleheads(repo):
253 def visibleheads(repo):
252 """return the set of visible head of this repo"""
254 """return the set of visible head of this repo"""
253 # XXX we want a cache on this
255 # XXX we want a cache on this
254 sroots = repo._phaseroots[secret]
256 sroots = repo._phaseroots[secret]
255 if sroots:
257 if sroots:
256 # XXX very slow revset. storing heads or secret "boundary" would help.
258 # XXX very slow revset. storing heads or secret "boundary" would help.
257 revset = repo.set('heads(not (%ln::))', sroots)
259 revset = repo.set('heads(not (%ln::))', sroots)
258
260
259 vheads = [ctx.node() for ctx in revset]
261 vheads = [ctx.node() for ctx in revset]
260 if not vheads:
262 if not vheads:
261 vheads.append(nullid)
263 vheads.append(nullid)
262 else:
264 else:
263 vheads = repo.heads()
265 vheads = repo.heads()
264 return vheads
266 return vheads
265
267
266 def visiblebranchmap(repo):
268 def visiblebranchmap(repo):
267 """return a branchmap for the visible set"""
269 """return a branchmap for the visible set"""
268 # XXX Recomputing this data on the fly is very slow. We should build a
270 # XXX Recomputing this data on the fly is very slow. We should build a
269 # XXX cached version while computing the standard branchmap version.
271 # XXX cached version while computing the standard branchmap version.
270 sroots = repo._phaseroots[secret]
272 sroots = repo._phaseroots[secret]
271 if sroots:
273 if sroots:
272 vbranchmap = {}
274 vbranchmap = {}
273 for branch, nodes in repo.branchmap().iteritems():
275 for branch, nodes in repo.branchmap().iteritems():
274 # search for secret heads.
276 # search for secret heads.
275 for n in nodes:
277 for n in nodes:
276 if repo[n].phase() >= secret:
278 if repo[n].phase() >= secret:
277 nodes = None
279 nodes = None
278 break
280 break
279 # if secret heads were found we must compute them again
281 # if secret heads were found we must compute them again
280 if nodes is None:
282 if nodes is None:
281 s = repo.set('heads(branch(%s) - secret())', branch)
283 s = repo.set('heads(branch(%s) - secret())', branch)
282 nodes = [c.node() for c in s]
284 nodes = [c.node() for c in s]
283 vbranchmap[branch] = nodes
285 vbranchmap[branch] = nodes
284 else:
286 else:
285 vbranchmap = repo.branchmap()
287 vbranchmap = repo.branchmap()
286 return vbranchmap
288 return vbranchmap
287
289
288 def analyzeremotephases(repo, subset, roots):
290 def analyzeremotephases(repo, subset, roots):
289 """Compute phases heads and root in a subset of node from root dict
291 """Compute phases heads and root in a subset of node from root dict
290
292
291 * subset is heads of the subset
293 * subset is heads of the subset
292 * roots is {<nodeid> => phase} mapping. key and value are string.
294 * roots is {<nodeid> => phase} mapping. key and value are string.
293
295
294 Accept unknown element input
296 Accept unknown element input
295 """
297 """
296 # build list from dictionary
298 # build list from dictionary
297 draftroots = []
299 draftroots = []
298 nodemap = repo.changelog.nodemap # to filter unknown nodes
300 nodemap = repo.changelog.nodemap # to filter unknown nodes
299 for nhex, phase in roots.iteritems():
301 for nhex, phase in roots.iteritems():
300 if nhex == 'publishing': # ignore data related to publish option
302 if nhex == 'publishing': # ignore data related to publish option
301 continue
303 continue
302 node = bin(nhex)
304 node = bin(nhex)
303 phase = int(phase)
305 phase = int(phase)
304 if phase == 0:
306 if phase == 0:
305 if node != nullid:
307 if node != nullid:
306 repo.ui.warn(_('ignoring inconsistent public root'
308 repo.ui.warn(_('ignoring inconsistent public root'
307 ' from remote: %s\n') % nhex)
309 ' from remote: %s\n') % nhex)
308 elif phase == 1:
310 elif phase == 1:
309 if node in nodemap:
311 if node in nodemap:
310 draftroots.append(node)
312 draftroots.append(node)
311 else:
313 else:
312 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
314 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
313 % (phase, nhex))
315 % (phase, nhex))
314 # compute heads
316 # compute heads
315 publicheads = newheads(repo, subset, draftroots)
317 publicheads = newheads(repo, subset, draftroots)
316 return publicheads, draftroots
318 return publicheads, draftroots
317
319
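A sketch of the expected input and output (the roots dict mirrors what listphases() produces on the remote side):

    # subset: heads of the common subset; roots: received over pushkey,
    # e.g. {'publishing': 'True', '<hex of a draft root>': '1'}
    publicheads, draftroots = analyzeremotephases(repo, subset, roots)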
318 def newheads(repo, heads, roots):
320 def newheads(repo, heads, roots):
319 """compute new head of a subset minus another
321 """compute new head of a subset minus another
320
322
321 * `heads`: define the first subset
323 * `heads`: define the first subset
322 * `rroots`: define the second we substract to the first"""
324 * `rroots`: define the second we substract to the first"""
323 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
325 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
324 heads, roots, roots, heads)
326 heads, roots, roots, heads)
325 return [c.node() for c in revset]
327 return [c.node() for c in revset]
326
328
327
329
328 def newcommitphase(ui):
330 def newcommitphase(ui):
329 """helper to get the target phase of new commit
331 """helper to get the target phase of new commit
330
332
331 Handle all possible values for the phases.new-commit options.
333 Handle all possible values for the phases.new-commit options.
332
334
333 """
335 """
334 v = ui.config('phases', 'new-commit', draft)
336 v = ui.config('phases', 'new-commit', draft)
335 try:
337 try:
336 return phasenames.index(v)
338 return phasenames.index(v)
337 except ValueError:
339 except ValueError:
338 try:
340 try:
339 return int(v)
341 return int(v)
340 except ValueError:
342 except ValueError:
341 msg = _("phases.new-commit: not a valid phase name ('%s')")
343 msg = _("phases.new-commit: not a valid phase name ('%s')")
342 raise error.ConfigError(msg % v)
344 raise error.ConfigError(msg % v)
343
345
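The accepted values for the option parsed above, as an illustrative hgrc snippet (note that this version of the module would also need an `import error` for the ConfigError path to work):

    [phases]
    new-commit = secret   # by name -> 2
    #new-commit = 1       # or by number -> draft (the default)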