phases: add a moveboundary function to move phases boundaries...
Pierre-Yves David
r15454:5a7dde5a default
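
This localrepo.py hunk is the bookkeeping half of the moveboundary work: it threads a dirty flag for phase roots through the repository lifecycle. The flag starts out False in __init__, is reset whenever the _phaseroots filecache property is reloaded from disk, and the store lock's unlock() callback calls phases.writeroots(self) only when the flag is set, so in-memory boundary moves are flushed exactly once, when the lock is released.

Below is a minimal standalone sketch of that dirty-flag write-back pattern. PhaseCache, advanceboundary, and release are illustrative names invented here, not Mercurial's API; only the _dirtyphases-style bookkeeping mirrors the diff.

class PhaseCache(object):
    def __init__(self, path):
        self._path = path
        self._roots = self._read()      # loading from disk leaves the cache clean

    def _read(self):
        self._dirty = False             # mirrors _phaseroots resetting _dirtyphases
        try:
            with open(self._path) as f:
                return set(f.read().split())
        except IOError:
            return set()

    def advanceboundary(self, node):
        self._roots.add(node)           # mutate in memory only...
        self._dirty = True              # ...and remember that disk is now stale

    def release(self):
        # mirrors unlock(): flush once, at lock release, only if needed
        if self._dirty:
            with open(self._path, 'w') as f:
                f.write('\n'.join(sorted(self._roots)))
            self._dirty = False

Deferring the write to release time means several boundary moves under one lock cost a single write, and a lock cycle that never touches phases never rewrites the file.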
@@ -1,2120 +1,2124 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 'known', 'getbundle'))
24 'known', 'getbundle'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
26 supported = supportedformats | set(('store', 'fncache', 'shared',
26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 'dotencode'))
27 'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=False):
29 def __init__(self, baseui, path=None, create=False):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 self.opener = scmutil.opener(self.path)
35 self.opener = scmutil.opener(self.path)
36 self.wopener = scmutil.opener(self.root)
36 self.wopener = scmutil.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39 self._dirtyphases = False
39
40
40 try:
41 try:
41 self.ui.readconfig(self.join("hgrc"), self.root)
42 self.ui.readconfig(self.join("hgrc"), self.root)
42 extensions.loadall(self.ui)
43 extensions.loadall(self.ui)
43 except IOError:
44 except IOError:
44 pass
45 pass
45
46
46 if not os.path.isdir(self.path):
47 if not os.path.isdir(self.path):
47 if create:
48 if create:
48 if not os.path.exists(path):
49 if not os.path.exists(path):
49 util.makedirs(path)
50 util.makedirs(path)
50 util.makedir(self.path, notindexed=True)
51 util.makedir(self.path, notindexed=True)
51 requirements = ["revlogv1"]
52 requirements = ["revlogv1"]
52 if self.ui.configbool('format', 'usestore', True):
53 if self.ui.configbool('format', 'usestore', True):
53 os.mkdir(os.path.join(self.path, "store"))
54 os.mkdir(os.path.join(self.path, "store"))
54 requirements.append("store")
55 requirements.append("store")
55 if self.ui.configbool('format', 'usefncache', True):
56 if self.ui.configbool('format', 'usefncache', True):
56 requirements.append("fncache")
57 requirements.append("fncache")
57 if self.ui.configbool('format', 'dotencode', True):
58 if self.ui.configbool('format', 'dotencode', True):
58 requirements.append('dotencode')
59 requirements.append('dotencode')
59 # create an invalid changelog
60 # create an invalid changelog
60 self.opener.append(
61 self.opener.append(
61 "00changelog.i",
62 "00changelog.i",
62 '\0\0\0\2' # represents revlogv2
63 '\0\0\0\2' # represents revlogv2
63 ' dummy changelog to prevent using the old repo layout'
64 ' dummy changelog to prevent using the old repo layout'
64 )
65 )
65 if self.ui.configbool('format', 'generaldelta', False):
66 if self.ui.configbool('format', 'generaldelta', False):
66 requirements.append("generaldelta")
67 requirements.append("generaldelta")
67 requirements = set(requirements)
68 requirements = set(requirements)
68 else:
69 else:
69 raise error.RepoError(_("repository %s not found") % path)
70 raise error.RepoError(_("repository %s not found") % path)
70 elif create:
71 elif create:
71 raise error.RepoError(_("repository %s already exists") % path)
72 raise error.RepoError(_("repository %s already exists") % path)
72 else:
73 else:
73 try:
74 try:
74 requirements = scmutil.readrequires(self.opener, self.supported)
75 requirements = scmutil.readrequires(self.opener, self.supported)
75 except IOError, inst:
76 except IOError, inst:
76 if inst.errno != errno.ENOENT:
77 if inst.errno != errno.ENOENT:
77 raise
78 raise
78 requirements = set()
79 requirements = set()
79
80
80 self.sharedpath = self.path
81 self.sharedpath = self.path
81 try:
82 try:
82 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 if not os.path.exists(s):
84 if not os.path.exists(s):
84 raise error.RepoError(
85 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
87 self.sharedpath = s
87 except IOError, inst:
88 except IOError, inst:
88 if inst.errno != errno.ENOENT:
89 if inst.errno != errno.ENOENT:
89 raise
90 raise
90
91
91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 self.spath = self.store.path
93 self.spath = self.store.path
93 self.sopener = self.store.opener
94 self.sopener = self.store.opener
94 self.sjoin = self.store.join
95 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
96 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
97 self._applyrequirements(requirements)
97 if create:
98 if create:
98 self._writerequirements()
99 self._writerequirements()
99
100
100
101
101 self._branchcache = None
102 self._branchcache = None
102 self._branchcachetip = None
103 self._branchcachetip = None
103 self.filterpats = {}
104 self.filterpats = {}
104 self._datafilters = {}
105 self._datafilters = {}
105 self._transref = self._lockref = self._wlockref = None
106 self._transref = self._lockref = self._wlockref = None
106
107
107 # A cache for various files under .hg/ that tracks file changes,
108 # A cache for various files under .hg/ that tracks file changes,
108 # (used by the filecache decorator)
109 # (used by the filecache decorator)
109 #
110 #
110 # Maps a property name to its util.filecacheentry
111 # Maps a property name to its util.filecacheentry
111 self._filecache = {}
112 self._filecache = {}
112
113
113 def _applyrequirements(self, requirements):
114 def _applyrequirements(self, requirements):
114 self.requirements = requirements
115 self.requirements = requirements
115 openerreqs = set(('revlogv1', 'generaldelta'))
116 openerreqs = set(('revlogv1', 'generaldelta'))
116 self.sopener.options = dict((r, 1) for r in requirements
117 self.sopener.options = dict((r, 1) for r in requirements
117 if r in openerreqs)
118 if r in openerreqs)
118
119
119 def _writerequirements(self):
120 def _writerequirements(self):
120 reqfile = self.opener("requires", "w")
121 reqfile = self.opener("requires", "w")
121 for r in self.requirements:
122 for r in self.requirements:
122 reqfile.write("%s\n" % r)
123 reqfile.write("%s\n" % r)
123 reqfile.close()
124 reqfile.close()
124
125
125 def _checknested(self, path):
126 def _checknested(self, path):
126 """Determine if path is a legal nested repository."""
127 """Determine if path is a legal nested repository."""
127 if not path.startswith(self.root):
128 if not path.startswith(self.root):
128 return False
129 return False
129 subpath = path[len(self.root) + 1:]
130 subpath = path[len(self.root) + 1:]
130
131
131 # XXX: Checking against the current working copy is wrong in
132 # XXX: Checking against the current working copy is wrong in
132 # the sense that it can reject things like
133 # the sense that it can reject things like
133 #
134 #
134 # $ hg cat -r 10 sub/x.txt
135 # $ hg cat -r 10 sub/x.txt
135 #
136 #
136 # if sub/ is no longer a subrepository in the working copy
137 # if sub/ is no longer a subrepository in the working copy
137 # parent revision.
138 # parent revision.
138 #
139 #
139 # However, it can of course also allow things that would have
140 # However, it can of course also allow things that would have
140 # been rejected before, such as the above cat command if sub/
141 # been rejected before, such as the above cat command if sub/
141 # is a subrepository now, but was a normal directory before.
142 # is a subrepository now, but was a normal directory before.
142 # The old path auditor would have rejected by mistake since it
143 # The old path auditor would have rejected by mistake since it
143 # panics when it sees sub/.hg/.
144 # panics when it sees sub/.hg/.
144 #
145 #
145 # All in all, checking against the working copy seems sensible
146 # All in all, checking against the working copy seems sensible
146 # since we want to prevent access to nested repositories on
147 # since we want to prevent access to nested repositories on
147 # the filesystem *now*.
148 # the filesystem *now*.
148 ctx = self[None]
149 ctx = self[None]
149 parts = util.splitpath(subpath)
150 parts = util.splitpath(subpath)
150 while parts:
151 while parts:
151 prefix = os.sep.join(parts)
152 prefix = os.sep.join(parts)
152 if prefix in ctx.substate:
153 if prefix in ctx.substate:
153 if prefix == subpath:
154 if prefix == subpath:
154 return True
155 return True
155 else:
156 else:
156 sub = ctx.sub(prefix)
157 sub = ctx.sub(prefix)
157 return sub.checknested(subpath[len(prefix) + 1:])
158 return sub.checknested(subpath[len(prefix) + 1:])
158 else:
159 else:
159 parts.pop()
160 parts.pop()
160 return False
161 return False
161
162
162 @filecache('bookmarks')
163 @filecache('bookmarks')
163 def _bookmarks(self):
164 def _bookmarks(self):
164 return bookmarks.read(self)
165 return bookmarks.read(self)
165
166
166 @filecache('bookmarks.current')
167 @filecache('bookmarks.current')
167 def _bookmarkcurrent(self):
168 def _bookmarkcurrent(self):
168 return bookmarks.readcurrent(self)
169 return bookmarks.readcurrent(self)
169
170
170 def _writebookmarks(self, marks):
171 def _writebookmarks(self, marks):
171 bookmarks.write(self)
172 bookmarks.write(self)
172
173
173 @filecache('phaseroots')
174 @filecache('phaseroots')
174 def _phaseroots(self):
175 def _phaseroots(self):
176 self._dirtyphases = False
175 return phases.readroots(self)
177 return phases.readroots(self)
176
178
177 @propertycache
179 @propertycache
178 def _phaserev(self):
180 def _phaserev(self):
179 cache = [0] * len(self)
181 cache = [0] * len(self)
180 for phase in phases.trackedphases:
182 for phase in phases.trackedphases:
181 roots = map(self.changelog.rev, self._phaseroots[phase])
183 roots = map(self.changelog.rev, self._phaseroots[phase])
182 if roots:
184 if roots:
183 for rev in roots:
185 for rev in roots:
184 cache[rev] = phase
186 cache[rev] = phase
185 for rev in self.changelog.descendants(*roots):
187 for rev in self.changelog.descendants(*roots):
186 cache[rev] = phase
188 cache[rev] = phase
187 return cache
189 return cache
188
190
189 @filecache('00changelog.i', True)
191 @filecache('00changelog.i', True)
190 def changelog(self):
192 def changelog(self):
191 c = changelog.changelog(self.sopener)
193 c = changelog.changelog(self.sopener)
192 if 'HG_PENDING' in os.environ:
194 if 'HG_PENDING' in os.environ:
193 p = os.environ['HG_PENDING']
195 p = os.environ['HG_PENDING']
194 if p.startswith(self.root):
196 if p.startswith(self.root):
195 c.readpending('00changelog.i.a')
197 c.readpending('00changelog.i.a')
196 return c
198 return c
197
199
198 @filecache('00manifest.i', True)
200 @filecache('00manifest.i', True)
199 def manifest(self):
201 def manifest(self):
200 return manifest.manifest(self.sopener)
202 return manifest.manifest(self.sopener)
201
203
202 @filecache('dirstate')
204 @filecache('dirstate')
203 def dirstate(self):
205 def dirstate(self):
204 warned = [0]
206 warned = [0]
205 def validate(node):
207 def validate(node):
206 try:
208 try:
207 self.changelog.rev(node)
209 self.changelog.rev(node)
208 return node
210 return node
209 except error.LookupError:
211 except error.LookupError:
210 if not warned[0]:
212 if not warned[0]:
211 warned[0] = True
213 warned[0] = True
212 self.ui.warn(_("warning: ignoring unknown"
214 self.ui.warn(_("warning: ignoring unknown"
213 " working parent %s!\n") % short(node))
215 " working parent %s!\n") % short(node))
214 return nullid
216 return nullid
215
217
216 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
218 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
217
219
218 def __getitem__(self, changeid):
220 def __getitem__(self, changeid):
219 if changeid is None:
221 if changeid is None:
220 return context.workingctx(self)
222 return context.workingctx(self)
221 return context.changectx(self, changeid)
223 return context.changectx(self, changeid)
222
224
223 def __contains__(self, changeid):
225 def __contains__(self, changeid):
224 try:
226 try:
225 return bool(self.lookup(changeid))
227 return bool(self.lookup(changeid))
226 except error.RepoLookupError:
228 except error.RepoLookupError:
227 return False
229 return False
228
230
229 def __nonzero__(self):
231 def __nonzero__(self):
230 return True
232 return True
231
233
232 def __len__(self):
234 def __len__(self):
233 return len(self.changelog)
235 return len(self.changelog)
234
236
235 def __iter__(self):
237 def __iter__(self):
236 for i in xrange(len(self)):
238 for i in xrange(len(self)):
237 yield i
239 yield i
238
240
239 def revs(self, expr, *args):
241 def revs(self, expr, *args):
240 '''Return a list of revisions matching the given revset'''
242 '''Return a list of revisions matching the given revset'''
241 expr = revset.formatspec(expr, *args)
243 expr = revset.formatspec(expr, *args)
242 m = revset.match(None, expr)
244 m = revset.match(None, expr)
243 return [r for r in m(self, range(len(self)))]
245 return [r for r in m(self, range(len(self)))]
244
246
245 def set(self, expr, *args):
247 def set(self, expr, *args):
246 '''
248 '''
247 Yield a context for each matching revision, after doing arg
249 Yield a context for each matching revision, after doing arg
248 replacement via revset.formatspec
250 replacement via revset.formatspec
249 '''
251 '''
250 for r in self.revs(expr, *args):
252 for r in self.revs(expr, *args):
251 yield self[r]
253 yield self[r]
252
254
253 def url(self):
255 def url(self):
254 return 'file:' + self.root
256 return 'file:' + self.root
255
257
256 def hook(self, name, throw=False, **args):
258 def hook(self, name, throw=False, **args):
257 return hook.hook(self.ui, self, name, throw, **args)
259 return hook.hook(self.ui, self, name, throw, **args)
258
260
259 tag_disallowed = ':\r\n'
261 tag_disallowed = ':\r\n'
260
262
261 def _tag(self, names, node, message, local, user, date, extra={}):
263 def _tag(self, names, node, message, local, user, date, extra={}):
262 if isinstance(names, str):
264 if isinstance(names, str):
263 allchars = names
265 allchars = names
264 names = (names,)
266 names = (names,)
265 else:
267 else:
266 allchars = ''.join(names)
268 allchars = ''.join(names)
267 for c in self.tag_disallowed:
269 for c in self.tag_disallowed:
268 if c in allchars:
270 if c in allchars:
269 raise util.Abort(_('%r cannot be used in a tag name') % c)
271 raise util.Abort(_('%r cannot be used in a tag name') % c)
270
272
271 branches = self.branchmap()
273 branches = self.branchmap()
272 for name in names:
274 for name in names:
273 self.hook('pretag', throw=True, node=hex(node), tag=name,
275 self.hook('pretag', throw=True, node=hex(node), tag=name,
274 local=local)
276 local=local)
275 if name in branches:
277 if name in branches:
276 self.ui.warn(_("warning: tag %s conflicts with existing"
278 self.ui.warn(_("warning: tag %s conflicts with existing"
277 " branch name\n") % name)
279 " branch name\n") % name)
278
280
279 def writetags(fp, names, munge, prevtags):
281 def writetags(fp, names, munge, prevtags):
280 fp.seek(0, 2)
282 fp.seek(0, 2)
281 if prevtags and prevtags[-1] != '\n':
283 if prevtags and prevtags[-1] != '\n':
282 fp.write('\n')
284 fp.write('\n')
283 for name in names:
285 for name in names:
284 m = munge and munge(name) or name
286 m = munge and munge(name) or name
285 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
287 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
286 old = self.tags().get(name, nullid)
288 old = self.tags().get(name, nullid)
287 fp.write('%s %s\n' % (hex(old), m))
289 fp.write('%s %s\n' % (hex(old), m))
288 fp.write('%s %s\n' % (hex(node), m))
290 fp.write('%s %s\n' % (hex(node), m))
289 fp.close()
291 fp.close()
290
292
291 prevtags = ''
293 prevtags = ''
292 if local:
294 if local:
293 try:
295 try:
294 fp = self.opener('localtags', 'r+')
296 fp = self.opener('localtags', 'r+')
295 except IOError:
297 except IOError:
296 fp = self.opener('localtags', 'a')
298 fp = self.opener('localtags', 'a')
297 else:
299 else:
298 prevtags = fp.read()
300 prevtags = fp.read()
299
301
300 # local tags are stored in the current charset
302 # local tags are stored in the current charset
301 writetags(fp, names, None, prevtags)
303 writetags(fp, names, None, prevtags)
302 for name in names:
304 for name in names:
303 self.hook('tag', node=hex(node), tag=name, local=local)
305 self.hook('tag', node=hex(node), tag=name, local=local)
304 return
306 return
305
307
306 try:
308 try:
307 fp = self.wfile('.hgtags', 'rb+')
309 fp = self.wfile('.hgtags', 'rb+')
308 except IOError, e:
310 except IOError, e:
309 if e.errno != errno.ENOENT:
311 if e.errno != errno.ENOENT:
310 raise
312 raise
311 fp = self.wfile('.hgtags', 'ab')
313 fp = self.wfile('.hgtags', 'ab')
312 else:
314 else:
313 prevtags = fp.read()
315 prevtags = fp.read()
314
316
315 # committed tags are stored in UTF-8
317 # committed tags are stored in UTF-8
316 writetags(fp, names, encoding.fromlocal, prevtags)
318 writetags(fp, names, encoding.fromlocal, prevtags)
317
319
318 fp.close()
320 fp.close()
319
321
320 if '.hgtags' not in self.dirstate:
322 if '.hgtags' not in self.dirstate:
321 self[None].add(['.hgtags'])
323 self[None].add(['.hgtags'])
322
324
323 m = matchmod.exact(self.root, '', ['.hgtags'])
325 m = matchmod.exact(self.root, '', ['.hgtags'])
324 tagnode = self.commit(message, user, date, extra=extra, match=m)
326 tagnode = self.commit(message, user, date, extra=extra, match=m)
325
327
326 for name in names:
328 for name in names:
327 self.hook('tag', node=hex(node), tag=name, local=local)
329 self.hook('tag', node=hex(node), tag=name, local=local)
328
330
329 return tagnode
331 return tagnode
330
332
331 def tag(self, names, node, message, local, user, date):
333 def tag(self, names, node, message, local, user, date):
332 '''tag a revision with one or more symbolic names.
334 '''tag a revision with one or more symbolic names.
333
335
334 names is a list of strings or, when adding a single tag, names may be a
336 names is a list of strings or, when adding a single tag, names may be a
335 string.
337 string.
336
338
337 if local is True, the tags are stored in a per-repository file.
339 if local is True, the tags are stored in a per-repository file.
338 otherwise, they are stored in the .hgtags file, and a new
340 otherwise, they are stored in the .hgtags file, and a new
339 changeset is committed with the change.
341 changeset is committed with the change.
340
342
341 keyword arguments:
343 keyword arguments:
342
344
343 local: whether to store tags in non-version-controlled file
345 local: whether to store tags in non-version-controlled file
344 (default False)
346 (default False)
345
347
346 message: commit message to use if committing
348 message: commit message to use if committing
347
349
348 user: name of user to use if committing
350 user: name of user to use if committing
349
351
350 date: date tuple to use if committing'''
352 date: date tuple to use if committing'''
351
353
352 if not local:
354 if not local:
353 for x in self.status()[:5]:
355 for x in self.status()[:5]:
354 if '.hgtags' in x:
356 if '.hgtags' in x:
355 raise util.Abort(_('working copy of .hgtags is changed '
357 raise util.Abort(_('working copy of .hgtags is changed '
356 '(please commit .hgtags manually)'))
358 '(please commit .hgtags manually)'))
357
359
358 self.tags() # instantiate the cache
360 self.tags() # instantiate the cache
359 self._tag(names, node, message, local, user, date)
361 self._tag(names, node, message, local, user, date)
360
362
361 @propertycache
363 @propertycache
362 def _tagscache(self):
364 def _tagscache(self):
363 '''Returns a tagscache object that contains various tags related caches.'''
365 '''Returns a tagscache object that contains various tags related caches.'''
364
366
365 # This simplifies its cache management by having one decorated
367 # This simplifies its cache management by having one decorated
366 # function (this one) and the rest simply fetch things from it.
368 # function (this one) and the rest simply fetch things from it.
367 class tagscache(object):
369 class tagscache(object):
368 def __init__(self):
370 def __init__(self):
369 # These two define the set of tags for this repository. tags
371 # These two define the set of tags for this repository. tags
370 # maps tag name to node; tagtypes maps tag name to 'global' or
372 # maps tag name to node; tagtypes maps tag name to 'global' or
371 # 'local'. (Global tags are defined by .hgtags across all
373 # 'local'. (Global tags are defined by .hgtags across all
372 # heads, and local tags are defined in .hg/localtags.)
374 # heads, and local tags are defined in .hg/localtags.)
373 # They constitute the in-memory cache of tags.
375 # They constitute the in-memory cache of tags.
374 self.tags = self.tagtypes = None
376 self.tags = self.tagtypes = None
375
377
376 self.nodetagscache = self.tagslist = None
378 self.nodetagscache = self.tagslist = None
377
379
378 cache = tagscache()
380 cache = tagscache()
379 cache.tags, cache.tagtypes = self._findtags()
381 cache.tags, cache.tagtypes = self._findtags()
380
382
381 return cache
383 return cache
382
384
383 def tags(self):
385 def tags(self):
384 '''return a mapping of tag to node'''
386 '''return a mapping of tag to node'''
385 return self._tagscache.tags
387 return self._tagscache.tags
386
388
387 def _findtags(self):
389 def _findtags(self):
388 '''Do the hard work of finding tags. Return a pair of dicts
390 '''Do the hard work of finding tags. Return a pair of dicts
389 (tags, tagtypes) where tags maps tag name to node, and tagtypes
391 (tags, tagtypes) where tags maps tag name to node, and tagtypes
390 maps tag name to a string like \'global\' or \'local\'.
392 maps tag name to a string like \'global\' or \'local\'.
391 Subclasses or extensions are free to add their own tags, but
393 Subclasses or extensions are free to add their own tags, but
392 should be aware that the returned dicts will be retained for the
394 should be aware that the returned dicts will be retained for the
393 duration of the localrepo object.'''
395 duration of the localrepo object.'''
394
396
395 # XXX what tagtype should subclasses/extensions use? Currently
397 # XXX what tagtype should subclasses/extensions use? Currently
396 # mq and bookmarks add tags, but do not set the tagtype at all.
398 # mq and bookmarks add tags, but do not set the tagtype at all.
397 # Should each extension invent its own tag type? Should there
399 # Should each extension invent its own tag type? Should there
398 # be one tagtype for all such "virtual" tags? Or is the status
400 # be one tagtype for all such "virtual" tags? Or is the status
399 # quo fine?
401 # quo fine?
400
402
401 alltags = {} # map tag name to (node, hist)
403 alltags = {} # map tag name to (node, hist)
402 tagtypes = {}
404 tagtypes = {}
403
405
404 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
406 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
405 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
407 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
406
408
407 # Build the return dicts. Have to re-encode tag names because
409 # Build the return dicts. Have to re-encode tag names because
408 # the tags module always uses UTF-8 (in order not to lose info
410 # the tags module always uses UTF-8 (in order not to lose info
409 # writing to the cache), but the rest of Mercurial wants them in
411 # writing to the cache), but the rest of Mercurial wants them in
410 # local encoding.
412 # local encoding.
411 tags = {}
413 tags = {}
412 for (name, (node, hist)) in alltags.iteritems():
414 for (name, (node, hist)) in alltags.iteritems():
413 if node != nullid:
415 if node != nullid:
414 try:
416 try:
415 # ignore tags to unknown nodes
417 # ignore tags to unknown nodes
416 self.changelog.lookup(node)
418 self.changelog.lookup(node)
417 tags[encoding.tolocal(name)] = node
419 tags[encoding.tolocal(name)] = node
418 except error.LookupError:
420 except error.LookupError:
419 pass
421 pass
420 tags['tip'] = self.changelog.tip()
422 tags['tip'] = self.changelog.tip()
421 tagtypes = dict([(encoding.tolocal(name), value)
423 tagtypes = dict([(encoding.tolocal(name), value)
422 for (name, value) in tagtypes.iteritems()])
424 for (name, value) in tagtypes.iteritems()])
423 return (tags, tagtypes)
425 return (tags, tagtypes)
424
426
425 def tagtype(self, tagname):
427 def tagtype(self, tagname):
426 '''
428 '''
427 return the type of the given tag. result can be:
429 return the type of the given tag. result can be:
428
430
429 'local' : a local tag
431 'local' : a local tag
430 'global' : a global tag
432 'global' : a global tag
431 None : tag does not exist
433 None : tag does not exist
432 '''
434 '''
433
435
434 return self._tagscache.tagtypes.get(tagname)
436 return self._tagscache.tagtypes.get(tagname)
435
437
436 def tagslist(self):
438 def tagslist(self):
437 '''return a list of tags ordered by revision'''
439 '''return a list of tags ordered by revision'''
438 if not self._tagscache.tagslist:
440 if not self._tagscache.tagslist:
439 l = []
441 l = []
440 for t, n in self.tags().iteritems():
442 for t, n in self.tags().iteritems():
441 r = self.changelog.rev(n)
443 r = self.changelog.rev(n)
442 l.append((r, t, n))
444 l.append((r, t, n))
443 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
445 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
444
446
445 return self._tagscache.tagslist
447 return self._tagscache.tagslist
446
448
447 def nodetags(self, node):
449 def nodetags(self, node):
448 '''return the tags associated with a node'''
450 '''return the tags associated with a node'''
449 if not self._tagscache.nodetagscache:
451 if not self._tagscache.nodetagscache:
450 nodetagscache = {}
452 nodetagscache = {}
451 for t, n in self.tags().iteritems():
453 for t, n in self.tags().iteritems():
452 nodetagscache.setdefault(n, []).append(t)
454 nodetagscache.setdefault(n, []).append(t)
453 for tags in nodetagscache.itervalues():
455 for tags in nodetagscache.itervalues():
454 tags.sort()
456 tags.sort()
455 self._tagscache.nodetagscache = nodetagscache
457 self._tagscache.nodetagscache = nodetagscache
456 return self._tagscache.nodetagscache.get(node, [])
458 return self._tagscache.nodetagscache.get(node, [])
457
459
458 def nodebookmarks(self, node):
460 def nodebookmarks(self, node):
459 marks = []
461 marks = []
460 for bookmark, n in self._bookmarks.iteritems():
462 for bookmark, n in self._bookmarks.iteritems():
461 if n == node:
463 if n == node:
462 marks.append(bookmark)
464 marks.append(bookmark)
463 return sorted(marks)
465 return sorted(marks)
464
466
465 def _branchtags(self, partial, lrev):
467 def _branchtags(self, partial, lrev):
466 # TODO: rename this function?
468 # TODO: rename this function?
467 tiprev = len(self) - 1
469 tiprev = len(self) - 1
468 if lrev != tiprev:
470 if lrev != tiprev:
469 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
471 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
470 self._updatebranchcache(partial, ctxgen)
472 self._updatebranchcache(partial, ctxgen)
471 self._writebranchcache(partial, self.changelog.tip(), tiprev)
473 self._writebranchcache(partial, self.changelog.tip(), tiprev)
472
474
473 return partial
475 return partial
474
476
475 def updatebranchcache(self):
477 def updatebranchcache(self):
476 tip = self.changelog.tip()
478 tip = self.changelog.tip()
477 if self._branchcache is not None and self._branchcachetip == tip:
479 if self._branchcache is not None and self._branchcachetip == tip:
478 return self._branchcache
480 return self._branchcache
479
481
480 oldtip = self._branchcachetip
482 oldtip = self._branchcachetip
481 self._branchcachetip = tip
483 self._branchcachetip = tip
482 if oldtip is None or oldtip not in self.changelog.nodemap:
484 if oldtip is None or oldtip not in self.changelog.nodemap:
483 partial, last, lrev = self._readbranchcache()
485 partial, last, lrev = self._readbranchcache()
484 else:
486 else:
485 lrev = self.changelog.rev(oldtip)
487 lrev = self.changelog.rev(oldtip)
486 partial = self._branchcache
488 partial = self._branchcache
487
489
488 self._branchtags(partial, lrev)
490 self._branchtags(partial, lrev)
489 # this private cache holds all heads (not just tips)
491 # this private cache holds all heads (not just tips)
490 self._branchcache = partial
492 self._branchcache = partial
491
493
492 def branchmap(self):
494 def branchmap(self):
493 '''returns a dictionary {branch: [branchheads]}'''
495 '''returns a dictionary {branch: [branchheads]}'''
494 self.updatebranchcache()
496 self.updatebranchcache()
495 return self._branchcache
497 return self._branchcache
496
498
497 def branchtags(self):
499 def branchtags(self):
498 '''return a dict where branch names map to the tipmost head of
500 '''return a dict where branch names map to the tipmost head of
499 the branch, open heads come before closed'''
501 the branch, open heads come before closed'''
500 bt = {}
502 bt = {}
501 for bn, heads in self.branchmap().iteritems():
503 for bn, heads in self.branchmap().iteritems():
502 tip = heads[-1]
504 tip = heads[-1]
503 for h in reversed(heads):
505 for h in reversed(heads):
504 if 'close' not in self.changelog.read(h)[5]:
506 if 'close' not in self.changelog.read(h)[5]:
505 tip = h
507 tip = h
506 break
508 break
507 bt[bn] = tip
509 bt[bn] = tip
508 return bt
510 return bt
509
511
510 def _readbranchcache(self):
512 def _readbranchcache(self):
511 partial = {}
513 partial = {}
512 try:
514 try:
513 f = self.opener("cache/branchheads")
515 f = self.opener("cache/branchheads")
514 lines = f.read().split('\n')
516 lines = f.read().split('\n')
515 f.close()
517 f.close()
516 except (IOError, OSError):
518 except (IOError, OSError):
517 return {}, nullid, nullrev
519 return {}, nullid, nullrev
518
520
519 try:
521 try:
520 last, lrev = lines.pop(0).split(" ", 1)
522 last, lrev = lines.pop(0).split(" ", 1)
521 last, lrev = bin(last), int(lrev)
523 last, lrev = bin(last), int(lrev)
522 if lrev >= len(self) or self[lrev].node() != last:
524 if lrev >= len(self) or self[lrev].node() != last:
523 # invalidate the cache
525 # invalidate the cache
524 raise ValueError('invalidating branch cache (tip differs)')
526 raise ValueError('invalidating branch cache (tip differs)')
525 for l in lines:
527 for l in lines:
526 if not l:
528 if not l:
527 continue
529 continue
528 node, label = l.split(" ", 1)
530 node, label = l.split(" ", 1)
529 label = encoding.tolocal(label.strip())
531 label = encoding.tolocal(label.strip())
530 partial.setdefault(label, []).append(bin(node))
532 partial.setdefault(label, []).append(bin(node))
531 except KeyboardInterrupt:
533 except KeyboardInterrupt:
532 raise
534 raise
533 except Exception, inst:
535 except Exception, inst:
534 if self.ui.debugflag:
536 if self.ui.debugflag:
535 self.ui.warn(str(inst), '\n')
537 self.ui.warn(str(inst), '\n')
536 partial, last, lrev = {}, nullid, nullrev
538 partial, last, lrev = {}, nullid, nullrev
537 return partial, last, lrev
539 return partial, last, lrev
538
540
539 def _writebranchcache(self, branches, tip, tiprev):
541 def _writebranchcache(self, branches, tip, tiprev):
540 try:
542 try:
541 f = self.opener("cache/branchheads", "w", atomictemp=True)
543 f = self.opener("cache/branchheads", "w", atomictemp=True)
542 f.write("%s %s\n" % (hex(tip), tiprev))
544 f.write("%s %s\n" % (hex(tip), tiprev))
543 for label, nodes in branches.iteritems():
545 for label, nodes in branches.iteritems():
544 for node in nodes:
546 for node in nodes:
545 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
547 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
546 f.close()
548 f.close()
547 except (IOError, OSError):
549 except (IOError, OSError):
548 pass
550 pass
549
551
550 def _updatebranchcache(self, partial, ctxgen):
552 def _updatebranchcache(self, partial, ctxgen):
551 # collect new branch entries
553 # collect new branch entries
552 newbranches = {}
554 newbranches = {}
553 for c in ctxgen:
555 for c in ctxgen:
554 newbranches.setdefault(c.branch(), []).append(c.node())
556 newbranches.setdefault(c.branch(), []).append(c.node())
555 # if older branchheads are reachable from new ones, they aren't
557 # if older branchheads are reachable from new ones, they aren't
556 # really branchheads. Note checking parents is insufficient:
558 # really branchheads. Note checking parents is insufficient:
557 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
559 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
558 for branch, newnodes in newbranches.iteritems():
560 for branch, newnodes in newbranches.iteritems():
559 bheads = partial.setdefault(branch, [])
561 bheads = partial.setdefault(branch, [])
560 bheads.extend(newnodes)
562 bheads.extend(newnodes)
561 if len(bheads) <= 1:
563 if len(bheads) <= 1:
562 continue
564 continue
563 bheads = sorted(bheads, key=lambda x: self[x].rev())
565 bheads = sorted(bheads, key=lambda x: self[x].rev())
564 # starting from tip means fewer passes over reachable
566 # starting from tip means fewer passes over reachable
565 while newnodes:
567 while newnodes:
566 latest = newnodes.pop()
568 latest = newnodes.pop()
567 if latest not in bheads:
569 if latest not in bheads:
568 continue
570 continue
569 minbhrev = self[bheads[0]].node()
571 minbhrev = self[bheads[0]].node()
570 reachable = self.changelog.reachable(latest, minbhrev)
572 reachable = self.changelog.reachable(latest, minbhrev)
571 reachable.remove(latest)
573 reachable.remove(latest)
572 if reachable:
574 if reachable:
573 bheads = [b for b in bheads if b not in reachable]
575 bheads = [b for b in bheads if b not in reachable]
574 partial[branch] = bheads
576 partial[branch] = bheads
575
577
576 def lookup(self, key):
578 def lookup(self, key):
577 if isinstance(key, int):
579 if isinstance(key, int):
578 return self.changelog.node(key)
580 return self.changelog.node(key)
579 elif key == '.':
581 elif key == '.':
580 return self.dirstate.p1()
582 return self.dirstate.p1()
581 elif key == 'null':
583 elif key == 'null':
582 return nullid
584 return nullid
583 elif key == 'tip':
585 elif key == 'tip':
584 return self.changelog.tip()
586 return self.changelog.tip()
585 n = self.changelog._match(key)
587 n = self.changelog._match(key)
586 if n:
588 if n:
587 return n
589 return n
588 if key in self._bookmarks:
590 if key in self._bookmarks:
589 return self._bookmarks[key]
591 return self._bookmarks[key]
590 if key in self.tags():
592 if key in self.tags():
591 return self.tags()[key]
593 return self.tags()[key]
592 if key in self.branchtags():
594 if key in self.branchtags():
593 return self.branchtags()[key]
595 return self.branchtags()[key]
594 n = self.changelog._partialmatch(key)
596 n = self.changelog._partialmatch(key)
595 if n:
597 if n:
596 return n
598 return n
597
599
598 # can't find key, check if it might have come from damaged dirstate
600 # can't find key, check if it might have come from damaged dirstate
599 if key in self.dirstate.parents():
601 if key in self.dirstate.parents():
600 raise error.Abort(_("working directory has unknown parent '%s'!")
602 raise error.Abort(_("working directory has unknown parent '%s'!")
601 % short(key))
603 % short(key))
602 try:
604 try:
603 if len(key) == 20:
605 if len(key) == 20:
604 key = hex(key)
606 key = hex(key)
605 except TypeError:
607 except TypeError:
606 pass
608 pass
607 raise error.RepoLookupError(_("unknown revision '%s'") % key)
609 raise error.RepoLookupError(_("unknown revision '%s'") % key)
608
610
609 def lookupbranch(self, key, remote=None):
611 def lookupbranch(self, key, remote=None):
610 repo = remote or self
612 repo = remote or self
611 if key in repo.branchmap():
613 if key in repo.branchmap():
612 return key
614 return key
613
615
614 repo = (remote and remote.local()) and remote or self
616 repo = (remote and remote.local()) and remote or self
615 return repo[key].branch()
617 return repo[key].branch()
616
618
617 def known(self, nodes):
619 def known(self, nodes):
618 nm = self.changelog.nodemap
620 nm = self.changelog.nodemap
619 return [(n in nm) for n in nodes]
621 return [(n in nm) for n in nodes]
620
622
621 def local(self):
623 def local(self):
622 return self
624 return self
623
625
624 def join(self, f):
626 def join(self, f):
625 return os.path.join(self.path, f)
627 return os.path.join(self.path, f)
626
628
627 def wjoin(self, f):
629 def wjoin(self, f):
628 return os.path.join(self.root, f)
630 return os.path.join(self.root, f)
629
631
630 def file(self, f):
632 def file(self, f):
631 if f[0] == '/':
633 if f[0] == '/':
632 f = f[1:]
634 f = f[1:]
633 return filelog.filelog(self.sopener, f)
635 return filelog.filelog(self.sopener, f)
634
636
635 def changectx(self, changeid):
637 def changectx(self, changeid):
636 return self[changeid]
638 return self[changeid]
637
639
638 def parents(self, changeid=None):
640 def parents(self, changeid=None):
639 '''get list of changectxs for parents of changeid'''
641 '''get list of changectxs for parents of changeid'''
640 return self[changeid].parents()
642 return self[changeid].parents()
641
643
642 def filectx(self, path, changeid=None, fileid=None):
644 def filectx(self, path, changeid=None, fileid=None):
643 """changeid can be a changeset revision, node, or tag.
645 """changeid can be a changeset revision, node, or tag.
644 fileid can be a file revision or node."""
646 fileid can be a file revision or node."""
645 return context.filectx(self, path, changeid, fileid)
647 return context.filectx(self, path, changeid, fileid)
646
648
647 def getcwd(self):
649 def getcwd(self):
648 return self.dirstate.getcwd()
650 return self.dirstate.getcwd()
649
651
650 def pathto(self, f, cwd=None):
652 def pathto(self, f, cwd=None):
651 return self.dirstate.pathto(f, cwd)
653 return self.dirstate.pathto(f, cwd)
652
654
653 def wfile(self, f, mode='r'):
655 def wfile(self, f, mode='r'):
654 return self.wopener(f, mode)
656 return self.wopener(f, mode)
655
657
656 def _link(self, f):
658 def _link(self, f):
657 return os.path.islink(self.wjoin(f))
659 return os.path.islink(self.wjoin(f))
658
660
659 def _loadfilter(self, filter):
661 def _loadfilter(self, filter):
660 if filter not in self.filterpats:
662 if filter not in self.filterpats:
661 l = []
663 l = []
662 for pat, cmd in self.ui.configitems(filter):
664 for pat, cmd in self.ui.configitems(filter):
663 if cmd == '!':
665 if cmd == '!':
664 continue
666 continue
665 mf = matchmod.match(self.root, '', [pat])
667 mf = matchmod.match(self.root, '', [pat])
666 fn = None
668 fn = None
667 params = cmd
669 params = cmd
668 for name, filterfn in self._datafilters.iteritems():
670 for name, filterfn in self._datafilters.iteritems():
669 if cmd.startswith(name):
671 if cmd.startswith(name):
670 fn = filterfn
672 fn = filterfn
671 params = cmd[len(name):].lstrip()
673 params = cmd[len(name):].lstrip()
672 break
674 break
673 if not fn:
675 if not fn:
674 fn = lambda s, c, **kwargs: util.filter(s, c)
676 fn = lambda s, c, **kwargs: util.filter(s, c)
675 # Wrap old filters not supporting keyword arguments
677 # Wrap old filters not supporting keyword arguments
676 if not inspect.getargspec(fn)[2]:
678 if not inspect.getargspec(fn)[2]:
677 oldfn = fn
679 oldfn = fn
678 fn = lambda s, c, **kwargs: oldfn(s, c)
680 fn = lambda s, c, **kwargs: oldfn(s, c)
679 l.append((mf, fn, params))
681 l.append((mf, fn, params))
680 self.filterpats[filter] = l
682 self.filterpats[filter] = l
681 return self.filterpats[filter]
683 return self.filterpats[filter]
682
684
683 def _filter(self, filterpats, filename, data):
685 def _filter(self, filterpats, filename, data):
684 for mf, fn, cmd in filterpats:
686 for mf, fn, cmd in filterpats:
685 if mf(filename):
687 if mf(filename):
686 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
688 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
687 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
689 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
688 break
690 break
689
691
690 return data
692 return data
691
693
692 @propertycache
694 @propertycache
693 def _encodefilterpats(self):
695 def _encodefilterpats(self):
694 return self._loadfilter('encode')
696 return self._loadfilter('encode')
695
697
696 @propertycache
698 @propertycache
697 def _decodefilterpats(self):
699 def _decodefilterpats(self):
698 return self._loadfilter('decode')
700 return self._loadfilter('decode')
699
701
700 def adddatafilter(self, name, filter):
702 def adddatafilter(self, name, filter):
701 self._datafilters[name] = filter
703 self._datafilters[name] = filter
702
704
703 def wread(self, filename):
705 def wread(self, filename):
704 if self._link(filename):
706 if self._link(filename):
705 data = os.readlink(self.wjoin(filename))
707 data = os.readlink(self.wjoin(filename))
706 else:
708 else:
707 data = self.wopener.read(filename)
709 data = self.wopener.read(filename)
708 return self._filter(self._encodefilterpats, filename, data)
710 return self._filter(self._encodefilterpats, filename, data)
709
711
710 def wwrite(self, filename, data, flags):
712 def wwrite(self, filename, data, flags):
711 data = self._filter(self._decodefilterpats, filename, data)
713 data = self._filter(self._decodefilterpats, filename, data)
712 if 'l' in flags:
714 if 'l' in flags:
713 self.wopener.symlink(data, filename)
715 self.wopener.symlink(data, filename)
714 else:
716 else:
715 self.wopener.write(filename, data)
717 self.wopener.write(filename, data)
716 if 'x' in flags:
718 if 'x' in flags:
717 util.setflags(self.wjoin(filename), False, True)
719 util.setflags(self.wjoin(filename), False, True)
718
720
719 def wwritedata(self, filename, data):
721 def wwritedata(self, filename, data):
720 return self._filter(self._decodefilterpats, filename, data)
722 return self._filter(self._decodefilterpats, filename, data)
721
723
722 def transaction(self, desc):
724 def transaction(self, desc):
723 tr = self._transref and self._transref() or None
725 tr = self._transref and self._transref() or None
724 if tr and tr.running():
726 if tr and tr.running():
725 return tr.nest()
727 return tr.nest()
726
728
727 # abort here if the journal already exists
729 # abort here if the journal already exists
728 if os.path.exists(self.sjoin("journal")):
730 if os.path.exists(self.sjoin("journal")):
729 raise error.RepoError(
731 raise error.RepoError(
730 _("abandoned transaction found - run hg recover"))
732 _("abandoned transaction found - run hg recover"))
731
733
732 journalfiles = self._writejournal(desc)
734 journalfiles = self._writejournal(desc)
733 renames = [(x, undoname(x)) for x in journalfiles]
735 renames = [(x, undoname(x)) for x in journalfiles]
734
736
735 tr = transaction.transaction(self.ui.warn, self.sopener,
737 tr = transaction.transaction(self.ui.warn, self.sopener,
736 self.sjoin("journal"),
738 self.sjoin("journal"),
737 aftertrans(renames),
739 aftertrans(renames),
738 self.store.createmode)
740 self.store.createmode)
739 self._transref = weakref.ref(tr)
741 self._transref = weakref.ref(tr)
740 return tr
742 return tr
741
743
742 def _writejournal(self, desc):
744 def _writejournal(self, desc):
743 # save dirstate for rollback
745 # save dirstate for rollback
744 try:
746 try:
745 ds = self.opener.read("dirstate")
747 ds = self.opener.read("dirstate")
746 except IOError:
748 except IOError:
747 ds = ""
749 ds = ""
748 self.opener.write("journal.dirstate", ds)
750 self.opener.write("journal.dirstate", ds)
749 self.opener.write("journal.branch",
751 self.opener.write("journal.branch",
750 encoding.fromlocal(self.dirstate.branch()))
752 encoding.fromlocal(self.dirstate.branch()))
751 self.opener.write("journal.desc",
753 self.opener.write("journal.desc",
752 "%d\n%s\n" % (len(self), desc))
754 "%d\n%s\n" % (len(self), desc))
753
755
754 bkname = self.join('bookmarks')
756 bkname = self.join('bookmarks')
755 if os.path.exists(bkname):
757 if os.path.exists(bkname):
756 util.copyfile(bkname, self.join('journal.bookmarks'))
758 util.copyfile(bkname, self.join('journal.bookmarks'))
757 else:
759 else:
758 self.opener.write('journal.bookmarks', '')
760 self.opener.write('journal.bookmarks', '')
759
761
760 return (self.sjoin('journal'), self.join('journal.dirstate'),
762 return (self.sjoin('journal'), self.join('journal.dirstate'),
761 self.join('journal.branch'), self.join('journal.desc'),
763 self.join('journal.branch'), self.join('journal.desc'),
762 self.join('journal.bookmarks'))
764 self.join('journal.bookmarks'))
763
765
764 def recover(self):
766 def recover(self):
765 lock = self.lock()
767 lock = self.lock()
766 try:
768 try:
767 if os.path.exists(self.sjoin("journal")):
769 if os.path.exists(self.sjoin("journal")):
768 self.ui.status(_("rolling back interrupted transaction\n"))
770 self.ui.status(_("rolling back interrupted transaction\n"))
769 transaction.rollback(self.sopener, self.sjoin("journal"),
771 transaction.rollback(self.sopener, self.sjoin("journal"),
770 self.ui.warn)
772 self.ui.warn)
771 self.invalidate()
773 self.invalidate()
772 return True
774 return True
773 else:
775 else:
774 self.ui.warn(_("no interrupted transaction available\n"))
776 self.ui.warn(_("no interrupted transaction available\n"))
775 return False
777 return False
776 finally:
778 finally:
777 lock.release()
779 lock.release()
778
780
779 def rollback(self, dryrun=False, force=False):
781 def rollback(self, dryrun=False, force=False):
780 wlock = lock = None
782 wlock = lock = None
781 try:
783 try:
782 wlock = self.wlock()
784 wlock = self.wlock()
783 lock = self.lock()
785 lock = self.lock()
784 if os.path.exists(self.sjoin("undo")):
786 if os.path.exists(self.sjoin("undo")):
785 return self._rollback(dryrun, force)
787 return self._rollback(dryrun, force)
786 else:
788 else:
787 self.ui.warn(_("no rollback information available\n"))
789 self.ui.warn(_("no rollback information available\n"))
788 return 1
790 return 1
789 finally:
791 finally:
790 release(lock, wlock)
792 release(lock, wlock)
791
793
792 def _rollback(self, dryrun, force):
794 def _rollback(self, dryrun, force):
793 ui = self.ui
795 ui = self.ui
794 try:
796 try:
795 args = self.opener.read('undo.desc').splitlines()
797 args = self.opener.read('undo.desc').splitlines()
796 (oldlen, desc, detail) = (int(args[0]), args[1], None)
798 (oldlen, desc, detail) = (int(args[0]), args[1], None)
797 if len(args) >= 3:
799 if len(args) >= 3:
798 detail = args[2]
800 detail = args[2]
799 oldtip = oldlen - 1
801 oldtip = oldlen - 1
800
802
801 if detail and ui.verbose:
803 if detail and ui.verbose:
802 msg = (_('repository tip rolled back to revision %s'
804 msg = (_('repository tip rolled back to revision %s'
803 ' (undo %s: %s)\n')
805 ' (undo %s: %s)\n')
804 % (oldtip, desc, detail))
806 % (oldtip, desc, detail))
805 else:
807 else:
806 msg = (_('repository tip rolled back to revision %s'
808 msg = (_('repository tip rolled back to revision %s'
807 ' (undo %s)\n')
809 ' (undo %s)\n')
808 % (oldtip, desc))
810 % (oldtip, desc))
809 except IOError:
811 except IOError:
810 msg = _('rolling back unknown transaction\n')
812 msg = _('rolling back unknown transaction\n')
811 desc = None
813 desc = None
812
814
813 if not force and self['.'] != self['tip'] and desc == 'commit':
815 if not force and self['.'] != self['tip'] and desc == 'commit':
814 raise util.Abort(
816 raise util.Abort(
815 _('rollback of last commit while not checked out '
817 _('rollback of last commit while not checked out '
816 'may lose data'), hint=_('use -f to force'))
818 'may lose data'), hint=_('use -f to force'))
817
819
818 ui.status(msg)
820 ui.status(msg)
819 if dryrun:
821 if dryrun:
820 return 0
822 return 0
821
823
822 parents = self.dirstate.parents()
824 parents = self.dirstate.parents()
823 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
825 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
824 if os.path.exists(self.join('undo.bookmarks')):
826 if os.path.exists(self.join('undo.bookmarks')):
825 util.rename(self.join('undo.bookmarks'),
827 util.rename(self.join('undo.bookmarks'),
826 self.join('bookmarks'))
828 self.join('bookmarks'))
827 self.invalidate()
829 self.invalidate()
828
830
829 parentgone = (parents[0] not in self.changelog.nodemap or
831 parentgone = (parents[0] not in self.changelog.nodemap or
830 parents[1] not in self.changelog.nodemap)
832 parents[1] not in self.changelog.nodemap)
831 if parentgone:
833 if parentgone:
832 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
834 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
833 try:
835 try:
834 branch = self.opener.read('undo.branch')
836 branch = self.opener.read('undo.branch')
835 self.dirstate.setbranch(branch)
837 self.dirstate.setbranch(branch)
836 except IOError:
838 except IOError:
837 ui.warn(_('named branch could not be reset: '
839 ui.warn(_('named branch could not be reset: '
838 'current branch is still \'%s\'\n')
840 'current branch is still \'%s\'\n')
839 % self.dirstate.branch())
841 % self.dirstate.branch())
840
842
841 self.dirstate.invalidate()
843 self.dirstate.invalidate()
842 self.destroyed()
844 self.destroyed()
843 parents = tuple([p.rev() for p in self.parents()])
845 parents = tuple([p.rev() for p in self.parents()])
844 if len(parents) > 1:
846 if len(parents) > 1:
845 ui.status(_('working directory now based on '
847 ui.status(_('working directory now based on '
846 'revisions %d and %d\n') % parents)
848 'revisions %d and %d\n') % parents)
847 else:
849 else:
848 ui.status(_('working directory now based on '
850 ui.status(_('working directory now based on '
849 'revision %d\n') % parents)
851 'revision %d\n') % parents)
850 return 0
852 return 0
851
853
852 def invalidatecaches(self):
854 def invalidatecaches(self):
853 try:
855 try:
854 delattr(self, '_tagscache')
856 delattr(self, '_tagscache')
855 except AttributeError:
857 except AttributeError:
856 pass
858 pass
857
859
858 self._branchcache = None # in UTF-8
860 self._branchcache = None # in UTF-8
859 self._branchcachetip = None
861 self._branchcachetip = None
860
862
861 def invalidatedirstate(self):
863 def invalidatedirstate(self):
862 '''Invalidates the dirstate, causing the next call to dirstate
864 '''Invalidates the dirstate, causing the next call to dirstate
863 to check if it was modified since the last time it was read,
865 to check if it was modified since the last time it was read,
864 rereading it if it has.
866 rereading it if it has.
865
867
866 This is different to dirstate.invalidate() that it doesn't always
868 This is different to dirstate.invalidate() that it doesn't always
867 rereads the dirstate. Use dirstate.invalidate() if you want to
869 rereads the dirstate. Use dirstate.invalidate() if you want to
868 explicitly read the dirstate again (i.e. restoring it to a previous
870 explicitly read the dirstate again (i.e. restoring it to a previous
869 known good state).'''
871 known good state).'''
870 try:
872 try:
871 delattr(self, 'dirstate')
873 delattr(self, 'dirstate')
872 except AttributeError:
874 except AttributeError:
873 pass
875 pass
874
876
875 def invalidate(self):
877 def invalidate(self):
876 for k in self._filecache:
878 for k in self._filecache:
877 # dirstate is invalidated separately in invalidatedirstate()
879 # dirstate is invalidated separately in invalidatedirstate()
878 if k == 'dirstate':
880 if k == 'dirstate':
879 continue
881 continue
880
882
881 try:
883 try:
882 delattr(self, k)
884 delattr(self, k)
883 except AttributeError:
885 except AttributeError:
884 pass
886 pass
885 self.invalidatecaches()
887 self.invalidatecaches()
886
888
887 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
889 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
888 try:
890 try:
889 l = lock.lock(lockname, 0, releasefn, desc=desc)
891 l = lock.lock(lockname, 0, releasefn, desc=desc)
890 except error.LockHeld, inst:
892 except error.LockHeld, inst:
891 if not wait:
893 if not wait:
892 raise
894 raise
893 self.ui.warn(_("waiting for lock on %s held by %r\n") %
895 self.ui.warn(_("waiting for lock on %s held by %r\n") %
894 (desc, inst.locker))
896 (desc, inst.locker))
895 # default to 600 seconds timeout
897 # default to 600 seconds timeout
896 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
898 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
897 releasefn, desc=desc)
899 releasefn, desc=desc)
898 if acquirefn:
900 if acquirefn:
899 acquirefn()
901 acquirefn()
900 return l
902 return l
901
903
902 def lock(self, wait=True):
904 def lock(self, wait=True):
903 '''Lock the repository store (.hg/store) and return a weak reference
905 '''Lock the repository store (.hg/store) and return a weak reference
904 to the lock. Use this before modifying the store (e.g. committing or
906 to the lock. Use this before modifying the store (e.g. committing or
905 stripping). If you are opening a transaction, get a lock as well.)'''
907 stripping). If you are opening a transaction, get a lock as well.)'''
906 l = self._lockref and self._lockref()
908 l = self._lockref and self._lockref()
907 if l is not None and l.held:
909 if l is not None and l.held:
908 l.lock()
910 l.lock()
909 return l
911 return l
910
912
911 def unlock():
913 def unlock():
912 self.store.write()
914 self.store.write()
915 if self._dirtyphases:
916 phases.writeroots(self)
913 for k, ce in self._filecache.items():
917 for k, ce in self._filecache.items():
914 if k == 'dirstate':
918 if k == 'dirstate':
915 continue
919 continue
916 ce.refresh()
920 ce.refresh()
917
921
918 l = self._lock(self.sjoin("lock"), wait, unlock,
922 l = self._lock(self.sjoin("lock"), wait, unlock,
919 self.invalidate, _('repository %s') % self.origroot)
923 self.invalidate, _('repository %s') % self.origroot)
920 self._lockref = weakref.ref(l)
924 self._lockref = weakref.ref(l)
921 return l
925 return l
922
926
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

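    # Non-blocking variant (a sketch, mirroring the pattern status() uses
    # below): wait=False makes _lock() raise error.LockHeld immediately,
    # which surfaces to callers as its base class error.LockError.
    #
    #     try:
    #         wlock = repo.wlock(False)
    #         try:
    #             ...  # optional housekeeping, e.g. dirstate fixups
    #         finally:
    #             wlock.release()
    #     except error.LockError:
    #         pass  # someone else holds the lock; skip the optional work
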
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

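    # For reference, a rename recorded by _filecommit() lands in the filelog
    # as copy metadata of this shape (the hex value below is illustrative):
    #
    #     meta = {'copy': 'foo', 'copyrev': '3d11a8b9...'}
    #
    # with fparent1 forced to nullid so readers know to follow the copy data.
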
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

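    # Minimal usage sketch (names and the exact matcher are illustrative):
    #
    #     m = matchmod.exact(repo.root, '', ['a.txt'])
    #     node = repo.commit(text="fix frobnication", user="alice", match=m)
    #     if node is None:
    #         ...  # nothing to commit, no revision was created
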
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

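    # Conventional unpacking of the seven lists status() returns (a sketch;
    # the ignored/clean/unknown lists stay empty unless requested):
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(ignored=True, clean=True,
    #                                             unknown=True)
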
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

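    # Example call (illustrative): newest-first heads of a named branch,
    # keeping heads whose tip changeset closed the branch:
    #
    #     heads = repo.branchheads('default', closed=True)
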
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

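    # between() samples the first-parent chain at exponentially growing
    # distances: f doubles each time i catches up to it, so the nodes kept
    # in l sit 1, 2, 4, 8, ... steps below top. A sketch of the kept
    # offsets for a chain of a given depth:
    #
    #     kept, i, f = [], 0, 1
    #     while i < depth:
    #         if i == f:
    #             kept.append(i)  # offsets 1, 2, 4, 8, ...
    #             f *= 2
    #         i += 1
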
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

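    # Usage sketch (how the remote is obtained is outside this method; the
    # URL and the hg.repository() call here are illustrative assumptions):
    #
    #     other = hg.repository(repo.ui, 'http://example.com/repo')
    #     repo.pull(other, heads=None, force=False)
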
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

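    # Interpreting the return value, per the docstring above (sketch):
    #
    #     ret = repo.push(other)
    #     if ret == 0:
    #         ...  # HTTP error or nothing to push
    #     elif ret == 1:
    #         ...  # pushed; head count unchanged, or push was refused
    #     else:
    #         ...  # head count delta as reported by addchangegroup()
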
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

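    # Relationship between the two entry points, as a sketch (somebase and
    # somehead are illustrative nodes): getbundle() with common=None and
    # heads=None covers everything, while explicit arguments narrow it.
    #
    #     cg = repo.getbundle('pull', heads=[somehead], common=[somebase])
    #     if cg is not None:
    #         ...  # e.g. feed it to addchangegroup() on the receiving repo
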
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

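    # The reorder knob read above comes from the [bundle] section of hgrc;
    # a sketch of the configuration ('auto', or any boolean value that
    # util.parsebool() accepts):
    #
    #     [bundle]
    #     reorder = auto
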
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

1732 def _changegroup(self, nodes, source):
1736 def _changegroup(self, nodes, source):
1733 """Compute the changegroup of all nodes that we have that a recipient
1737 """Compute the changegroup of all nodes that we have that a recipient
1734 doesn't. Return a chunkbuffer object whose read() method will return
1738 doesn't. Return a chunkbuffer object whose read() method will return
1735 successive changegroup chunks.
1739 successive changegroup chunks.
1736
1740
1737 This is much easier than the previous function as we can assume that
1741 This is much easier than the previous function as we can assume that
1738 the recipient has any changenode we aren't sending them.
1742 the recipient has any changenode we aren't sending them.
1739
1743
1740 nodes is the set of nodes to send"""
1744 nodes is the set of nodes to send"""
1741
1745
1742 cl = self.changelog
1746 cl = self.changelog
1743 mf = self.manifest
1747 mf = self.manifest
1744 mfs = {}
1748 mfs = {}
1745 changedfiles = set()
1749 changedfiles = set()
1746 fstate = ['']
1750 fstate = ['']
1747 count = [0]
1751 count = [0]
1748
1752
1749 self.hook('preoutgoing', throw=True, source=source)
1753 self.hook('preoutgoing', throw=True, source=source)
1750 self.changegroupinfo(nodes, source)
1754 self.changegroupinfo(nodes, source)
1751
1755
1752 revset = set([cl.rev(n) for n in nodes])
1756 revset = set([cl.rev(n) for n in nodes])
1753
1757
1754 def gennodelst(log):
1758 def gennodelst(log):
1755 return [log.node(r) for r in log if log.linkrev(r) in revset]
1759 return [log.node(r) for r in log if log.linkrev(r) in revset]
1756
1760
1757 def lookup(revlog, x):
1761 def lookup(revlog, x):
1758 if revlog == cl:
1762 if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
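
The 'bundle.reorder' setting read above is a three-way switch: the default
'auto' maps to None so each revlog picks its own strategy, while any explicit
value is coerced to a strict boolean. A standalone sketch of that decision
(parsebool here is a simplified stand-in for util.parsebool, not the real
helper):

def reordermode(value):
    """Map a bundle.reorder config string to None, True, or False."""
    def parsebool(s):
        # simplified stand-in for util.parsebool
        return s.lower() in ('1', 'yes', 'true', 'on', 'always')
    if value == 'auto':
        return None  # let each revlog decide
    return parsebool(value)

assert reordermode('auto') is None
assert reordermode('yes') is True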

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
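
Since a non-negative head delta gets 1 added and a negative delta gets 1
subtracted, a successful call never returns 0, so callers can test the sign
alone. A sketch of decoding the summary value per the docstring above
(describechange is a hypothetical helper, not part of Mercurial):

def describechange(ret):
    # decode the integer documented in addchangegroup()
    if ret == 0:
        return 'nothing changed or no source'
    if ret == 1:
        return 'changes added, head count unchanged'
    if ret > 1:
        return '%d new heads' % (ret - 1)
    return '%d heads removed' % (-ret - 1)

assert describechange(3) == '2 new heads'
assert describechange(-3) == '2 heads removed'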

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
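
The stream format consumed above is line-oriented at the front: a status line
(0 = OK, 1 = operation forbidden, 2 = remote locking failed), then a summary
line '<file count> <byte count>', then per file a '<name>\0<size>' header
followed by exactly size bytes of data. A minimal header parser inferred from
the code above (sketch only, not a formal wire specification):

def readstreamheader(fp):
    # status line: 0 = OK, 1 = forbidden, 2 = remote locking failed
    status = int(fp.readline())
    if status != 0:
        raise ValueError('stream clone refused with code %d' % status)
    # summary line: "<total files> <total bytes>"
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    return total_files, total_bytes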

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
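
The 'streamreqs' capability is just a comma-separated requirements list; a
streaming clone is attempted only when that set is a subset of the formats
this client knows. A self-contained sketch of the check (the capability
string here is a made-up example value):

supportedformats = set(['revlogv1', 'generaldelta'])  # as in localrepository
streamreqs = set('revlogv1,generaldelta'.split(','))  # example capability
if not streamreqs - supportedformats:
    print 'streaming clone is safe'
else:
    print 'falling back to pull'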

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
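
Both methods are thin hook-wrapping shims over the pushkey module, which is
where namespaces such as 'bookmarks' are registered. A usage sketch (repo,
newnode, and the node.hex helper are assumed from surrounding context, and
the empty-string "create" convention is the bookmarks namespace's):

marks = repo.listkeys('bookmarks')       # {bookmark name: hex node}
old = marks.get('feature-x', '')         # '' asks the namespace to create it
if repo.pushkey('bookmarks', 'feature-x', old, hex(newnode)):
    repo.ui.status('bookmark feature-x updated\n')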

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])
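
savecommitmessage stores the text under .hg and returns a path relative to
the working directory, which makes it convenient for user-facing hints. A
sketch (assumes an open repo object; the message text is illustrative):

msgfn = repo.savecommitmessage('WIP: refactor dirstate handling')
# msgfn is something like '.hg/last-message.txt', ready for a hint:
repo.ui.status('commit message saved in %s\n' % msgfn)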

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
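
The returned closure captures only plain tuples, never the repo or the
transaction itself, so a transaction can hold it as a post-close callback
without creating a reference cycle that would keep destructors from running.
Intended use, sketched with illustrative file names:

onclose = aftertrans([('.hg/store/journal', '.hg/store/undo')])
onclose()  # performs the queued renames, journal -> undo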

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
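
undoname maps a transaction journal file to its undo counterpart; for example
(illustrative path):

# undoname('.hg/store/journal') -> '.hg/store/undo'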

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

mercurial/phases.py
@@ -1,41 +1,66 @@
# Mercurial phases support code
#
# Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
#                Logilab SA        <contact@logilab.fr>
#                Augie Fackler     <durin42@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import errno
from node import nullid, bin, hex

allphases = range(2)
trackedphases = allphases[1:]

def readroots(repo):
    """Read phase roots from disk"""
    roots = [set() for i in allphases]
    roots[0].add(nullid)
    try:
        f = repo.sopener('phaseroots')
        try:
            for line in f:
                phase, nh = line.strip().split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
    return roots

def writeroots(repo):
    """Write phase roots to disk"""
    f = repo.sopener('phaseroots', 'w', atomictemp=True)
    try:
        for phase, roots in enumerate(repo._phaseroots):
            for h in roots:
                f.write('%i %s\n' % (phase, hex(h)))
        repo._dirtyphases = False
    finally:
        f.close()
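
The phaseroots file written above is line-oriented: one '<phase> <hex node>'
entry per phase root. Because readroots() seeds phase 0 with nullid, a
freshly written file for a repository with a single tracked root would look
roughly like this (the second hash is a placeholder, not a real node):

0 0000000000000000000000000000000000000000
1 <40 hex digits of the phase-1 root changeset>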

def moveboundary(repo, target_phase, nodes):
    """Add nodes to a phase, changing other nodes' phases if necessary.

    Simplify the boundary to contain phase roots only."""

    # move roots of lower states
    for phase in xrange(target_phase + 1, len(allphases)):
        # filter nodes that are not in a compatible phase already
        # XXX rev phase cache might have been invalidated by a previous loop
        # XXX we need to be smarter here
        nodes = [n for n in nodes if repo[n].phase() >= phase]
        if not nodes:
            break # no roots to move anymore
        roots = repo._phaseroots[phase]
        olds = roots.copy()
        ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
        roots.clear()
        roots.update(ctx.node() for ctx in ctxs)
        if olds != roots:
            # invalidate cache (we could probably be smarter here)
            if '_phaserev' in vars(repo):
                del repo._phaserev
            repo._dirtyphases = True
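
A usage sketch for moveboundary (pushednodes is assumed to be a list of
binary changeset ids; with allphases = range(2), target phase 0 is the lowest
phase, so this moves the given changesets and their ancestors down to it):

moveboundary(repo, 0, pushednodes)  # move nodes (and ancestors) to phase 0
if repo._dirtyphases:               # the boundary actually changed
    writeroots(repo)                # persist the recomputed roots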