localrepo: delete _phaserev when invalidating caches
Idan Kamara
r15988:827e0126 stable
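Summary of the change: invalidatecaches() previously deleted only the _tagscache attribute. _phaserev, however, is a propertycache computed from the _phaseroots filecache (lines 179-196 below), so it could survive an invalidation and keep handing out stale phase data. The hunk at lines 881-892 wraps the delattr/AttributeError dance in a small helper and applies it to both caches. A sketch of the resulting method, reconstructed from that hunk with explanatory comments added:

    def invalidatecaches(self):
        # Drop in-memory caches that are derived from on-disk state.
        # Each one is recreated lazily on the next attribute access
        # (propertycache / filecache).
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass  # cache was never populated

        delcache('_tagscache')
        delcache('_phaserev')  # derived from _phaseroots; must not outlive it

        self._branchcache = None # in UTF-8
        self._branchcachetip = None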
@@ -1,2314 +1,2318 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 'known', 'getbundle'))
24 'known', 'getbundle'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
26 supported = supportedformats | set(('store', 'fncache', 'shared',
26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 'dotencode'))
27 'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=False):
29 def __init__(self, baseui, path=None, create=False):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 self.opener = scmutil.opener(self.path)
35 self.opener = scmutil.opener(self.path)
36 self.wopener = scmutil.opener(self.root)
36 self.wopener = scmutil.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39 self._dirtyphases = False
39 self._dirtyphases = False
40 # A list of callback to shape the phase if no data were found.
40 # A list of callback to shape the phase if no data were found.
41 # Callback are in the form: func(repo, roots) --> processed root.
41 # Callback are in the form: func(repo, roots) --> processed root.
42 # This list it to be filled by extension during repo setup
42 # This list it to be filled by extension during repo setup
43 self._phasedefaults = []
43 self._phasedefaults = []
44
44
45 try:
45 try:
46 self.ui.readconfig(self.join("hgrc"), self.root)
46 self.ui.readconfig(self.join("hgrc"), self.root)
47 extensions.loadall(self.ui)
47 extensions.loadall(self.ui)
48 except IOError:
48 except IOError:
49 pass
49 pass
50
50
51 if not os.path.isdir(self.path):
51 if not os.path.isdir(self.path):
52 if create:
52 if create:
53 if not os.path.exists(path):
53 if not os.path.exists(path):
54 util.makedirs(path)
54 util.makedirs(path)
55 util.makedir(self.path, notindexed=True)
55 util.makedir(self.path, notindexed=True)
56 requirements = ["revlogv1"]
56 requirements = ["revlogv1"]
57 if self.ui.configbool('format', 'usestore', True):
57 if self.ui.configbool('format', 'usestore', True):
58 os.mkdir(os.path.join(self.path, "store"))
58 os.mkdir(os.path.join(self.path, "store"))
59 requirements.append("store")
59 requirements.append("store")
60 if self.ui.configbool('format', 'usefncache', True):
60 if self.ui.configbool('format', 'usefncache', True):
61 requirements.append("fncache")
61 requirements.append("fncache")
62 if self.ui.configbool('format', 'dotencode', True):
62 if self.ui.configbool('format', 'dotencode', True):
63 requirements.append('dotencode')
63 requirements.append('dotencode')
64 # create an invalid changelog
64 # create an invalid changelog
65 self.opener.append(
65 self.opener.append(
66 "00changelog.i",
66 "00changelog.i",
67 '\0\0\0\2' # represents revlogv2
67 '\0\0\0\2' # represents revlogv2
68 ' dummy changelog to prevent using the old repo layout'
68 ' dummy changelog to prevent using the old repo layout'
69 )
69 )
70 if self.ui.configbool('format', 'generaldelta', False):
70 if self.ui.configbool('format', 'generaldelta', False):
71 requirements.append("generaldelta")
71 requirements.append("generaldelta")
72 requirements = set(requirements)
72 requirements = set(requirements)
73 else:
73 else:
74 raise error.RepoError(_("repository %s not found") % path)
74 raise error.RepoError(_("repository %s not found") % path)
75 elif create:
75 elif create:
76 raise error.RepoError(_("repository %s already exists") % path)
76 raise error.RepoError(_("repository %s already exists") % path)
77 else:
77 else:
78 try:
78 try:
79 requirements = scmutil.readrequires(self.opener, self.supported)
79 requirements = scmutil.readrequires(self.opener, self.supported)
80 except IOError, inst:
80 except IOError, inst:
81 if inst.errno != errno.ENOENT:
81 if inst.errno != errno.ENOENT:
82 raise
82 raise
83 requirements = set()
83 requirements = set()
84
84
85 self.sharedpath = self.path
85 self.sharedpath = self.path
86 try:
86 try:
87 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
87 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
88 if not os.path.exists(s):
88 if not os.path.exists(s):
89 raise error.RepoError(
89 raise error.RepoError(
90 _('.hg/sharedpath points to nonexistent directory %s') % s)
90 _('.hg/sharedpath points to nonexistent directory %s') % s)
91 self.sharedpath = s
91 self.sharedpath = s
92 except IOError, inst:
92 except IOError, inst:
93 if inst.errno != errno.ENOENT:
93 if inst.errno != errno.ENOENT:
94 raise
94 raise
95
95
96 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
96 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
97 self.spath = self.store.path
97 self.spath = self.store.path
98 self.sopener = self.store.opener
98 self.sopener = self.store.opener
99 self.sjoin = self.store.join
99 self.sjoin = self.store.join
100 self.opener.createmode = self.store.createmode
100 self.opener.createmode = self.store.createmode
101 self._applyrequirements(requirements)
101 self._applyrequirements(requirements)
102 if create:
102 if create:
103 self._writerequirements()
103 self._writerequirements()
104
104
105
105
106 self._branchcache = None
106 self._branchcache = None
107 self._branchcachetip = None
107 self._branchcachetip = None
108 self.filterpats = {}
108 self.filterpats = {}
109 self._datafilters = {}
109 self._datafilters = {}
110 self._transref = self._lockref = self._wlockref = None
110 self._transref = self._lockref = self._wlockref = None
111
111
112 # A cache for various files under .hg/ that tracks file changes,
112 # A cache for various files under .hg/ that tracks file changes,
113 # (used by the filecache decorator)
113 # (used by the filecache decorator)
114 #
114 #
115 # Maps a property name to its util.filecacheentry
115 # Maps a property name to its util.filecacheentry
116 self._filecache = {}
116 self._filecache = {}
117
117
118 def _applyrequirements(self, requirements):
118 def _applyrequirements(self, requirements):
119 self.requirements = requirements
119 self.requirements = requirements
120 openerreqs = set(('revlogv1', 'generaldelta'))
120 openerreqs = set(('revlogv1', 'generaldelta'))
121 self.sopener.options = dict((r, 1) for r in requirements
121 self.sopener.options = dict((r, 1) for r in requirements
122 if r in openerreqs)
122 if r in openerreqs)
123
123
124 def _writerequirements(self):
124 def _writerequirements(self):
125 reqfile = self.opener("requires", "w")
125 reqfile = self.opener("requires", "w")
126 for r in self.requirements:
126 for r in self.requirements:
127 reqfile.write("%s\n" % r)
127 reqfile.write("%s\n" % r)
128 reqfile.close()
128 reqfile.close()
129
129
130 def _checknested(self, path):
130 def _checknested(self, path):
131 """Determine if path is a legal nested repository."""
131 """Determine if path is a legal nested repository."""
132 if not path.startswith(self.root):
132 if not path.startswith(self.root):
133 return False
133 return False
134 subpath = path[len(self.root) + 1:]
134 subpath = path[len(self.root) + 1:]
135 normsubpath = util.pconvert(subpath)
135 normsubpath = util.pconvert(subpath)
136
136
137 # XXX: Checking against the current working copy is wrong in
137 # XXX: Checking against the current working copy is wrong in
138 # the sense that it can reject things like
138 # the sense that it can reject things like
139 #
139 #
140 # $ hg cat -r 10 sub/x.txt
140 # $ hg cat -r 10 sub/x.txt
141 #
141 #
142 # if sub/ is no longer a subrepository in the working copy
142 # if sub/ is no longer a subrepository in the working copy
143 # parent revision.
143 # parent revision.
144 #
144 #
145 # However, it can of course also allow things that would have
145 # However, it can of course also allow things that would have
146 # been rejected before, such as the above cat command if sub/
146 # been rejected before, such as the above cat command if sub/
147 # is a subrepository now, but was a normal directory before.
147 # is a subrepository now, but was a normal directory before.
148 # The old path auditor would have rejected by mistake since it
148 # The old path auditor would have rejected by mistake since it
149 # panics when it sees sub/.hg/.
149 # panics when it sees sub/.hg/.
150 #
150 #
151 # All in all, checking against the working copy seems sensible
151 # All in all, checking against the working copy seems sensible
152 # since we want to prevent access to nested repositories on
152 # since we want to prevent access to nested repositories on
153 # the filesystem *now*.
153 # the filesystem *now*.
154 ctx = self[None]
154 ctx = self[None]
155 parts = util.splitpath(subpath)
155 parts = util.splitpath(subpath)
156 while parts:
156 while parts:
157 prefix = '/'.join(parts)
157 prefix = '/'.join(parts)
158 if prefix in ctx.substate:
158 if prefix in ctx.substate:
159 if prefix == normsubpath:
159 if prefix == normsubpath:
160 return True
160 return True
161 else:
161 else:
162 sub = ctx.sub(prefix)
162 sub = ctx.sub(prefix)
163 return sub.checknested(subpath[len(prefix) + 1:])
163 return sub.checknested(subpath[len(prefix) + 1:])
164 else:
164 else:
165 parts.pop()
165 parts.pop()
166 return False
166 return False
167
167
168 @filecache('bookmarks')
168 @filecache('bookmarks')
169 def _bookmarks(self):
169 def _bookmarks(self):
170 return bookmarks.read(self)
170 return bookmarks.read(self)
171
171
172 @filecache('bookmarks.current')
172 @filecache('bookmarks.current')
173 def _bookmarkcurrent(self):
173 def _bookmarkcurrent(self):
174 return bookmarks.readcurrent(self)
174 return bookmarks.readcurrent(self)
175
175
176 def _writebookmarks(self, marks):
176 def _writebookmarks(self, marks):
177 bookmarks.write(self)
177 bookmarks.write(self)
178
178
179 @filecache('phaseroots', True)
179 @filecache('phaseroots', True)
180 def _phaseroots(self):
180 def _phaseroots(self):
181 self._dirtyphases = False
181 self._dirtyphases = False
182 phaseroots = phases.readroots(self)
182 phaseroots = phases.readroots(self)
183 phases.filterunknown(self, phaseroots)
183 phases.filterunknown(self, phaseroots)
184 return phaseroots
184 return phaseroots
185
185
186 @propertycache
186 @propertycache
187 def _phaserev(self):
187 def _phaserev(self):
188 cache = [phases.public] * len(self)
188 cache = [phases.public] * len(self)
189 for phase in phases.trackedphases:
189 for phase in phases.trackedphases:
190 roots = map(self.changelog.rev, self._phaseroots[phase])
190 roots = map(self.changelog.rev, self._phaseroots[phase])
191 if roots:
191 if roots:
192 for rev in roots:
192 for rev in roots:
193 cache[rev] = phase
193 cache[rev] = phase
194 for rev in self.changelog.descendants(*roots):
194 for rev in self.changelog.descendants(*roots):
195 cache[rev] = phase
195 cache[rev] = phase
196 return cache
196 return cache
197
197
198 @filecache('00changelog.i', True)
198 @filecache('00changelog.i', True)
199 def changelog(self):
199 def changelog(self):
200 c = changelog.changelog(self.sopener)
200 c = changelog.changelog(self.sopener)
201 if 'HG_PENDING' in os.environ:
201 if 'HG_PENDING' in os.environ:
202 p = os.environ['HG_PENDING']
202 p = os.environ['HG_PENDING']
203 if p.startswith(self.root):
203 if p.startswith(self.root):
204 c.readpending('00changelog.i.a')
204 c.readpending('00changelog.i.a')
205 return c
205 return c
206
206
207 @filecache('00manifest.i', True)
207 @filecache('00manifest.i', True)
208 def manifest(self):
208 def manifest(self):
209 return manifest.manifest(self.sopener)
209 return manifest.manifest(self.sopener)
210
210
211 @filecache('dirstate')
211 @filecache('dirstate')
212 def dirstate(self):
212 def dirstate(self):
213 warned = [0]
213 warned = [0]
214 def validate(node):
214 def validate(node):
215 try:
215 try:
216 self.changelog.rev(node)
216 self.changelog.rev(node)
217 return node
217 return node
218 except error.LookupError:
218 except error.LookupError:
219 if not warned[0]:
219 if not warned[0]:
220 warned[0] = True
220 warned[0] = True
221 self.ui.warn(_("warning: ignoring unknown"
221 self.ui.warn(_("warning: ignoring unknown"
222 " working parent %s!\n") % short(node))
222 " working parent %s!\n") % short(node))
223 return nullid
223 return nullid
224
224
225 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
225 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
226
226
227 def __getitem__(self, changeid):
227 def __getitem__(self, changeid):
228 if changeid is None:
228 if changeid is None:
229 return context.workingctx(self)
229 return context.workingctx(self)
230 return context.changectx(self, changeid)
230 return context.changectx(self, changeid)
231
231
232 def __contains__(self, changeid):
232 def __contains__(self, changeid):
233 try:
233 try:
234 return bool(self.lookup(changeid))
234 return bool(self.lookup(changeid))
235 except error.RepoLookupError:
235 except error.RepoLookupError:
236 return False
236 return False
237
237
238 def __nonzero__(self):
238 def __nonzero__(self):
239 return True
239 return True
240
240
241 def __len__(self):
241 def __len__(self):
242 return len(self.changelog)
242 return len(self.changelog)
243
243
244 def __iter__(self):
244 def __iter__(self):
245 for i in xrange(len(self)):
245 for i in xrange(len(self)):
246 yield i
246 yield i
247
247
248 def revs(self, expr, *args):
248 def revs(self, expr, *args):
249 '''Return a list of revisions matching the given revset'''
249 '''Return a list of revisions matching the given revset'''
250 expr = revset.formatspec(expr, *args)
250 expr = revset.formatspec(expr, *args)
251 m = revset.match(None, expr)
251 m = revset.match(None, expr)
252 return [r for r in m(self, range(len(self)))]
252 return [r for r in m(self, range(len(self)))]
253
253
254 def set(self, expr, *args):
254 def set(self, expr, *args):
255 '''
255 '''
256 Yield a context for each matching revision, after doing arg
256 Yield a context for each matching revision, after doing arg
257 replacement via revset.formatspec
257 replacement via revset.formatspec
258 '''
258 '''
259 for r in self.revs(expr, *args):
259 for r in self.revs(expr, *args):
260 yield self[r]
260 yield self[r]
261
261
262 def url(self):
262 def url(self):
263 return 'file:' + self.root
263 return 'file:' + self.root
264
264
265 def hook(self, name, throw=False, **args):
265 def hook(self, name, throw=False, **args):
266 return hook.hook(self.ui, self, name, throw, **args)
266 return hook.hook(self.ui, self, name, throw, **args)
267
267
268 tag_disallowed = ':\r\n'
268 tag_disallowed = ':\r\n'
269
269
270 def _tag(self, names, node, message, local, user, date, extra={}):
270 def _tag(self, names, node, message, local, user, date, extra={}):
271 if isinstance(names, str):
271 if isinstance(names, str):
272 allchars = names
272 allchars = names
273 names = (names,)
273 names = (names,)
274 else:
274 else:
275 allchars = ''.join(names)
275 allchars = ''.join(names)
276 for c in self.tag_disallowed:
276 for c in self.tag_disallowed:
277 if c in allchars:
277 if c in allchars:
278 raise util.Abort(_('%r cannot be used in a tag name') % c)
278 raise util.Abort(_('%r cannot be used in a tag name') % c)
279
279
280 branches = self.branchmap()
280 branches = self.branchmap()
281 for name in names:
281 for name in names:
282 self.hook('pretag', throw=True, node=hex(node), tag=name,
282 self.hook('pretag', throw=True, node=hex(node), tag=name,
283 local=local)
283 local=local)
284 if name in branches:
284 if name in branches:
285 self.ui.warn(_("warning: tag %s conflicts with existing"
285 self.ui.warn(_("warning: tag %s conflicts with existing"
286 " branch name\n") % name)
286 " branch name\n") % name)
287
287
288 def writetags(fp, names, munge, prevtags):
288 def writetags(fp, names, munge, prevtags):
289 fp.seek(0, 2)
289 fp.seek(0, 2)
290 if prevtags and prevtags[-1] != '\n':
290 if prevtags and prevtags[-1] != '\n':
291 fp.write('\n')
291 fp.write('\n')
292 for name in names:
292 for name in names:
293 m = munge and munge(name) or name
293 m = munge and munge(name) or name
294 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
294 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
295 old = self.tags().get(name, nullid)
295 old = self.tags().get(name, nullid)
296 fp.write('%s %s\n' % (hex(old), m))
296 fp.write('%s %s\n' % (hex(old), m))
297 fp.write('%s %s\n' % (hex(node), m))
297 fp.write('%s %s\n' % (hex(node), m))
298 fp.close()
298 fp.close()
299
299
300 prevtags = ''
300 prevtags = ''
301 if local:
301 if local:
302 try:
302 try:
303 fp = self.opener('localtags', 'r+')
303 fp = self.opener('localtags', 'r+')
304 except IOError:
304 except IOError:
305 fp = self.opener('localtags', 'a')
305 fp = self.opener('localtags', 'a')
306 else:
306 else:
307 prevtags = fp.read()
307 prevtags = fp.read()
308
308
309 # local tags are stored in the current charset
309 # local tags are stored in the current charset
310 writetags(fp, names, None, prevtags)
310 writetags(fp, names, None, prevtags)
311 for name in names:
311 for name in names:
312 self.hook('tag', node=hex(node), tag=name, local=local)
312 self.hook('tag', node=hex(node), tag=name, local=local)
313 return
313 return
314
314
315 try:
315 try:
316 fp = self.wfile('.hgtags', 'rb+')
316 fp = self.wfile('.hgtags', 'rb+')
317 except IOError, e:
317 except IOError, e:
318 if e.errno != errno.ENOENT:
318 if e.errno != errno.ENOENT:
319 raise
319 raise
320 fp = self.wfile('.hgtags', 'ab')
320 fp = self.wfile('.hgtags', 'ab')
321 else:
321 else:
322 prevtags = fp.read()
322 prevtags = fp.read()
323
323
324 # committed tags are stored in UTF-8
324 # committed tags are stored in UTF-8
325 writetags(fp, names, encoding.fromlocal, prevtags)
325 writetags(fp, names, encoding.fromlocal, prevtags)
326
326
327 fp.close()
327 fp.close()
328
328
329 self.invalidatecaches()
329 self.invalidatecaches()
330
330
331 if '.hgtags' not in self.dirstate:
331 if '.hgtags' not in self.dirstate:
332 self[None].add(['.hgtags'])
332 self[None].add(['.hgtags'])
333
333
334 m = matchmod.exact(self.root, '', ['.hgtags'])
334 m = matchmod.exact(self.root, '', ['.hgtags'])
335 tagnode = self.commit(message, user, date, extra=extra, match=m)
335 tagnode = self.commit(message, user, date, extra=extra, match=m)
336
336
337 for name in names:
337 for name in names:
338 self.hook('tag', node=hex(node), tag=name, local=local)
338 self.hook('tag', node=hex(node), tag=name, local=local)
339
339
340 return tagnode
340 return tagnode
341
341
342 def tag(self, names, node, message, local, user, date):
342 def tag(self, names, node, message, local, user, date):
343 '''tag a revision with one or more symbolic names.
343 '''tag a revision with one or more symbolic names.
344
344
345 names is a list of strings or, when adding a single tag, names may be a
345 names is a list of strings or, when adding a single tag, names may be a
346 string.
346 string.
347
347
348 if local is True, the tags are stored in a per-repository file.
348 if local is True, the tags are stored in a per-repository file.
349 otherwise, they are stored in the .hgtags file, and a new
349 otherwise, they are stored in the .hgtags file, and a new
350 changeset is committed with the change.
350 changeset is committed with the change.
351
351
352 keyword arguments:
352 keyword arguments:
353
353
354 local: whether to store tags in non-version-controlled file
354 local: whether to store tags in non-version-controlled file
355 (default False)
355 (default False)
356
356
357 message: commit message to use if committing
357 message: commit message to use if committing
358
358
359 user: name of user to use if committing
359 user: name of user to use if committing
360
360
361 date: date tuple to use if committing'''
361 date: date tuple to use if committing'''
362
362
363 if not local:
363 if not local:
364 for x in self.status()[:5]:
364 for x in self.status()[:5]:
365 if '.hgtags' in x:
365 if '.hgtags' in x:
366 raise util.Abort(_('working copy of .hgtags is changed '
366 raise util.Abort(_('working copy of .hgtags is changed '
367 '(please commit .hgtags manually)'))
367 '(please commit .hgtags manually)'))
368
368
369 self.tags() # instantiate the cache
369 self.tags() # instantiate the cache
370 self._tag(names, node, message, local, user, date)
370 self._tag(names, node, message, local, user, date)
371
371
372 @propertycache
372 @propertycache
373 def _tagscache(self):
373 def _tagscache(self):
374 '''Returns a tagscache object that contains various tags related caches.'''
374 '''Returns a tagscache object that contains various tags related caches.'''
375
375
376 # This simplifies its cache management by having one decorated
376 # This simplifies its cache management by having one decorated
377 # function (this one) and the rest simply fetch things from it.
377 # function (this one) and the rest simply fetch things from it.
378 class tagscache(object):
378 class tagscache(object):
379 def __init__(self):
379 def __init__(self):
380 # These two define the set of tags for this repository. tags
380 # These two define the set of tags for this repository. tags
381 # maps tag name to node; tagtypes maps tag name to 'global' or
381 # maps tag name to node; tagtypes maps tag name to 'global' or
382 # 'local'. (Global tags are defined by .hgtags across all
382 # 'local'. (Global tags are defined by .hgtags across all
383 # heads, and local tags are defined in .hg/localtags.)
383 # heads, and local tags are defined in .hg/localtags.)
384 # They constitute the in-memory cache of tags.
384 # They constitute the in-memory cache of tags.
385 self.tags = self.tagtypes = None
385 self.tags = self.tagtypes = None
386
386
387 self.nodetagscache = self.tagslist = None
387 self.nodetagscache = self.tagslist = None
388
388
389 cache = tagscache()
389 cache = tagscache()
390 cache.tags, cache.tagtypes = self._findtags()
390 cache.tags, cache.tagtypes = self._findtags()
391
391
392 return cache
392 return cache
393
393
394 def tags(self):
394 def tags(self):
395 '''return a mapping of tag to node'''
395 '''return a mapping of tag to node'''
396 return self._tagscache.tags
396 return self._tagscache.tags
397
397
398 def _findtags(self):
398 def _findtags(self):
399 '''Do the hard work of finding tags. Return a pair of dicts
399 '''Do the hard work of finding tags. Return a pair of dicts
400 (tags, tagtypes) where tags maps tag name to node, and tagtypes
400 (tags, tagtypes) where tags maps tag name to node, and tagtypes
401 maps tag name to a string like \'global\' or \'local\'.
401 maps tag name to a string like \'global\' or \'local\'.
402 Subclasses or extensions are free to add their own tags, but
402 Subclasses or extensions are free to add their own tags, but
403 should be aware that the returned dicts will be retained for the
403 should be aware that the returned dicts will be retained for the
404 duration of the localrepo object.'''
404 duration of the localrepo object.'''
405
405
406 # XXX what tagtype should subclasses/extensions use? Currently
406 # XXX what tagtype should subclasses/extensions use? Currently
407 # mq and bookmarks add tags, but do not set the tagtype at all.
407 # mq and bookmarks add tags, but do not set the tagtype at all.
408 # Should each extension invent its own tag type? Should there
408 # Should each extension invent its own tag type? Should there
409 # be one tagtype for all such "virtual" tags? Or is the status
409 # be one tagtype for all such "virtual" tags? Or is the status
410 # quo fine?
410 # quo fine?
411
411
412 alltags = {} # map tag name to (node, hist)
412 alltags = {} # map tag name to (node, hist)
413 tagtypes = {}
413 tagtypes = {}
414
414
415 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
415 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
416 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
416 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
417
417
418 # Build the return dicts. Have to re-encode tag names because
418 # Build the return dicts. Have to re-encode tag names because
419 # the tags module always uses UTF-8 (in order not to lose info
419 # the tags module always uses UTF-8 (in order not to lose info
420 # writing to the cache), but the rest of Mercurial wants them in
420 # writing to the cache), but the rest of Mercurial wants them in
421 # local encoding.
421 # local encoding.
422 tags = {}
422 tags = {}
423 for (name, (node, hist)) in alltags.iteritems():
423 for (name, (node, hist)) in alltags.iteritems():
424 if node != nullid:
424 if node != nullid:
425 try:
425 try:
426 # ignore tags to unknown nodes
426 # ignore tags to unknown nodes
427 self.changelog.lookup(node)
427 self.changelog.lookup(node)
428 tags[encoding.tolocal(name)] = node
428 tags[encoding.tolocal(name)] = node
429 except error.LookupError:
429 except error.LookupError:
430 pass
430 pass
431 tags['tip'] = self.changelog.tip()
431 tags['tip'] = self.changelog.tip()
432 tagtypes = dict([(encoding.tolocal(name), value)
432 tagtypes = dict([(encoding.tolocal(name), value)
433 for (name, value) in tagtypes.iteritems()])
433 for (name, value) in tagtypes.iteritems()])
434 return (tags, tagtypes)
434 return (tags, tagtypes)
435
435
436 def tagtype(self, tagname):
436 def tagtype(self, tagname):
437 '''
437 '''
438 return the type of the given tag. result can be:
438 return the type of the given tag. result can be:
439
439
440 'local' : a local tag
440 'local' : a local tag
441 'global' : a global tag
441 'global' : a global tag
442 None : tag does not exist
442 None : tag does not exist
443 '''
443 '''
444
444
445 return self._tagscache.tagtypes.get(tagname)
445 return self._tagscache.tagtypes.get(tagname)
446
446
447 def tagslist(self):
447 def tagslist(self):
448 '''return a list of tags ordered by revision'''
448 '''return a list of tags ordered by revision'''
449 if not self._tagscache.tagslist:
449 if not self._tagscache.tagslist:
450 l = []
450 l = []
451 for t, n in self.tags().iteritems():
451 for t, n in self.tags().iteritems():
452 r = self.changelog.rev(n)
452 r = self.changelog.rev(n)
453 l.append((r, t, n))
453 l.append((r, t, n))
454 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
454 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
455
455
456 return self._tagscache.tagslist
456 return self._tagscache.tagslist
457
457
458 def nodetags(self, node):
458 def nodetags(self, node):
459 '''return the tags associated with a node'''
459 '''return the tags associated with a node'''
460 if not self._tagscache.nodetagscache:
460 if not self._tagscache.nodetagscache:
461 nodetagscache = {}
461 nodetagscache = {}
462 for t, n in self.tags().iteritems():
462 for t, n in self.tags().iteritems():
463 nodetagscache.setdefault(n, []).append(t)
463 nodetagscache.setdefault(n, []).append(t)
464 for tags in nodetagscache.itervalues():
464 for tags in nodetagscache.itervalues():
465 tags.sort()
465 tags.sort()
466 self._tagscache.nodetagscache = nodetagscache
466 self._tagscache.nodetagscache = nodetagscache
467 return self._tagscache.nodetagscache.get(node, [])
467 return self._tagscache.nodetagscache.get(node, [])
468
468
469 def nodebookmarks(self, node):
469 def nodebookmarks(self, node):
470 marks = []
470 marks = []
471 for bookmark, n in self._bookmarks.iteritems():
471 for bookmark, n in self._bookmarks.iteritems():
472 if n == node:
472 if n == node:
473 marks.append(bookmark)
473 marks.append(bookmark)
474 return sorted(marks)
474 return sorted(marks)
475
475
476 def _branchtags(self, partial, lrev):
476 def _branchtags(self, partial, lrev):
477 # TODO: rename this function?
477 # TODO: rename this function?
478 tiprev = len(self) - 1
478 tiprev = len(self) - 1
479 if lrev != tiprev:
479 if lrev != tiprev:
480 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
480 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
481 self._updatebranchcache(partial, ctxgen)
481 self._updatebranchcache(partial, ctxgen)
482 self._writebranchcache(partial, self.changelog.tip(), tiprev)
482 self._writebranchcache(partial, self.changelog.tip(), tiprev)
483
483
484 return partial
484 return partial
485
485
486 def updatebranchcache(self):
486 def updatebranchcache(self):
487 tip = self.changelog.tip()
487 tip = self.changelog.tip()
488 if self._branchcache is not None and self._branchcachetip == tip:
488 if self._branchcache is not None and self._branchcachetip == tip:
489 return
489 return
490
490
491 oldtip = self._branchcachetip
491 oldtip = self._branchcachetip
492 self._branchcachetip = tip
492 self._branchcachetip = tip
493 if oldtip is None or oldtip not in self.changelog.nodemap:
493 if oldtip is None or oldtip not in self.changelog.nodemap:
494 partial, last, lrev = self._readbranchcache()
494 partial, last, lrev = self._readbranchcache()
495 else:
495 else:
496 lrev = self.changelog.rev(oldtip)
496 lrev = self.changelog.rev(oldtip)
497 partial = self._branchcache
497 partial = self._branchcache
498
498
499 self._branchtags(partial, lrev)
499 self._branchtags(partial, lrev)
500 # this private cache holds all heads (not just tips)
500 # this private cache holds all heads (not just tips)
501 self._branchcache = partial
501 self._branchcache = partial
502
502
503 def branchmap(self):
503 def branchmap(self):
504 '''returns a dictionary {branch: [branchheads]}'''
504 '''returns a dictionary {branch: [branchheads]}'''
505 self.updatebranchcache()
505 self.updatebranchcache()
506 return self._branchcache
506 return self._branchcache
507
507
508 def branchtags(self):
508 def branchtags(self):
509 '''return a dict where branch names map to the tipmost head of
509 '''return a dict where branch names map to the tipmost head of
510 the branch, open heads come before closed'''
510 the branch, open heads come before closed'''
511 bt = {}
511 bt = {}
512 for bn, heads in self.branchmap().iteritems():
512 for bn, heads in self.branchmap().iteritems():
513 tip = heads[-1]
513 tip = heads[-1]
514 for h in reversed(heads):
514 for h in reversed(heads):
515 if 'close' not in self.changelog.read(h)[5]:
515 if 'close' not in self.changelog.read(h)[5]:
516 tip = h
516 tip = h
517 break
517 break
518 bt[bn] = tip
518 bt[bn] = tip
519 return bt
519 return bt
520
520
521 def _readbranchcache(self):
521 def _readbranchcache(self):
522 partial = {}
522 partial = {}
523 try:
523 try:
524 f = self.opener("cache/branchheads")
524 f = self.opener("cache/branchheads")
525 lines = f.read().split('\n')
525 lines = f.read().split('\n')
526 f.close()
526 f.close()
527 except (IOError, OSError):
527 except (IOError, OSError):
528 return {}, nullid, nullrev
528 return {}, nullid, nullrev
529
529
530 try:
530 try:
531 last, lrev = lines.pop(0).split(" ", 1)
531 last, lrev = lines.pop(0).split(" ", 1)
532 last, lrev = bin(last), int(lrev)
532 last, lrev = bin(last), int(lrev)
533 if lrev >= len(self) or self[lrev].node() != last:
533 if lrev >= len(self) or self[lrev].node() != last:
534 # invalidate the cache
534 # invalidate the cache
535 raise ValueError('invalidating branch cache (tip differs)')
535 raise ValueError('invalidating branch cache (tip differs)')
536 for l in lines:
536 for l in lines:
537 if not l:
537 if not l:
538 continue
538 continue
539 node, label = l.split(" ", 1)
539 node, label = l.split(" ", 1)
540 label = encoding.tolocal(label.strip())
540 label = encoding.tolocal(label.strip())
541 partial.setdefault(label, []).append(bin(node))
541 partial.setdefault(label, []).append(bin(node))
542 except KeyboardInterrupt:
542 except KeyboardInterrupt:
543 raise
543 raise
544 except Exception, inst:
544 except Exception, inst:
545 if self.ui.debugflag:
545 if self.ui.debugflag:
546 self.ui.warn(str(inst), '\n')
546 self.ui.warn(str(inst), '\n')
547 partial, last, lrev = {}, nullid, nullrev
547 partial, last, lrev = {}, nullid, nullrev
548 return partial, last, lrev
548 return partial, last, lrev
549
549
550 def _writebranchcache(self, branches, tip, tiprev):
550 def _writebranchcache(self, branches, tip, tiprev):
551 try:
551 try:
552 f = self.opener("cache/branchheads", "w", atomictemp=True)
552 f = self.opener("cache/branchheads", "w", atomictemp=True)
553 f.write("%s %s\n" % (hex(tip), tiprev))
553 f.write("%s %s\n" % (hex(tip), tiprev))
554 for label, nodes in branches.iteritems():
554 for label, nodes in branches.iteritems():
555 for node in nodes:
555 for node in nodes:
556 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
556 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
557 f.close()
557 f.close()
558 except (IOError, OSError):
558 except (IOError, OSError):
559 pass
559 pass
560
560
561 def _updatebranchcache(self, partial, ctxgen):
561 def _updatebranchcache(self, partial, ctxgen):
562 # collect new branch entries
562 # collect new branch entries
563 newbranches = {}
563 newbranches = {}
564 for c in ctxgen:
564 for c in ctxgen:
565 newbranches.setdefault(c.branch(), []).append(c.node())
565 newbranches.setdefault(c.branch(), []).append(c.node())
566 # if older branchheads are reachable from new ones, they aren't
566 # if older branchheads are reachable from new ones, they aren't
567 # really branchheads. Note checking parents is insufficient:
567 # really branchheads. Note checking parents is insufficient:
568 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
568 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
569 for branch, newnodes in newbranches.iteritems():
569 for branch, newnodes in newbranches.iteritems():
570 bheads = partial.setdefault(branch, [])
570 bheads = partial.setdefault(branch, [])
571 bheads.extend(newnodes)
571 bheads.extend(newnodes)
572 if len(bheads) <= 1:
572 if len(bheads) <= 1:
573 continue
573 continue
574 bheads = sorted(bheads, key=lambda x: self[x].rev())
574 bheads = sorted(bheads, key=lambda x: self[x].rev())
575 # starting from tip means fewer passes over reachable
575 # starting from tip means fewer passes over reachable
576 while newnodes:
576 while newnodes:
577 latest = newnodes.pop()
577 latest = newnodes.pop()
578 if latest not in bheads:
578 if latest not in bheads:
579 continue
579 continue
580 minbhrev = self[bheads[0]].node()
580 minbhrev = self[bheads[0]].node()
581 reachable = self.changelog.reachable(latest, minbhrev)
581 reachable = self.changelog.reachable(latest, minbhrev)
582 reachable.remove(latest)
582 reachable.remove(latest)
583 if reachable:
583 if reachable:
584 bheads = [b for b in bheads if b not in reachable]
584 bheads = [b for b in bheads if b not in reachable]
585 partial[branch] = bheads
585 partial[branch] = bheads
586
586
587 def lookup(self, key):
587 def lookup(self, key):
588 if isinstance(key, int):
588 if isinstance(key, int):
589 return self.changelog.node(key)
589 return self.changelog.node(key)
590 elif key == '.':
590 elif key == '.':
591 return self.dirstate.p1()
591 return self.dirstate.p1()
592 elif key == 'null':
592 elif key == 'null':
593 return nullid
593 return nullid
594 elif key == 'tip':
594 elif key == 'tip':
595 return self.changelog.tip()
595 return self.changelog.tip()
596 n = self.changelog._match(key)
596 n = self.changelog._match(key)
597 if n:
597 if n:
598 return n
598 return n
599 if key in self._bookmarks:
599 if key in self._bookmarks:
600 return self._bookmarks[key]
600 return self._bookmarks[key]
601 if key in self.tags():
601 if key in self.tags():
602 return self.tags()[key]
602 return self.tags()[key]
603 if key in self.branchtags():
603 if key in self.branchtags():
604 return self.branchtags()[key]
604 return self.branchtags()[key]
605 n = self.changelog._partialmatch(key)
605 n = self.changelog._partialmatch(key)
606 if n:
606 if n:
607 return n
607 return n
608
608
609 # can't find key, check if it might have come from damaged dirstate
609 # can't find key, check if it might have come from damaged dirstate
610 if key in self.dirstate.parents():
610 if key in self.dirstate.parents():
611 raise error.Abort(_("working directory has unknown parent '%s'!")
611 raise error.Abort(_("working directory has unknown parent '%s'!")
612 % short(key))
612 % short(key))
613 try:
613 try:
614 if len(key) == 20:
614 if len(key) == 20:
615 key = hex(key)
615 key = hex(key)
616 except TypeError:
616 except TypeError:
617 pass
617 pass
618 raise error.RepoLookupError(_("unknown revision '%s'") % key)
618 raise error.RepoLookupError(_("unknown revision '%s'") % key)
619
619
620 def lookupbranch(self, key, remote=None):
620 def lookupbranch(self, key, remote=None):
621 repo = remote or self
621 repo = remote or self
622 if key in repo.branchmap():
622 if key in repo.branchmap():
623 return key
623 return key
624
624
625 repo = (remote and remote.local()) and remote or self
625 repo = (remote and remote.local()) and remote or self
626 return repo[key].branch()
626 return repo[key].branch()
627
627
628 def known(self, nodes):
628 def known(self, nodes):
629 nm = self.changelog.nodemap
629 nm = self.changelog.nodemap
630 result = []
630 result = []
631 for n in nodes:
631 for n in nodes:
632 r = nm.get(n)
632 r = nm.get(n)
633 resp = not (r is None or self._phaserev[r] >= phases.secret)
633 resp = not (r is None or self._phaserev[r] >= phases.secret)
634 result.append(resp)
634 result.append(resp)
635 return result
635 return result
636
636
637 def local(self):
637 def local(self):
638 return self
638 return self
639
639
640 def cancopy(self):
640 def cancopy(self):
641 return (repo.repository.cancopy(self)
641 return (repo.repository.cancopy(self)
642 and not self._phaseroots[phases.secret])
642 and not self._phaseroots[phases.secret])
643
643
644 def join(self, f):
644 def join(self, f):
645 return os.path.join(self.path, f)
645 return os.path.join(self.path, f)
646
646
647 def wjoin(self, f):
647 def wjoin(self, f):
648 return os.path.join(self.root, f)
648 return os.path.join(self.root, f)
649
649
650 def file(self, f):
650 def file(self, f):
651 if f[0] == '/':
651 if f[0] == '/':
652 f = f[1:]
652 f = f[1:]
653 return filelog.filelog(self.sopener, f)
653 return filelog.filelog(self.sopener, f)
654
654
655 def changectx(self, changeid):
655 def changectx(self, changeid):
656 return self[changeid]
656 return self[changeid]
657
657
658 def parents(self, changeid=None):
658 def parents(self, changeid=None):
659 '''get list of changectxs for parents of changeid'''
659 '''get list of changectxs for parents of changeid'''
660 return self[changeid].parents()
660 return self[changeid].parents()
661
661
662 def filectx(self, path, changeid=None, fileid=None):
662 def filectx(self, path, changeid=None, fileid=None):
663 """changeid can be a changeset revision, node, or tag.
663 """changeid can be a changeset revision, node, or tag.
664 fileid can be a file revision or node."""
664 fileid can be a file revision or node."""
665 return context.filectx(self, path, changeid, fileid)
665 return context.filectx(self, path, changeid, fileid)
666
666
667 def getcwd(self):
667 def getcwd(self):
668 return self.dirstate.getcwd()
668 return self.dirstate.getcwd()
669
669
670 def pathto(self, f, cwd=None):
670 def pathto(self, f, cwd=None):
671 return self.dirstate.pathto(f, cwd)
671 return self.dirstate.pathto(f, cwd)
672
672
673 def wfile(self, f, mode='r'):
673 def wfile(self, f, mode='r'):
674 return self.wopener(f, mode)
674 return self.wopener(f, mode)
675
675
676 def _link(self, f):
676 def _link(self, f):
677 return os.path.islink(self.wjoin(f))
677 return os.path.islink(self.wjoin(f))
678
678
679 def _loadfilter(self, filter):
679 def _loadfilter(self, filter):
680 if filter not in self.filterpats:
680 if filter not in self.filterpats:
681 l = []
681 l = []
682 for pat, cmd in self.ui.configitems(filter):
682 for pat, cmd in self.ui.configitems(filter):
683 if cmd == '!':
683 if cmd == '!':
684 continue
684 continue
685 mf = matchmod.match(self.root, '', [pat])
685 mf = matchmod.match(self.root, '', [pat])
686 fn = None
686 fn = None
687 params = cmd
687 params = cmd
688 for name, filterfn in self._datafilters.iteritems():
688 for name, filterfn in self._datafilters.iteritems():
689 if cmd.startswith(name):
689 if cmd.startswith(name):
690 fn = filterfn
690 fn = filterfn
691 params = cmd[len(name):].lstrip()
691 params = cmd[len(name):].lstrip()
692 break
692 break
693 if not fn:
693 if not fn:
694 fn = lambda s, c, **kwargs: util.filter(s, c)
694 fn = lambda s, c, **kwargs: util.filter(s, c)
695 # Wrap old filters not supporting keyword arguments
695 # Wrap old filters not supporting keyword arguments
696 if not inspect.getargspec(fn)[2]:
696 if not inspect.getargspec(fn)[2]:
697 oldfn = fn
697 oldfn = fn
698 fn = lambda s, c, **kwargs: oldfn(s, c)
698 fn = lambda s, c, **kwargs: oldfn(s, c)
699 l.append((mf, fn, params))
699 l.append((mf, fn, params))
700 self.filterpats[filter] = l
700 self.filterpats[filter] = l
701 return self.filterpats[filter]
701 return self.filterpats[filter]
702
702
703 def _filter(self, filterpats, filename, data):
703 def _filter(self, filterpats, filename, data):
704 for mf, fn, cmd in filterpats:
704 for mf, fn, cmd in filterpats:
705 if mf(filename):
705 if mf(filename):
706 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
706 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
707 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
707 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
708 break
708 break
709
709
710 return data
710 return data
711
711
712 @propertycache
712 @propertycache
713 def _encodefilterpats(self):
713 def _encodefilterpats(self):
714 return self._loadfilter('encode')
714 return self._loadfilter('encode')
715
715
716 @propertycache
716 @propertycache
717 def _decodefilterpats(self):
717 def _decodefilterpats(self):
718 return self._loadfilter('decode')
718 return self._loadfilter('decode')
719
719
720 def adddatafilter(self, name, filter):
720 def adddatafilter(self, name, filter):
721 self._datafilters[name] = filter
721 self._datafilters[name] = filter
722
722
723 def wread(self, filename):
723 def wread(self, filename):
724 if self._link(filename):
724 if self._link(filename):
725 data = os.readlink(self.wjoin(filename))
725 data = os.readlink(self.wjoin(filename))
726 else:
726 else:
727 data = self.wopener.read(filename)
727 data = self.wopener.read(filename)
728 return self._filter(self._encodefilterpats, filename, data)
728 return self._filter(self._encodefilterpats, filename, data)
729
729
730 def wwrite(self, filename, data, flags):
730 def wwrite(self, filename, data, flags):
731 data = self._filter(self._decodefilterpats, filename, data)
731 data = self._filter(self._decodefilterpats, filename, data)
732 if 'l' in flags:
732 if 'l' in flags:
733 self.wopener.symlink(data, filename)
733 self.wopener.symlink(data, filename)
734 else:
734 else:
735 self.wopener.write(filename, data)
735 self.wopener.write(filename, data)
736 if 'x' in flags:
736 if 'x' in flags:
737 util.setflags(self.wjoin(filename), False, True)
737 util.setflags(self.wjoin(filename), False, True)
738
738
739 def wwritedata(self, filename, data):
739 def wwritedata(self, filename, data):
740 return self._filter(self._decodefilterpats, filename, data)
740 return self._filter(self._decodefilterpats, filename, data)
741
741
742 def transaction(self, desc):
742 def transaction(self, desc):
743 tr = self._transref and self._transref() or None
743 tr = self._transref and self._transref() or None
744 if tr and tr.running():
744 if tr and tr.running():
745 return tr.nest()
745 return tr.nest()
746
746
747 # abort here if the journal already exists
747 # abort here if the journal already exists
748 if os.path.exists(self.sjoin("journal")):
748 if os.path.exists(self.sjoin("journal")):
749 raise error.RepoError(
749 raise error.RepoError(
750 _("abandoned transaction found - run hg recover"))
750 _("abandoned transaction found - run hg recover"))
751
751
752 journalfiles = self._writejournal(desc)
752 journalfiles = self._writejournal(desc)
753 renames = [(x, undoname(x)) for x in journalfiles]
753 renames = [(x, undoname(x)) for x in journalfiles]
754
754
755 tr = transaction.transaction(self.ui.warn, self.sopener,
755 tr = transaction.transaction(self.ui.warn, self.sopener,
756 self.sjoin("journal"),
756 self.sjoin("journal"),
757 aftertrans(renames),
757 aftertrans(renames),
758 self.store.createmode)
758 self.store.createmode)
759 self._transref = weakref.ref(tr)
759 self._transref = weakref.ref(tr)
760 return tr
760 return tr
761
761
762 def _writejournal(self, desc):
762 def _writejournal(self, desc):
763 # save dirstate for rollback
763 # save dirstate for rollback
764 try:
764 try:
765 ds = self.opener.read("dirstate")
765 ds = self.opener.read("dirstate")
766 except IOError:
766 except IOError:
767 ds = ""
767 ds = ""
768 self.opener.write("journal.dirstate", ds)
768 self.opener.write("journal.dirstate", ds)
769 self.opener.write("journal.branch",
769 self.opener.write("journal.branch",
770 encoding.fromlocal(self.dirstate.branch()))
770 encoding.fromlocal(self.dirstate.branch()))
771 self.opener.write("journal.desc",
771 self.opener.write("journal.desc",
772 "%d\n%s\n" % (len(self), desc))
772 "%d\n%s\n" % (len(self), desc))
773
773
774 bkname = self.join('bookmarks')
774 bkname = self.join('bookmarks')
775 if os.path.exists(bkname):
775 if os.path.exists(bkname):
776 util.copyfile(bkname, self.join('journal.bookmarks'))
776 util.copyfile(bkname, self.join('journal.bookmarks'))
777 else:
777 else:
778 self.opener.write('journal.bookmarks', '')
778 self.opener.write('journal.bookmarks', '')
779 phasesname = self.sjoin('phaseroots')
779 phasesname = self.sjoin('phaseroots')
780 if os.path.exists(phasesname):
780 if os.path.exists(phasesname):
781 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
781 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
782 else:
782 else:
783 self.sopener.write('journal.phaseroots', '')
783 self.sopener.write('journal.phaseroots', '')
784
784
785 return (self.sjoin('journal'), self.join('journal.dirstate'),
785 return (self.sjoin('journal'), self.join('journal.dirstate'),
786 self.join('journal.branch'), self.join('journal.desc'),
786 self.join('journal.branch'), self.join('journal.desc'),
787 self.join('journal.bookmarks'),
787 self.join('journal.bookmarks'),
788 self.sjoin('journal.phaseroots'))
788 self.sjoin('journal.phaseroots'))
789
789
790 def recover(self):
790 def recover(self):
791 lock = self.lock()
791 lock = self.lock()
792 try:
792 try:
793 if os.path.exists(self.sjoin("journal")):
793 if os.path.exists(self.sjoin("journal")):
794 self.ui.status(_("rolling back interrupted transaction\n"))
794 self.ui.status(_("rolling back interrupted transaction\n"))
795 transaction.rollback(self.sopener, self.sjoin("journal"),
795 transaction.rollback(self.sopener, self.sjoin("journal"),
796 self.ui.warn)
796 self.ui.warn)
797 self.invalidate()
797 self.invalidate()
798 return True
798 return True
799 else:
799 else:
800 self.ui.warn(_("no interrupted transaction available\n"))
800 self.ui.warn(_("no interrupted transaction available\n"))
801 return False
801 return False
802 finally:
802 finally:
803 lock.release()
803 lock.release()
804
804
805 def rollback(self, dryrun=False, force=False):
805 def rollback(self, dryrun=False, force=False):
806 wlock = lock = None
806 wlock = lock = None
807 try:
807 try:
808 wlock = self.wlock()
808 wlock = self.wlock()
809 lock = self.lock()
809 lock = self.lock()
810 if os.path.exists(self.sjoin("undo")):
810 if os.path.exists(self.sjoin("undo")):
811 return self._rollback(dryrun, force)
811 return self._rollback(dryrun, force)
812 else:
812 else:
813 self.ui.warn(_("no rollback information available\n"))
813 self.ui.warn(_("no rollback information available\n"))
814 return 1
814 return 1
815 finally:
815 finally:
816 release(lock, wlock)
816 release(lock, wlock)
817
817
818 def _rollback(self, dryrun, force):
818 def _rollback(self, dryrun, force):
819 ui = self.ui
819 ui = self.ui
820 try:
820 try:
821 args = self.opener.read('undo.desc').splitlines()
821 args = self.opener.read('undo.desc').splitlines()
822 (oldlen, desc, detail) = (int(args[0]), args[1], None)
822 (oldlen, desc, detail) = (int(args[0]), args[1], None)
823 if len(args) >= 3:
823 if len(args) >= 3:
824 detail = args[2]
824 detail = args[2]
825 oldtip = oldlen - 1
825 oldtip = oldlen - 1
826
826
827 if detail and ui.verbose:
827 if detail and ui.verbose:
828 msg = (_('repository tip rolled back to revision %s'
828 msg = (_('repository tip rolled back to revision %s'
829 ' (undo %s: %s)\n')
829 ' (undo %s: %s)\n')
830 % (oldtip, desc, detail))
830 % (oldtip, desc, detail))
831 else:
831 else:
832 msg = (_('repository tip rolled back to revision %s'
832 msg = (_('repository tip rolled back to revision %s'
833 ' (undo %s)\n')
833 ' (undo %s)\n')
834 % (oldtip, desc))
834 % (oldtip, desc))
835 except IOError:
835 except IOError:
836 msg = _('rolling back unknown transaction\n')
836 msg = _('rolling back unknown transaction\n')
837 desc = None
837 desc = None
838
838
839 if not force and self['.'] != self['tip'] and desc == 'commit':
839 if not force and self['.'] != self['tip'] and desc == 'commit':
840 raise util.Abort(
840 raise util.Abort(
841 _('rollback of last commit while not checked out '
841 _('rollback of last commit while not checked out '
842 'may lose data'), hint=_('use -f to force'))
842 'may lose data'), hint=_('use -f to force'))
843
843
844 ui.status(msg)
844 ui.status(msg)
845 if dryrun:
845 if dryrun:
846 return 0
846 return 0
847
847
848 parents = self.dirstate.parents()
848 parents = self.dirstate.parents()
849 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
849 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
850 if os.path.exists(self.join('undo.bookmarks')):
850 if os.path.exists(self.join('undo.bookmarks')):
851 util.rename(self.join('undo.bookmarks'),
851 util.rename(self.join('undo.bookmarks'),
852 self.join('bookmarks'))
852 self.join('bookmarks'))
853 if os.path.exists(self.sjoin('undo.phaseroots')):
853 if os.path.exists(self.sjoin('undo.phaseroots')):
854 util.rename(self.sjoin('undo.phaseroots'),
854 util.rename(self.sjoin('undo.phaseroots'),
855 self.sjoin('phaseroots'))
855 self.sjoin('phaseroots'))
856 self.invalidate()
856 self.invalidate()
857
857
858 parentgone = (parents[0] not in self.changelog.nodemap or
858 parentgone = (parents[0] not in self.changelog.nodemap or
859 parents[1] not in self.changelog.nodemap)
859 parents[1] not in self.changelog.nodemap)
860 if parentgone:
860 if parentgone:
861 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
861 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
862 try:
862 try:
863 branch = self.opener.read('undo.branch')
863 branch = self.opener.read('undo.branch')
864 self.dirstate.setbranch(branch)
864 self.dirstate.setbranch(branch)
865 except IOError:
865 except IOError:
866 ui.warn(_('named branch could not be reset: '
866 ui.warn(_('named branch could not be reset: '
867 'current branch is still \'%s\'\n')
867 'current branch is still \'%s\'\n')
868 % self.dirstate.branch())
868 % self.dirstate.branch())
869
869
870 self.dirstate.invalidate()
870 self.dirstate.invalidate()
871 parents = tuple([p.rev() for p in self.parents()])
871 parents = tuple([p.rev() for p in self.parents()])
872 if len(parents) > 1:
872 if len(parents) > 1:
873 ui.status(_('working directory now based on '
873 ui.status(_('working directory now based on '
874 'revisions %d and %d\n') % parents)
874 'revisions %d and %d\n') % parents)
875 else:
875 else:
876 ui.status(_('working directory now based on '
876 ui.status(_('working directory now based on '
877 'revision %d\n') % parents)
877 'revision %d\n') % parents)
878 self.destroyed()
878 self.destroyed()
879 return 0
879 return 0
880
880
881 881        def invalidatecaches(self):
882     -          try:
883     -              delattr(self, '_tagscache')
884     -          except AttributeError:
885     -              pass
    882 +          def delcache(name):
    883 +              try:
    884 +                  delattr(self, name)
    885 +              except AttributeError:
    886 +                  pass
886 887
    888 +          delcache('_tagscache')
    889 +          delcache('_phaserev')
    890 +
887 891            self._branchcache = None # in UTF-8
888 892            self._branchcachetip = None
889
893
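After this change, the first access to repo._phaserev following invalidate()/invalidatecaches() recomputes the per-revision phase list from the freshly reread _phaseroots, so callers such as known() and cancopy() (lines 628-642 above) no longer consult phase data that predates the invalidation. Factoring the try/delattr/except AttributeError pattern into delcache() also makes invalidating any future derived propertycache attribute a one-line addition.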
890 def invalidatedirstate(self):
894 def invalidatedirstate(self):
891 '''Invalidates the dirstate, causing the next call to dirstate
895 '''Invalidates the dirstate, causing the next call to dirstate
892 to check if it was modified since the last time it was read,
896 to check if it was modified since the last time it was read,
893 rereading it if it has.
897 rereading it if it has.
894
898
895 This is different to dirstate.invalidate() that it doesn't always
899 This is different to dirstate.invalidate() that it doesn't always
896 rereads the dirstate. Use dirstate.invalidate() if you want to
900 rereads the dirstate. Use dirstate.invalidate() if you want to
897 explicitly read the dirstate again (i.e. restoring it to a previous
901 explicitly read the dirstate again (i.e. restoring it to a previous
898 known good state).'''
902 known good state).'''
899 try:
903 try:
900 delattr(self, 'dirstate')
904 delattr(self, 'dirstate')
901 except AttributeError:
905 except AttributeError:
902 pass
906 pass
903
907
904 def invalidate(self):
908 def invalidate(self):
905 for k in self._filecache:
909 for k in self._filecache:
906 # dirstate is invalidated separately in invalidatedirstate()
910 # dirstate is invalidated separately in invalidatedirstate()
907 if k == 'dirstate':
911 if k == 'dirstate':
908 continue
912 continue
909
913
910 try:
914 try:
911 delattr(self, k)
915 delattr(self, k)
912 except AttributeError:
916 except AttributeError:
913 pass
917 pass
914 self.invalidatecaches()
918 self.invalidatecaches()
915
919
916 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
920 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
917 try:
921 try:
918 l = lock.lock(lockname, 0, releasefn, desc=desc)
922 l = lock.lock(lockname, 0, releasefn, desc=desc)
919 except error.LockHeld, inst:
923 except error.LockHeld, inst:
920 if not wait:
924 if not wait:
921 raise
925 raise
922 self.ui.warn(_("waiting for lock on %s held by %r\n") %
926 self.ui.warn(_("waiting for lock on %s held by %r\n") %
923 (desc, inst.locker))
927 (desc, inst.locker))
924 # default to 600 seconds timeout
928 # default to 600 seconds timeout
925 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
929 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
926 releasefn, desc=desc)
930 releasefn, desc=desc)
927 if acquirefn:
931 if acquirefn:
928 acquirefn()
932 acquirefn()
929 return l
933 return l
930
934
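# Standalone sketch (not mercurial.lock) of the "try without blocking,
# then wait with a timeout" pattern that _lock() implements above: the
# first attempt uses a zero timeout, and only if the lock is held and
# wait=True do we warn and retry with the configured ui timeout.
class LockHeld(Exception):
    def __init__(self, locker):
        Exception.__init__(self, locker)
        self.locker = locker

def trylock(makelock, wait, timeout, warn):
    try:
        return makelock(0)               # first try: do not block at all
    except LockHeld, inst:
        if not wait:
            raise
        warn('waiting for lock held by %r\n' % inst.locker)
        return makelock(timeout)         # second try: wait up to timeout

attempts = []
def makelock(timeout):
    attempts.append(timeout)
    if len(attempts) == 1:
        raise LockHeld('pid 1234')       # simulate a busy first attempt
    return 'the-lock'

assert trylock(makelock, True, 600, lambda m: None) == 'the-lock'
assert attempts == [0, 600]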
931 def _afterlock(self, callback):
935 def _afterlock(self, callback):
932 """add a callback to the current repository lock.
936 """add a callback to the current repository lock.
933
937
934 The callback will be executed on lock release."""
938 The callback will be executed on lock release."""
935 l = self._lockref and self._lockref()
939 l = self._lockref and self._lockref()
936 if l:
940 if l:
937 l.postrelease.append(callback)
941 l.postrelease.append(callback)
938
942
939 def lock(self, wait=True):
943 def lock(self, wait=True):
940 '''Lock the repository store (.hg/store) and return a weak reference
944 '''Lock the repository store (.hg/store) and return a weak reference
941 to the lock. Use this before modifying the store (e.g. committing or
945 to the lock. Use this before modifying the store (e.g. committing or
942 stripping). If you are opening a transaction, get a lock as well.)'''
946 stripping). If you are opening a transaction, get a lock as well.)'''
943 l = self._lockref and self._lockref()
947 l = self._lockref and self._lockref()
944 if l is not None and l.held:
948 if l is not None and l.held:
945 l.lock()
949 l.lock()
946 return l
950 return l
947
951
948 def unlock():
952 def unlock():
949 self.store.write()
953 self.store.write()
950 if self._dirtyphases:
954 if self._dirtyphases:
951 phases.writeroots(self)
955 phases.writeroots(self)
952 for k, ce in self._filecache.items():
956 for k, ce in self._filecache.items():
953 if k == 'dirstate':
957 if k == 'dirstate':
954 continue
958 continue
955 ce.refresh()
959 ce.refresh()
956
960
957 l = self._lock(self.sjoin("lock"), wait, unlock,
961 l = self._lock(self.sjoin("lock"), wait, unlock,
958 self.invalidate, _('repository %s') % self.origroot)
962 self.invalidate, _('repository %s') % self.origroot)
959 self._lockref = weakref.ref(l)
963 self._lockref = weakref.ref(l)
960 return l
964 return l
961
965
962 def wlock(self, wait=True):
966 def wlock(self, wait=True):
963 '''Lock the non-store parts of the repository (everything under
967 '''Lock the non-store parts of the repository (everything under
964 .hg except .hg/store) and return a weak reference to the lock.
968 .hg except .hg/store) and return a weak reference to the lock.
965 Use this before modifying files in .hg.'''
969 Use this before modifying files in .hg.'''
966 l = self._wlockref and self._wlockref()
970 l = self._wlockref and self._wlockref()
967 if l is not None and l.held:
971 if l is not None and l.held:
968 l.lock()
972 l.lock()
969 return l
973 return l
970
974
971 def unlock():
975 def unlock():
972 self.dirstate.write()
976 self.dirstate.write()
973 ce = self._filecache.get('dirstate')
977 ce = self._filecache.get('dirstate')
974 if ce:
978 if ce:
975 ce.refresh()
979 ce.refresh()
976
980
977 l = self._lock(self.join("wlock"), wait, unlock,
981 l = self._lock(self.join("wlock"), wait, unlock,
978 self.invalidatedirstate, _('working directory of %s') %
982 self.invalidatedirstate, _('working directory of %s') %
979 self.origroot)
983 self.origroot)
980 self._wlockref = weakref.ref(l)
984 self._wlockref = weakref.ref(l)
981 return l
985 return l
982
986
983 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
987 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
984 """
988 """
985 commit an individual file as part of a larger transaction
989 commit an individual file as part of a larger transaction
986 """
990 """
987
991
988 fname = fctx.path()
992 fname = fctx.path()
989 text = fctx.data()
993 text = fctx.data()
990 flog = self.file(fname)
994 flog = self.file(fname)
991 fparent1 = manifest1.get(fname, nullid)
995 fparent1 = manifest1.get(fname, nullid)
992 fparent2 = fparent2o = manifest2.get(fname, nullid)
996 fparent2 = fparent2o = manifest2.get(fname, nullid)
993
997
994 meta = {}
998 meta = {}
995 copy = fctx.renamed()
999 copy = fctx.renamed()
996 if copy and copy[0] != fname:
1000 if copy and copy[0] != fname:
997 # Mark the new revision of this file as a copy of another
1001 # Mark the new revision of this file as a copy of another
998 # file. This copy data will effectively act as a parent
1002 # file. This copy data will effectively act as a parent
999 # of this new revision. If this is a merge, the first
1003 # of this new revision. If this is a merge, the first
1000 # parent will be the nullid (meaning "look up the copy data")
1004 # parent will be the nullid (meaning "look up the copy data")
1001 # and the second one will be the other parent. For example:
1005 # and the second one will be the other parent. For example:
1002 #
1006 #
1003 # 0 --- 1 --- 3 rev1 changes file foo
1007 # 0 --- 1 --- 3 rev1 changes file foo
1004 # \ / rev2 renames foo to bar and changes it
1008 # \ / rev2 renames foo to bar and changes it
1005 # \- 2 -/ rev3 should have bar with all changes and
1009 # \- 2 -/ rev3 should have bar with all changes and
1006 # should record that bar descends from
1010 # should record that bar descends from
1007 # bar in rev2 and foo in rev1
1011 # bar in rev2 and foo in rev1
1008 #
1012 #
1009 # this allows this merge to succeed:
1013 # this allows this merge to succeed:
1010 #
1014 #
1011 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1015 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1012 # \ / merging rev3 and rev4 should use bar@rev2
1016 # \ / merging rev3 and rev4 should use bar@rev2
1013 # \- 2 --- 4 as the merge base
1017 # \- 2 --- 4 as the merge base
1014 #
1018 #
1015
1019
1016 cfname = copy[0]
1020 cfname = copy[0]
1017 crev = manifest1.get(cfname)
1021 crev = manifest1.get(cfname)
1018 newfparent = fparent2
1022 newfparent = fparent2
1019
1023
1020 if manifest2: # branch merge
1024 if manifest2: # branch merge
1021 if fparent2 == nullid or crev is None: # copied on remote side
1025 if fparent2 == nullid or crev is None: # copied on remote side
1022 if cfname in manifest2:
1026 if cfname in manifest2:
1023 crev = manifest2[cfname]
1027 crev = manifest2[cfname]
1024 newfparent = fparent1
1028 newfparent = fparent1
1025
1029
1026 # find source in nearest ancestor if we've lost track
1030 # find source in nearest ancestor if we've lost track
1027 if not crev:
1031 if not crev:
1028 self.ui.debug(" %s: searching for copy revision for %s\n" %
1032 self.ui.debug(" %s: searching for copy revision for %s\n" %
1029 (fname, cfname))
1033 (fname, cfname))
1030 for ancestor in self[None].ancestors():
1034 for ancestor in self[None].ancestors():
1031 if cfname in ancestor:
1035 if cfname in ancestor:
1032 crev = ancestor[cfname].filenode()
1036 crev = ancestor[cfname].filenode()
1033 break
1037 break
1034
1038
1035 if crev:
1039 if crev:
1036 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1040 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1037 meta["copy"] = cfname
1041 meta["copy"] = cfname
1038 meta["copyrev"] = hex(crev)
1042 meta["copyrev"] = hex(crev)
1039 fparent1, fparent2 = nullid, newfparent
1043 fparent1, fparent2 = nullid, newfparent
1040 else:
1044 else:
1041 self.ui.warn(_("warning: can't find ancestor for '%s' "
1045 self.ui.warn(_("warning: can't find ancestor for '%s' "
1042 "copied from '%s'!\n") % (fname, cfname))
1046 "copied from '%s'!\n") % (fname, cfname))
1043
1047
1044 elif fparent2 != nullid:
1048 elif fparent2 != nullid:
1045 # is one parent an ancestor of the other?
1049 # is one parent an ancestor of the other?
1046 fparentancestor = flog.ancestor(fparent1, fparent2)
1050 fparentancestor = flog.ancestor(fparent1, fparent2)
1047 if fparentancestor == fparent1:
1051 if fparentancestor == fparent1:
1048 fparent1, fparent2 = fparent2, nullid
1052 fparent1, fparent2 = fparent2, nullid
1049 elif fparentancestor == fparent2:
1053 elif fparentancestor == fparent2:
1050 fparent2 = nullid
1054 fparent2 = nullid
1051
1055
1052 # is the file changed?
1056 # is the file changed?
1053 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1057 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1054 changelist.append(fname)
1058 changelist.append(fname)
1055 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1059 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1056
1060
1057 # are just the flags changed during merge?
1061 # are just the flags changed during merge?
1058 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1062 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1059 changelist.append(fname)
1063 changelist.append(fname)
1060
1064
1061 return fparent1
1065 return fparent1
1062
1066
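# Hedged sketch of the copy-metadata decision that the long comment in
# _filecommit() above describes: when a file was renamed, the new
# filelog revision records 'copy'/'copyrev' metadata and its first
# parent becomes nullid ("look up the copy data"), while the second
# parent keeps linking the other side of a merge.  filecommitmeta is an
# illustrative helper, not a Mercurial API.
nullid = '\0' * 20

def filecommitmeta(fname, copysource, copynode, fparent1, fparent2):
    meta = {}
    if copysource and copysource != fname:
        meta['copy'] = copysource
        meta['copyrev'] = copynode.encode('hex')
        fparent1, fparent2 = nullid, fparent2
    return meta, fparent1, fparent2

meta, p1, p2 = filecommitmeta('bar', 'foo', '\x12' * 20, '\x34' * 20, nullid)
assert meta == {'copy': 'foo', 'copyrev': '12' * 20}
assert p1 == nullid and p2 == nullid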
1063 def commit(self, text="", user=None, date=None, match=None, force=False,
1067 def commit(self, text="", user=None, date=None, match=None, force=False,
1064 editor=False, extra={}):
1068 editor=False, extra={}):
1065 """Add a new revision to current repository.
1069 """Add a new revision to current repository.
1066
1070
1067 Revision information is gathered from the working directory,
1071 Revision information is gathered from the working directory,
1068 match can be used to filter the committed files. If editor is
1072 match can be used to filter the committed files. If editor is
1069 supplied, it is called to get a commit message.
1073 supplied, it is called to get a commit message.
1070 """
1074 """
1071
1075
1072 def fail(f, msg):
1076 def fail(f, msg):
1073 raise util.Abort('%s: %s' % (f, msg))
1077 raise util.Abort('%s: %s' % (f, msg))
1074
1078
1075 if not match:
1079 if not match:
1076 match = matchmod.always(self.root, '')
1080 match = matchmod.always(self.root, '')
1077
1081
1078 if not force:
1082 if not force:
1079 vdirs = []
1083 vdirs = []
1080 match.dir = vdirs.append
1084 match.dir = vdirs.append
1081 match.bad = fail
1085 match.bad = fail
1082
1086
1083 wlock = self.wlock()
1087 wlock = self.wlock()
1084 try:
1088 try:
1085 wctx = self[None]
1089 wctx = self[None]
1086 merge = len(wctx.parents()) > 1
1090 merge = len(wctx.parents()) > 1
1087
1091
1088 if (not force and merge and match and
1092 if (not force and merge and match and
1089 (match.files() or match.anypats())):
1093 (match.files() or match.anypats())):
1090 raise util.Abort(_('cannot partially commit a merge '
1094 raise util.Abort(_('cannot partially commit a merge '
1091 '(do not specify files or patterns)'))
1095 '(do not specify files or patterns)'))
1092
1096
1093 changes = self.status(match=match, clean=force)
1097 changes = self.status(match=match, clean=force)
1094 if force:
1098 if force:
1095 changes[0].extend(changes[6]) # mq may commit unchanged files
1099 changes[0].extend(changes[6]) # mq may commit unchanged files
1096
1100
1097 # check subrepos
1101 # check subrepos
1098 subs = []
1102 subs = []
1099 removedsubs = set()
1103 removedsubs = set()
1100 if '.hgsub' in wctx:
1104 if '.hgsub' in wctx:
1101 # only manage subrepos and .hgsubstate if .hgsub is present
1105 # only manage subrepos and .hgsubstate if .hgsub is present
1102 for p in wctx.parents():
1106 for p in wctx.parents():
1103 removedsubs.update(s for s in p.substate if match(s))
1107 removedsubs.update(s for s in p.substate if match(s))
1104 for s in wctx.substate:
1108 for s in wctx.substate:
1105 removedsubs.discard(s)
1109 removedsubs.discard(s)
1106 if match(s) and wctx.sub(s).dirty():
1110 if match(s) and wctx.sub(s).dirty():
1107 subs.append(s)
1111 subs.append(s)
1108 if (subs or removedsubs):
1112 if (subs or removedsubs):
1109 if (not match('.hgsub') and
1113 if (not match('.hgsub') and
1110 '.hgsub' in (wctx.modified() + wctx.added())):
1114 '.hgsub' in (wctx.modified() + wctx.added())):
1111 raise util.Abort(
1115 raise util.Abort(
1112 _("can't commit subrepos without .hgsub"))
1116 _("can't commit subrepos without .hgsub"))
1113 if '.hgsubstate' not in changes[0]:
1117 if '.hgsubstate' not in changes[0]:
1114 changes[0].insert(0, '.hgsubstate')
1118 changes[0].insert(0, '.hgsubstate')
1115 if '.hgsubstate' in changes[2]:
1119 if '.hgsubstate' in changes[2]:
1116 changes[2].remove('.hgsubstate')
1120 changes[2].remove('.hgsubstate')
1117 elif '.hgsub' in changes[2]:
1121 elif '.hgsub' in changes[2]:
1118 # clean up .hgsubstate when .hgsub is removed
1122 # clean up .hgsubstate when .hgsub is removed
1119 if ('.hgsubstate' in wctx and
1123 if ('.hgsubstate' in wctx and
1120 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1124 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1121 changes[2].insert(0, '.hgsubstate')
1125 changes[2].insert(0, '.hgsubstate')
1122
1126
1123 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1127 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1124 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1128 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1125 if changedsubs:
1129 if changedsubs:
1126 raise util.Abort(_("uncommitted changes in subrepo %s")
1130 raise util.Abort(_("uncommitted changes in subrepo %s")
1127 % changedsubs[0],
1131 % changedsubs[0],
1128 hint=_("use --subrepos for recursive commit"))
1132 hint=_("use --subrepos for recursive commit"))
1129
1133
1130 # make sure all explicit patterns are matched
1134 # make sure all explicit patterns are matched
1131 if not force and match.files():
1135 if not force and match.files():
1132 matched = set(changes[0] + changes[1] + changes[2])
1136 matched = set(changes[0] + changes[1] + changes[2])
1133
1137
1134 for f in match.files():
1138 for f in match.files():
1135 if f == '.' or f in matched or f in wctx.substate:
1139 if f == '.' or f in matched or f in wctx.substate:
1136 continue
1140 continue
1137 if f in changes[3]: # missing
1141 if f in changes[3]: # missing
1138 fail(f, _('file not found!'))
1142 fail(f, _('file not found!'))
1139 if f in vdirs: # visited directory
1143 if f in vdirs: # visited directory
1140 d = f + '/'
1144 d = f + '/'
1141 for mf in matched:
1145 for mf in matched:
1142 if mf.startswith(d):
1146 if mf.startswith(d):
1143 break
1147 break
1144 else:
1148 else:
1145 fail(f, _("no match under directory!"))
1149 fail(f, _("no match under directory!"))
1146 elif f not in self.dirstate:
1150 elif f not in self.dirstate:
1147 fail(f, _("file not tracked!"))
1151 fail(f, _("file not tracked!"))
1148
1152
1149 if (not force and not extra.get("close") and not merge
1153 if (not force and not extra.get("close") and not merge
1150 and not (changes[0] or changes[1] or changes[2])
1154 and not (changes[0] or changes[1] or changes[2])
1151 and wctx.branch() == wctx.p1().branch()):
1155 and wctx.branch() == wctx.p1().branch()):
1152 return None
1156 return None
1153
1157
1154 ms = mergemod.mergestate(self)
1158 ms = mergemod.mergestate(self)
1155 for f in changes[0]:
1159 for f in changes[0]:
1156 if f in ms and ms[f] == 'u':
1160 if f in ms and ms[f] == 'u':
1157 raise util.Abort(_("unresolved merge conflicts "
1161 raise util.Abort(_("unresolved merge conflicts "
1158 "(see hg help resolve)"))
1162 "(see hg help resolve)"))
1159
1163
1160 cctx = context.workingctx(self, text, user, date, extra, changes)
1164 cctx = context.workingctx(self, text, user, date, extra, changes)
1161 if editor:
1165 if editor:
1162 cctx._text = editor(self, cctx, subs)
1166 cctx._text = editor(self, cctx, subs)
1163 edited = (text != cctx._text)
1167 edited = (text != cctx._text)
1164
1168
1165 # commit subs
1169 # commit subs
1166 if subs or removedsubs:
1170 if subs or removedsubs:
1167 state = wctx.substate.copy()
1171 state = wctx.substate.copy()
1168 for s in sorted(subs):
1172 for s in sorted(subs):
1169 sub = wctx.sub(s)
1173 sub = wctx.sub(s)
1170 self.ui.status(_('committing subrepository %s\n') %
1174 self.ui.status(_('committing subrepository %s\n') %
1171 subrepo.subrelpath(sub))
1175 subrepo.subrelpath(sub))
1172 sr = sub.commit(cctx._text, user, date)
1176 sr = sub.commit(cctx._text, user, date)
1173 state[s] = (state[s][0], sr)
1177 state[s] = (state[s][0], sr)
1174 subrepo.writestate(self, state)
1178 subrepo.writestate(self, state)
1175
1179
1176 # Save commit message in case this transaction gets rolled back
1180 # Save commit message in case this transaction gets rolled back
1177 # (e.g. by a pretxncommit hook). Leave the content alone on
1181 # (e.g. by a pretxncommit hook). Leave the content alone on
1178 # the assumption that the user will use the same editor again.
1182 # the assumption that the user will use the same editor again.
1179 msgfn = self.savecommitmessage(cctx._text)
1183 msgfn = self.savecommitmessage(cctx._text)
1180
1184
1181 p1, p2 = self.dirstate.parents()
1185 p1, p2 = self.dirstate.parents()
1182 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1186 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1183 try:
1187 try:
1184 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1188 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1185 ret = self.commitctx(cctx, True)
1189 ret = self.commitctx(cctx, True)
1186 except:
1190 except:
1187 if edited:
1191 if edited:
1188 self.ui.write(
1192 self.ui.write(
1189 _('note: commit message saved in %s\n') % msgfn)
1193 _('note: commit message saved in %s\n') % msgfn)
1190 raise
1194 raise
1191
1195
1192 # update bookmarks, dirstate and mergestate
1196 # update bookmarks, dirstate and mergestate
1193 bookmarks.update(self, p1, ret)
1197 bookmarks.update(self, p1, ret)
1194 for f in changes[0] + changes[1]:
1198 for f in changes[0] + changes[1]:
1195 self.dirstate.normal(f)
1199 self.dirstate.normal(f)
1196 for f in changes[2]:
1200 for f in changes[2]:
1197 self.dirstate.drop(f)
1201 self.dirstate.drop(f)
1198 self.dirstate.setparents(ret)
1202 self.dirstate.setparents(ret)
1199 ms.reset()
1203 ms.reset()
1200 finally:
1204 finally:
1201 wlock.release()
1205 wlock.release()
1202
1206
1203 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1207 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1204 return ret
1208 return ret
1205
1209
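# Illustrative note on the positional indexing used throughout commit()
# above: status() (see further down) returns a 7-tuple and commit()
# reads it by index, e.g. changes[0] modified, changes[3] missing,
# changes[6] clean.  The names below are only for readability here.
MODIFIED, ADDED, REMOVED, DELETED, UNKNOWN, IGNORED, CLEAN = range(7)

changes = (['a.txt'], [], ['b.txt'], [], [], [], ['c.txt'])
if changes[MODIFIED] or changes[ADDED] or changes[REMOVED]:
    pass                                  # something to commit
assert changes[CLEAN] == ['c.txt']        # what --force lets mq re-commit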
1206 def commitctx(self, ctx, error=False):
1210 def commitctx(self, ctx, error=False):
1207 """Add a new revision to current repository.
1211 """Add a new revision to current repository.
1208 Revision information is passed via the context argument.
1212 Revision information is passed via the context argument.
1209 """
1213 """
1210
1214
1211 tr = lock = None
1215 tr = lock = None
1212 removed = list(ctx.removed())
1216 removed = list(ctx.removed())
1213 p1, p2 = ctx.p1(), ctx.p2()
1217 p1, p2 = ctx.p1(), ctx.p2()
1214 user = ctx.user()
1218 user = ctx.user()
1215
1219
1216 lock = self.lock()
1220 lock = self.lock()
1217 try:
1221 try:
1218 tr = self.transaction("commit")
1222 tr = self.transaction("commit")
1219 trp = weakref.proxy(tr)
1223 trp = weakref.proxy(tr)
1220
1224
1221 if ctx.files():
1225 if ctx.files():
1222 m1 = p1.manifest().copy()
1226 m1 = p1.manifest().copy()
1223 m2 = p2.manifest()
1227 m2 = p2.manifest()
1224
1228
1225 # check in files
1229 # check in files
1226 new = {}
1230 new = {}
1227 changed = []
1231 changed = []
1228 linkrev = len(self)
1232 linkrev = len(self)
1229 for f in sorted(ctx.modified() + ctx.added()):
1233 for f in sorted(ctx.modified() + ctx.added()):
1230 self.ui.note(f + "\n")
1234 self.ui.note(f + "\n")
1231 try:
1235 try:
1232 fctx = ctx[f]
1236 fctx = ctx[f]
1233 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1237 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1234 changed)
1238 changed)
1235 m1.set(f, fctx.flags())
1239 m1.set(f, fctx.flags())
1236 except OSError, inst:
1240 except OSError, inst:
1237 self.ui.warn(_("trouble committing %s!\n") % f)
1241 self.ui.warn(_("trouble committing %s!\n") % f)
1238 raise
1242 raise
1239 except IOError, inst:
1243 except IOError, inst:
1240 errcode = getattr(inst, 'errno', errno.ENOENT)
1244 errcode = getattr(inst, 'errno', errno.ENOENT)
1241 if error or errcode and errcode != errno.ENOENT:
1245 if error or errcode and errcode != errno.ENOENT:
1242 self.ui.warn(_("trouble committing %s!\n") % f)
1246 self.ui.warn(_("trouble committing %s!\n") % f)
1243 raise
1247 raise
1244 else:
1248 else:
1245 removed.append(f)
1249 removed.append(f)
1246
1250
1247 # update manifest
1251 # update manifest
1248 m1.update(new)
1252 m1.update(new)
1249 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1253 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1250 drop = [f for f in removed if f in m1]
1254 drop = [f for f in removed if f in m1]
1251 for f in drop:
1255 for f in drop:
1252 del m1[f]
1256 del m1[f]
1253 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1257 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1254 p2.manifestnode(), (new, drop))
1258 p2.manifestnode(), (new, drop))
1255 files = changed + removed
1259 files = changed + removed
1256 else:
1260 else:
1257 mn = p1.manifestnode()
1261 mn = p1.manifestnode()
1258 files = []
1262 files = []
1259
1263
1260 # update changelog
1264 # update changelog
1261 self.changelog.delayupdate()
1265 self.changelog.delayupdate()
1262 n = self.changelog.add(mn, files, ctx.description(),
1266 n = self.changelog.add(mn, files, ctx.description(),
1263 trp, p1.node(), p2.node(),
1267 trp, p1.node(), p2.node(),
1264 user, ctx.date(), ctx.extra().copy())
1268 user, ctx.date(), ctx.extra().copy())
1265 p = lambda: self.changelog.writepending() and self.root or ""
1269 p = lambda: self.changelog.writepending() and self.root or ""
1266 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1270 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1267 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1271 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1268 parent2=xp2, pending=p)
1272 parent2=xp2, pending=p)
1269 self.changelog.finalize(trp)
1273 self.changelog.finalize(trp)
1270 # set the new commit in its proper phase
1274 # set the new commit in its proper phase
1271 targetphase = self.ui.configint('phases', 'new-commit',
1275 targetphase = self.ui.configint('phases', 'new-commit',
1272 phases.draft)
1276 phases.draft)
1273 if targetphase:
1277 if targetphase:
1274 # retracting the boundary does not alter parent changesets.
1278 # retracting the boundary does not alter parent changesets.
1275 # if a parent has a higher phase, the resulting phase will
1279 # if a parent has a higher phase, the resulting phase will
1276 # be compliant anyway
1280 # be compliant anyway
1277 #
1281 #
1278 # if the minimal phase was 0 we don't need to retract anything
1282 # if the minimal phase was 0 we don't need to retract anything
1279 phases.retractboundary(self, targetphase, [n])
1283 phases.retractboundary(self, targetphase, [n])
1280 tr.close()
1284 tr.close()
1281 self.updatebranchcache()
1285 self.updatebranchcache()
1282 return n
1286 return n
1283 finally:
1287 finally:
1284 if tr:
1288 if tr:
1285 tr.release()
1289 tr.release()
1286 lock.release()
1290 lock.release()
1287
1291
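# Small sketch of the 'phases.new-commit' lookup in commitctx() above,
# assuming the usual phase numbering (public=0, draft=1, secret=2): new
# commits default to draft, and a configured value of 0 means there is
# no boundary to retract.  newcommitphase and the flat config dict are
# illustrative, not Mercurial APIs.
public, draft, secret = 0, 1, 2

def newcommitphase(config):
    return int(config.get('phases.new-commit', draft))

assert newcommitphase({}) == draft
assert newcommitphase({'phases.new-commit': '2'}) == secret
assert not newcommitphase({'phases.new-commit': '0'})   # nothing to retract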
1288 def destroyed(self):
1292 def destroyed(self):
1289 '''Inform the repository that nodes have been destroyed.
1293 '''Inform the repository that nodes have been destroyed.
1290 Intended for use by strip and rollback, so there's a common
1294 Intended for use by strip and rollback, so there's a common
1291 place for anything that has to be done after destroying history.'''
1295 place for anything that has to be done after destroying history.'''
1292 # XXX it might be nice if we could take the list of destroyed
1296 # XXX it might be nice if we could take the list of destroyed
1293 # nodes, but I don't see an easy way for rollback() to do that
1297 # nodes, but I don't see an easy way for rollback() to do that
1294
1298
1295 # Ensure the persistent tag cache is updated. Doing it now
1299 # Ensure the persistent tag cache is updated. Doing it now
1296 # means that the tag cache only has to worry about destroyed
1300 # means that the tag cache only has to worry about destroyed
1297 # heads immediately after a strip/rollback. That in turn
1301 # heads immediately after a strip/rollback. That in turn
1298 # guarantees that "cachetip == currenttip" (comparing both rev
1302 # guarantees that "cachetip == currenttip" (comparing both rev
1299 # and node) always means no nodes have been added or destroyed.
1303 # and node) always means no nodes have been added or destroyed.
1300
1304
1301 # XXX this is suboptimal when qrefresh'ing: we strip the current
1305 # XXX this is suboptimal when qrefresh'ing: we strip the current
1302 # head, refresh the tag cache, then immediately add a new head.
1306 # head, refresh the tag cache, then immediately add a new head.
1303 # But I think doing it this way is necessary for the "instant
1307 # But I think doing it this way is necessary for the "instant
1304 # tag cache retrieval" case to work.
1308 # tag cache retrieval" case to work.
1305 self.invalidatecaches()
1309 self.invalidatecaches()
1306
1310
1307 def walk(self, match, node=None):
1311 def walk(self, match, node=None):
1308 '''
1312 '''
1309 walk recursively through the directory tree or a given
1313 walk recursively through the directory tree or a given
1310 changeset, finding all files matched by the match
1314 changeset, finding all files matched by the match
1311 function
1315 function
1312 '''
1316 '''
1313 return self[node].walk(match)
1317 return self[node].walk(match)
1314
1318
1315 def status(self, node1='.', node2=None, match=None,
1319 def status(self, node1='.', node2=None, match=None,
1316 ignored=False, clean=False, unknown=False,
1320 ignored=False, clean=False, unknown=False,
1317 listsubrepos=False):
1321 listsubrepos=False):
1318 """return status of files between two nodes or node and working directory
1322 """return status of files between two nodes or node and working directory
1319
1323
1320 If node1 is None, use the first dirstate parent instead.
1324 If node1 is None, use the first dirstate parent instead.
1321 If node2 is None, compare node1 with working directory.
1325 If node2 is None, compare node1 with working directory.
1322 """
1326 """
1323
1327
1324 def mfmatches(ctx):
1328 def mfmatches(ctx):
1325 mf = ctx.manifest().copy()
1329 mf = ctx.manifest().copy()
1326 for fn in mf.keys():
1330 for fn in mf.keys():
1327 if not match(fn):
1331 if not match(fn):
1328 del mf[fn]
1332 del mf[fn]
1329 return mf
1333 return mf
1330
1334
1331 if isinstance(node1, context.changectx):
1335 if isinstance(node1, context.changectx):
1332 ctx1 = node1
1336 ctx1 = node1
1333 else:
1337 else:
1334 ctx1 = self[node1]
1338 ctx1 = self[node1]
1335 if isinstance(node2, context.changectx):
1339 if isinstance(node2, context.changectx):
1336 ctx2 = node2
1340 ctx2 = node2
1337 else:
1341 else:
1338 ctx2 = self[node2]
1342 ctx2 = self[node2]
1339
1343
1340 working = ctx2.rev() is None
1344 working = ctx2.rev() is None
1341 parentworking = working and ctx1 == self['.']
1345 parentworking = working and ctx1 == self['.']
1342 match = match or matchmod.always(self.root, self.getcwd())
1346 match = match or matchmod.always(self.root, self.getcwd())
1343 listignored, listclean, listunknown = ignored, clean, unknown
1347 listignored, listclean, listunknown = ignored, clean, unknown
1344
1348
1345 # load earliest manifest first for caching reasons
1349 # load earliest manifest first for caching reasons
1346 if not working and ctx2.rev() < ctx1.rev():
1350 if not working and ctx2.rev() < ctx1.rev():
1347 ctx2.manifest()
1351 ctx2.manifest()
1348
1352
1349 if not parentworking:
1353 if not parentworking:
1350 def bad(f, msg):
1354 def bad(f, msg):
1351 if f not in ctx1:
1355 if f not in ctx1:
1352 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1356 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1353 match.bad = bad
1357 match.bad = bad
1354
1358
1355 if working: # we need to scan the working dir
1359 if working: # we need to scan the working dir
1356 subrepos = []
1360 subrepos = []
1357 if '.hgsub' in self.dirstate:
1361 if '.hgsub' in self.dirstate:
1358 subrepos = ctx2.substate.keys()
1362 subrepos = ctx2.substate.keys()
1359 s = self.dirstate.status(match, subrepos, listignored,
1363 s = self.dirstate.status(match, subrepos, listignored,
1360 listclean, listunknown)
1364 listclean, listunknown)
1361 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1365 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1362
1366
1363 # check for any possibly clean files
1367 # check for any possibly clean files
1364 if parentworking and cmp:
1368 if parentworking and cmp:
1365 fixup = []
1369 fixup = []
1366 # do a full compare of any files that might have changed
1370 # do a full compare of any files that might have changed
1367 for f in sorted(cmp):
1371 for f in sorted(cmp):
1368 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1372 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1369 or ctx1[f].cmp(ctx2[f])):
1373 or ctx1[f].cmp(ctx2[f])):
1370 modified.append(f)
1374 modified.append(f)
1371 else:
1375 else:
1372 fixup.append(f)
1376 fixup.append(f)
1373
1377
1374 # update dirstate for files that are actually clean
1378 # update dirstate for files that are actually clean
1375 if fixup:
1379 if fixup:
1376 if listclean:
1380 if listclean:
1377 clean += fixup
1381 clean += fixup
1378
1382
1379 try:
1383 try:
1380 # updating the dirstate is optional
1384 # updating the dirstate is optional
1381 # so we don't wait on the lock
1385 # so we don't wait on the lock
1382 wlock = self.wlock(False)
1386 wlock = self.wlock(False)
1383 try:
1387 try:
1384 for f in fixup:
1388 for f in fixup:
1385 self.dirstate.normal(f)
1389 self.dirstate.normal(f)
1386 finally:
1390 finally:
1387 wlock.release()
1391 wlock.release()
1388 except error.LockError:
1392 except error.LockError:
1389 pass
1393 pass
1390
1394
1391 if not parentworking:
1395 if not parentworking:
1392 mf1 = mfmatches(ctx1)
1396 mf1 = mfmatches(ctx1)
1393 if working:
1397 if working:
1394 # we are comparing working dir against non-parent
1398 # we are comparing working dir against non-parent
1395 # generate a pseudo-manifest for the working dir
1399 # generate a pseudo-manifest for the working dir
1396 mf2 = mfmatches(self['.'])
1400 mf2 = mfmatches(self['.'])
1397 for f in cmp + modified + added:
1401 for f in cmp + modified + added:
1398 mf2[f] = None
1402 mf2[f] = None
1399 mf2.set(f, ctx2.flags(f))
1403 mf2.set(f, ctx2.flags(f))
1400 for f in removed:
1404 for f in removed:
1401 if f in mf2:
1405 if f in mf2:
1402 del mf2[f]
1406 del mf2[f]
1403 else:
1407 else:
1404 # we are comparing two revisions
1408 # we are comparing two revisions
1405 deleted, unknown, ignored = [], [], []
1409 deleted, unknown, ignored = [], [], []
1406 mf2 = mfmatches(ctx2)
1410 mf2 = mfmatches(ctx2)
1407
1411
1408 modified, added, clean = [], [], []
1412 modified, added, clean = [], [], []
1409 for fn in mf2:
1413 for fn in mf2:
1410 if fn in mf1:
1414 if fn in mf1:
1411 if (fn not in deleted and
1415 if (fn not in deleted and
1412 (mf1.flags(fn) != mf2.flags(fn) or
1416 (mf1.flags(fn) != mf2.flags(fn) or
1413 (mf1[fn] != mf2[fn] and
1417 (mf1[fn] != mf2[fn] and
1414 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1418 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1415 modified.append(fn)
1419 modified.append(fn)
1416 elif listclean:
1420 elif listclean:
1417 clean.append(fn)
1421 clean.append(fn)
1418 del mf1[fn]
1422 del mf1[fn]
1419 elif fn not in deleted:
1423 elif fn not in deleted:
1420 added.append(fn)
1424 added.append(fn)
1421 removed = mf1.keys()
1425 removed = mf1.keys()
1422
1426
1423 if working and modified and not self.dirstate._checklink:
1427 if working and modified and not self.dirstate._checklink:
1424 # Symlink placeholders may get non-symlink-like contents
1428 # Symlink placeholders may get non-symlink-like contents
1425 # via user error or dereferencing by NFS or Samba servers,
1429 # via user error or dereferencing by NFS or Samba servers,
1426 # so we filter out any placeholders that don't look like a
1430 # so we filter out any placeholders that don't look like a
1427 # symlink
1431 # symlink
1428 sane = []
1432 sane = []
1429 for f in modified:
1433 for f in modified:
1430 if ctx2.flags(f) == 'l':
1434 if ctx2.flags(f) == 'l':
1431 d = ctx2[f].data()
1435 d = ctx2[f].data()
1432 if len(d) >= 1024 or '\n' in d or util.binary(d):
1436 if len(d) >= 1024 or '\n' in d or util.binary(d):
1433 self.ui.debug('ignoring suspect symlink placeholder'
1437 self.ui.debug('ignoring suspect symlink placeholder'
1434 ' "%s"\n' % f)
1438 ' "%s"\n' % f)
1435 continue
1439 continue
1436 sane.append(f)
1440 sane.append(f)
1437 modified = sane
1441 modified = sane
1438
1442
1439 r = modified, added, removed, deleted, unknown, ignored, clean
1443 r = modified, added, removed, deleted, unknown, ignored, clean
1440
1444
1441 if listsubrepos:
1445 if listsubrepos:
1442 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1446 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1443 if working:
1447 if working:
1444 rev2 = None
1448 rev2 = None
1445 else:
1449 else:
1446 rev2 = ctx2.substate[subpath][1]
1450 rev2 = ctx2.substate[subpath][1]
1447 try:
1451 try:
1448 submatch = matchmod.narrowmatcher(subpath, match)
1452 submatch = matchmod.narrowmatcher(subpath, match)
1449 s = sub.status(rev2, match=submatch, ignored=listignored,
1453 s = sub.status(rev2, match=submatch, ignored=listignored,
1450 clean=listclean, unknown=listunknown,
1454 clean=listclean, unknown=listunknown,
1451 listsubrepos=True)
1455 listsubrepos=True)
1452 for rfiles, sfiles in zip(r, s):
1456 for rfiles, sfiles in zip(r, s):
1453 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1457 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1454 except error.LookupError:
1458 except error.LookupError:
1455 self.ui.status(_("skipping missing subrepository: %s\n")
1459 self.ui.status(_("skipping missing subrepository: %s\n")
1456 % subpath)
1460 % subpath)
1457
1461
1458 for l in r:
1462 for l in r:
1459 l.sort()
1463 l.sort()
1460 return r
1464 return r
1461
1465
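# Standalone sketch (plain dicts instead of manifests) of the
# two-revision classification loop near the end of status() above: walk
# the newer manifest, compare entries against the older one, and
# whatever remains in the older manifest afterwards is 'removed'.
def classify(mf1, mf2, listclean=False):
    mf1 = dict(mf1)
    modified, added, clean = [], [], []
    for fn in mf2:
        if fn in mf1:
            if mf1[fn] != mf2[fn]:
                modified.append(fn)
            elif listclean:
                clean.append(fn)
            del mf1[fn]
        else:
            added.append(fn)
    removed = mf1.keys()
    return sorted(modified), sorted(added), sorted(removed), sorted(clean)

assert classify({'a': 1, 'b': 2}, {'a': 1, 'b': 3, 'c': 4}, True) == \
    (['b'], ['c'], [], ['a'])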
1462 def heads(self, start=None):
1466 def heads(self, start=None):
1463 heads = self.changelog.heads(start)
1467 heads = self.changelog.heads(start)
1464 # sort the output in rev descending order
1468 # sort the output in rev descending order
1465 return sorted(heads, key=self.changelog.rev, reverse=True)
1469 return sorted(heads, key=self.changelog.rev, reverse=True)
1466
1470
1467 def branchheads(self, branch=None, start=None, closed=False):
1471 def branchheads(self, branch=None, start=None, closed=False):
1468 '''return a (possibly filtered) list of heads for the given branch
1472 '''return a (possibly filtered) list of heads for the given branch
1469
1473
1470 Heads are returned in topological order, from newest to oldest.
1474 Heads are returned in topological order, from newest to oldest.
1471 If branch is None, use the dirstate branch.
1475 If branch is None, use the dirstate branch.
1472 If start is not None, return only heads reachable from start.
1476 If start is not None, return only heads reachable from start.
1473 If closed is True, return heads that are marked as closed as well.
1477 If closed is True, return heads that are marked as closed as well.
1474 '''
1478 '''
1475 if branch is None:
1479 if branch is None:
1476 branch = self[None].branch()
1480 branch = self[None].branch()
1477 branches = self.branchmap()
1481 branches = self.branchmap()
1478 if branch not in branches:
1482 if branch not in branches:
1479 return []
1483 return []
1480 # the cache returns heads ordered lowest to highest
1484 # the cache returns heads ordered lowest to highest
1481 bheads = list(reversed(branches[branch]))
1485 bheads = list(reversed(branches[branch]))
1482 if start is not None:
1486 if start is not None:
1483 # filter out the heads that cannot be reached from startrev
1487 # filter out the heads that cannot be reached from startrev
1484 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1488 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1485 bheads = [h for h in bheads if h in fbheads]
1489 bheads = [h for h in bheads if h in fbheads]
1486 if not closed:
1490 if not closed:
1487 bheads = [h for h in bheads if
1491 bheads = [h for h in bheads if
1488 ('close' not in self.changelog.read(h)[5])]
1492 ('close' not in self.changelog.read(h)[5])]
1489 return bheads
1493 return bheads
1490
1494
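# Toy sketch of the head filtering branchheads() documents above: keep
# only the heads reachable from 'start' and, unless closed heads were
# requested, drop heads whose changeset carries a 'close' marker.
# filterheads and its arguments are illustrative, not Mercurial APIs.
def filterheads(bheads, reachable, closedheads, closed=False):
    if reachable is not None:
        bheads = [h for h in bheads if h in reachable]
    if not closed:
        bheads = [h for h in bheads if h not in closedheads]
    return bheads

assert filterheads(['h1', 'h2', 'h3'], set(['h1', 'h2']),
                   set(['h2'])) == ['h1']
assert filterheads(['h1', 'h2'], None, set(['h2']), closed=True) == ['h1', 'h2']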
1491 def branches(self, nodes):
1495 def branches(self, nodes):
1492 if not nodes:
1496 if not nodes:
1493 nodes = [self.changelog.tip()]
1497 nodes = [self.changelog.tip()]
1494 b = []
1498 b = []
1495 for n in nodes:
1499 for n in nodes:
1496 t = n
1500 t = n
1497 while True:
1501 while True:
1498 p = self.changelog.parents(n)
1502 p = self.changelog.parents(n)
1499 if p[1] != nullid or p[0] == nullid:
1503 if p[1] != nullid or p[0] == nullid:
1500 b.append((t, n, p[0], p[1]))
1504 b.append((t, n, p[0], p[1]))
1501 break
1505 break
1502 n = p[0]
1506 n = p[0]
1503 return b
1507 return b
1504
1508
1505 def between(self, pairs):
1509 def between(self, pairs):
1506 r = []
1510 r = []
1507
1511
1508 for top, bottom in pairs:
1512 for top, bottom in pairs:
1509 n, l, i = top, [], 0
1513 n, l, i = top, [], 0
1510 f = 1
1514 f = 1
1511
1515
1512 while n != bottom and n != nullid:
1516 while n != bottom and n != nullid:
1513 p = self.changelog.parents(n)[0]
1517 p = self.changelog.parents(n)[0]
1514 if i == f:
1518 if i == f:
1515 l.append(n)
1519 l.append(n)
1516 f = f * 2
1520 f = f * 2
1517 n = p
1521 n = p
1518 i += 1
1522 i += 1
1519
1523
1520 r.append(l)
1524 r.append(l)
1521
1525
1522 return r
1526 return r
1523
1527
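# Illustrative sketch of what between() computes for each (top, bottom)
# pair: walk first parents from top towards bottom and sample nodes at
# exponentially growing distances (1, 2, 4, 8, ...), which is how the
# old discovery protocol narrows down a common ancestor.
def samplebetween(firstparent, top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        if i == f:
            l.append(n)
            f *= 2
        n = firstparent.get(n)
        i += 1
    return l

# a linear history 10 -> 9 -> ... -> 0
firstparent = dict((x, x - 1) for x in range(1, 11))
assert samplebetween(firstparent, 10, 0) == [9, 8, 6, 2]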
1524 def pull(self, remote, heads=None, force=False):
1528 def pull(self, remote, heads=None, force=False):
1525 lock = self.lock()
1529 lock = self.lock()
1526 try:
1530 try:
1527 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1531 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1528 force=force)
1532 force=force)
1529 common, fetch, rheads = tmp
1533 common, fetch, rheads = tmp
1530 if not fetch:
1534 if not fetch:
1531 self.ui.status(_("no changes found\n"))
1535 self.ui.status(_("no changes found\n"))
1532 added = []
1536 added = []
1533 result = 0
1537 result = 0
1534 else:
1538 else:
1535 if heads is None and list(common) == [nullid]:
1539 if heads is None and list(common) == [nullid]:
1536 self.ui.status(_("requesting all changes\n"))
1540 self.ui.status(_("requesting all changes\n"))
1537 elif heads is None and remote.capable('changegroupsubset'):
1541 elif heads is None and remote.capable('changegroupsubset'):
1538 # issue1320, avoid a race if remote changed after discovery
1542 # issue1320, avoid a race if remote changed after discovery
1539 heads = rheads
1543 heads = rheads
1540
1544
1541 if remote.capable('getbundle'):
1545 if remote.capable('getbundle'):
1542 cg = remote.getbundle('pull', common=common,
1546 cg = remote.getbundle('pull', common=common,
1543 heads=heads or rheads)
1547 heads=heads or rheads)
1544 elif heads is None:
1548 elif heads is None:
1545 cg = remote.changegroup(fetch, 'pull')
1549 cg = remote.changegroup(fetch, 'pull')
1546 elif not remote.capable('changegroupsubset'):
1550 elif not remote.capable('changegroupsubset'):
1547 raise util.Abort(_("partial pull cannot be done because "
1551 raise util.Abort(_("partial pull cannot be done because "
1548 "other repository doesn't support "
1552 "other repository doesn't support "
1549 "changegroupsubset."))
1553 "changegroupsubset."))
1550 else:
1554 else:
1551 cg = remote.changegroupsubset(fetch, heads, 'pull')
1555 cg = remote.changegroupsubset(fetch, heads, 'pull')
1552 clstart = len(self.changelog)
1556 clstart = len(self.changelog)
1553 result = self.addchangegroup(cg, 'pull', remote.url())
1557 result = self.addchangegroup(cg, 'pull', remote.url())
1554 clend = len(self.changelog)
1558 clend = len(self.changelog)
1555 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1559 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1556
1560
1557 # compute target subset
1561 # compute target subset
1558 if heads is None:
1562 if heads is None:
1559 # We pulled everything possible
1563 # We pulled everything possible
1560 # sync on everything common
1564 # sync on everything common
1561 subset = common + added
1565 subset = common + added
1562 else:
1566 else:
1563 # We pulled a specific subset
1567 # We pulled a specific subset
1564 # sync on this subset
1568 # sync on this subset
1565 subset = heads
1569 subset = heads
1566
1570
1567 # Get remote phases data from remote
1571 # Get remote phases data from remote
1568 remotephases = remote.listkeys('phases')
1572 remotephases = remote.listkeys('phases')
1569 publishing = bool(remotephases.get('publishing', False))
1573 publishing = bool(remotephases.get('publishing', False))
1570 if remotephases and not publishing:
1574 if remotephases and not publishing:
1571 # remote is new and non-publishing
1575 # remote is new and non-publishing
1572 pheads, _dr = phases.analyzeremotephases(self, subset,
1576 pheads, _dr = phases.analyzeremotephases(self, subset,
1573 remotephases)
1577 remotephases)
1574 phases.advanceboundary(self, phases.public, pheads)
1578 phases.advanceboundary(self, phases.public, pheads)
1575 phases.advanceboundary(self, phases.draft, subset)
1579 phases.advanceboundary(self, phases.draft, subset)
1576 else:
1580 else:
1577 # Remote is old or publishing; all common changesets
1581 # Remote is old or publishing; all common changesets
1578 # should be seen as public
1582 # should be seen as public
1579 phases.advanceboundary(self, phases.public, subset)
1583 phases.advanceboundary(self, phases.public, subset)
1580 finally:
1584 finally:
1581 lock.release()
1585 lock.release()
1582
1586
1583 return result
1587 return result
1584
1588
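# Hedged sketch of the phase-synchronisation decision at the end of
# pull() above: a publishing (or pre-phases) remote makes everything we
# now have in common public, while a non-publishing remote only
# publishes the heads it reports as public and the rest of the pulled
# subset stays draft.  phaseafterpull is an illustrative helper.
public, draft = 0, 1

def phaseafterpull(remotephases, publicheads, subset):
    publishing = bool(remotephases.get('publishing', False))
    if not remotephases or publishing:
        return [(public, subset)]
    return [(public, publicheads), (draft, subset)]

assert phaseafterpull({}, [], ['n1']) == [(0, ['n1'])]
assert phaseafterpull({'publishing': 'True'}, [], ['n1']) == [(0, ['n1'])]
assert phaseafterpull({'deadbeef': '0'}, ['deadbeef'], ['n1']) == \
    [(0, ['deadbeef']), (1, ['n1'])]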
1585 def checkpush(self, force, revs):
1589 def checkpush(self, force, revs):
1586 """Extensions can override this function if additional checks have
1590 """Extensions can override this function if additional checks have
1587 to be performed before pushing, or call it if they override push
1591 to be performed before pushing, or call it if they override push
1588 command.
1592 command.
1589 """
1593 """
1590 pass
1594 pass
1591
1595
1592 def push(self, remote, force=False, revs=None, newbranch=False):
1596 def push(self, remote, force=False, revs=None, newbranch=False):
1593 '''Push outgoing changesets (limited by revs) from the current
1597 '''Push outgoing changesets (limited by revs) from the current
1594 repository to remote. Return an integer:
1598 repository to remote. Return an integer:
1595 - 0 means HTTP error *or* nothing to push
1599 - 0 means HTTP error *or* nothing to push
1596 - 1 means we pushed and remote head count is unchanged *or*
1600 - 1 means we pushed and remote head count is unchanged *or*
1597 we have outgoing changesets but refused to push
1601 we have outgoing changesets but refused to push
1598 - other values as described by addchangegroup()
1602 - other values as described by addchangegroup()
1599 '''
1603 '''
1600 # there are two ways to push to remote repo:
1604 # there are two ways to push to remote repo:
1601 #
1605 #
1602 # addchangegroup assumes local user can lock remote
1606 # addchangegroup assumes local user can lock remote
1603 # repo (local filesystem, old ssh servers).
1607 # repo (local filesystem, old ssh servers).
1604 #
1608 #
1605 # unbundle assumes local user cannot lock remote repo (new ssh
1609 # unbundle assumes local user cannot lock remote repo (new ssh
1606 # servers, http servers).
1610 # servers, http servers).
1607
1611
1608 # get local lock as we might write phase data
1612 # get local lock as we might write phase data
1609 locallock = self.lock()
1613 locallock = self.lock()
1610 try:
1614 try:
1611 self.checkpush(force, revs)
1615 self.checkpush(force, revs)
1612 lock = None
1616 lock = None
1613 unbundle = remote.capable('unbundle')
1617 unbundle = remote.capable('unbundle')
1614 if not unbundle:
1618 if not unbundle:
1615 lock = remote.lock()
1619 lock = remote.lock()
1616 try:
1620 try:
1617 # discovery
1621 # discovery
1618 fci = discovery.findcommonincoming
1622 fci = discovery.findcommonincoming
1619 commoninc = fci(self, remote, force=force)
1623 commoninc = fci(self, remote, force=force)
1620 common, inc, remoteheads = commoninc
1624 common, inc, remoteheads = commoninc
1621 fco = discovery.findcommonoutgoing
1625 fco = discovery.findcommonoutgoing
1622 outgoing = fco(self, remote, onlyheads=revs,
1626 outgoing = fco(self, remote, onlyheads=revs,
1623 commoninc=commoninc, force=force)
1627 commoninc=commoninc, force=force)
1624
1628
1625
1629
1626 if not outgoing.missing:
1630 if not outgoing.missing:
1627 # nothing to push
1631 # nothing to push
1628 if outgoing.excluded:
1632 if outgoing.excluded:
1629 msg = "no changes to push but %i secret changesets\n"
1633 msg = "no changes to push but %i secret changesets\n"
1630 self.ui.status(_(msg) % len(outgoing.excluded))
1634 self.ui.status(_(msg) % len(outgoing.excluded))
1631 else:
1635 else:
1632 self.ui.status(_("no changes found\n"))
1636 self.ui.status(_("no changes found\n"))
1633 ret = 1
1637 ret = 1
1634 else:
1638 else:
1635 # something to push
1639 # something to push
1636 if not force:
1640 if not force:
1637 discovery.checkheads(self, remote, outgoing,
1641 discovery.checkheads(self, remote, outgoing,
1638 remoteheads, newbranch,
1642 remoteheads, newbranch,
1639 bool(inc))
1643 bool(inc))
1640
1644
1641 # create a changegroup from local
1645 # create a changegroup from local
1642 if revs is None and not outgoing.excluded:
1646 if revs is None and not outgoing.excluded:
1643 # push everything,
1647 # push everything,
1644 # use the fast path, no race possible on push
1648 # use the fast path, no race possible on push
1645 cg = self._changegroup(outgoing.missing, 'push')
1649 cg = self._changegroup(outgoing.missing, 'push')
1646 else:
1650 else:
1647 cg = self.getlocalbundle('push', outgoing)
1651 cg = self.getlocalbundle('push', outgoing)
1648
1652
1649 # apply changegroup to remote
1653 # apply changegroup to remote
1650 if unbundle:
1654 if unbundle:
1651 # local repo finds heads on server, finds out what
1655 # local repo finds heads on server, finds out what
1652 # revs it must push. once revs transferred, if server
1656 # revs it must push. once revs transferred, if server
1653 # finds it has different heads (someone else won
1657 # finds it has different heads (someone else won
1654 # commit/push race), server aborts.
1658 # commit/push race), server aborts.
1655 if force:
1659 if force:
1656 remoteheads = ['force']
1660 remoteheads = ['force']
1657 # ssh: return remote's addchangegroup()
1661 # ssh: return remote's addchangegroup()
1658 # http: return remote's addchangegroup() or 0 for error
1662 # http: return remote's addchangegroup() or 0 for error
1659 ret = remote.unbundle(cg, remoteheads, 'push')
1663 ret = remote.unbundle(cg, remoteheads, 'push')
1660 else:
1664 else:
1661 # we return an integer indicating remote head count change
1665 # we return an integer indicating remote head count change
1662 ret = remote.addchangegroup(cg, 'push', self.url())
1666 ret = remote.addchangegroup(cg, 'push', self.url())
1663
1667
1664 if ret:
1668 if ret:
1665 # push succeeded, synchronize the target of the push
1669 # push succeeded, synchronize the target of the push
1666 cheads = outgoing.missingheads
1670 cheads = outgoing.missingheads
1667 elif revs is None:
1671 elif revs is None:
1668 # all-out push failed. synchronize all common
1672 # all-out push failed. synchronize all common
1669 cheads = outgoing.commonheads
1673 cheads = outgoing.commonheads
1670 else:
1674 else:
1671 # I want cheads = heads(::missingheads and ::commonheads)
1675 # I want cheads = heads(::missingheads and ::commonheads)
1672 # (missingheads is revs with secret changeset filtered out)
1676 # (missingheads is revs with secret changeset filtered out)
1673 #
1677 #
1674 # This can be expressed as:
1678 # This can be expressed as:
1675 # cheads = ( (missingheads and ::commonheads)
1679 # cheads = ( (missingheads and ::commonheads)
1676 # + (commonheads and ::missingheads))"
1680 # + (commonheads and ::missingheads))"
1677 # )
1681 # )
1678 #
1682 #
1679 # while trying to push we already computed the following:
1683 # while trying to push we already computed the following:
1680 # common = (::commonheads)
1684 # common = (::commonheads)
1681 # missing = ((commonheads::missingheads) - commonheads)
1685 # missing = ((commonheads::missingheads) - commonheads)
1682 #
1686 #
1683 # We can pick:
1687 # We can pick:
1684 # * the part of missingheads that is in common (::commonheads)
1688 # * the part of missingheads that is in common (::commonheads)
1685 common = set(outgoing.common)
1689 common = set(outgoing.common)
1686 cheads = [n for n in revs if n in common]
1690 cheads = [n for n in revs if n in common]
1687 # and
1691 # and
1688 # * commonheads parents on missing
1692 # * commonheads parents on missing
1689 rvset = self.set('%ln and parents(roots(%ln))',
1693 rvset = self.set('%ln and parents(roots(%ln))',
1690 outgoing.commonheads,
1694 outgoing.commonheads,
1691 outgoing.missing)
1695 outgoing.missing)
1692 cheads.extend(c.node() for c in rvset)
1696 cheads.extend(c.node() for c in rvset)
1693 # even when we don't push, exchanging phase data is useful
1697 # even when we don't push, exchanging phase data is useful
1694 remotephases = remote.listkeys('phases')
1698 remotephases = remote.listkeys('phases')
1695 if not remotephases: # old server or public only repo
1699 if not remotephases: # old server or public only repo
1696 phases.advanceboundary(self, phases.public, cheads)
1700 phases.advanceboundary(self, phases.public, cheads)
1697 # don't push any phase data as there is nothing to push
1701 # don't push any phase data as there is nothing to push
1698 else:
1702 else:
1699 ana = phases.analyzeremotephases(self, cheads, remotephases)
1703 ana = phases.analyzeremotephases(self, cheads, remotephases)
1700 pheads, droots = ana
1704 pheads, droots = ana
1701 ### Apply remote phase on local
1705 ### Apply remote phase on local
1702 if remotephases.get('publishing', False):
1706 if remotephases.get('publishing', False):
1703 phases.advanceboundary(self, phases.public, cheads)
1707 phases.advanceboundary(self, phases.public, cheads)
1704 else: # publish = False
1708 else: # publish = False
1705 phases.advanceboundary(self, phases.public, pheads)
1709 phases.advanceboundary(self, phases.public, pheads)
1706 phases.advanceboundary(self, phases.draft, cheads)
1710 phases.advanceboundary(self, phases.draft, cheads)
1707 ### Apply local phase on remote
1711 ### Apply local phase on remote
1708
1712
1709 # Get the list of all revs that are draft on remote but public here.
1713 # Get the list of all revs that are draft on remote but public here.
1710 # XXX Beware that the revset breaks if droots is not strictly
1714 # XXX Beware that the revset breaks if droots is not strictly
1711 # XXX roots; we may want to ensure it is, but that is costly
1715 # XXX roots; we may want to ensure it is, but that is costly
1712 outdated = self.set('heads((%ln::%ln) and public())',
1716 outdated = self.set('heads((%ln::%ln) and public())',
1713 droots, cheads)
1717 droots, cheads)
1714 for newremotehead in outdated:
1718 for newremotehead in outdated:
1715 r = remote.pushkey('phases',
1719 r = remote.pushkey('phases',
1716 newremotehead.hex(),
1720 newremotehead.hex(),
1717 str(phases.draft),
1721 str(phases.draft),
1718 str(phases.public))
1722 str(phases.public))
1719 if not r:
1723 if not r:
1720 self.ui.warn(_('updating %s to public failed!\n')
1724 self.ui.warn(_('updating %s to public failed!\n')
1721 % newremotehead)
1725 % newremotehead)
1722 finally:
1726 finally:
1723 if lock is not None:
1727 if lock is not None:
1724 lock.release()
1728 lock.release()
1725 finally:
1729 finally:
1726 locallock.release()
1730 locallock.release()
1727
1731
1728 self.ui.debug("checking for updated bookmarks\n")
1732 self.ui.debug("checking for updated bookmarks\n")
1729 rb = remote.listkeys('bookmarks')
1733 rb = remote.listkeys('bookmarks')
1730 for k in rb.keys():
1734 for k in rb.keys():
1731 if k in self._bookmarks:
1735 if k in self._bookmarks:
1732 nr, nl = rb[k], hex(self._bookmarks[k])
1736 nr, nl = rb[k], hex(self._bookmarks[k])
1733 if nr in self:
1737 if nr in self:
1734 cr = self[nr]
1738 cr = self[nr]
1735 cl = self[nl]
1739 cl = self[nl]
1736 if cl in cr.descendants():
1740 if cl in cr.descendants():
1737 r = remote.pushkey('bookmarks', k, nr, nl)
1741 r = remote.pushkey('bookmarks', k, nr, nl)
1738 if r:
1742 if r:
1739 self.ui.status(_("updating bookmark %s\n") % k)
1743 self.ui.status(_("updating bookmark %s\n") % k)
1740 else:
1744 else:
1741 self.ui.warn(_('updating bookmark %s'
1745 self.ui.warn(_('updating bookmark %s'
1742 ' failed!\n') % k)
1746 ' failed!\n') % k)
1743
1747
1744 return ret
1748 return ret
1745
1749
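# Sketch of the bookmark check right after the push above: a bookmark
# is only advanced on the remote when the local target descends from
# the node the remote currently has, i.e. the move is a fast-forward.
# 'descendants' is an assumed precomputed map, not a Mercurial API.
def shouldadvance(descendants, remotenode, localnode):
    return localnode in descendants.get(remotenode, set())

descendants = {'r1': set(['r2', 'r3'])}
assert shouldadvance(descendants, 'r1', 'r2')       # fast-forward: push it
assert not shouldadvance(descendants, 'r2', 'r1')   # backwards move: warn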
1746 def changegroupinfo(self, nodes, source):
1750 def changegroupinfo(self, nodes, source):
1747 if self.ui.verbose or source == 'bundle':
1751 if self.ui.verbose or source == 'bundle':
1748 self.ui.status(_("%d changesets found\n") % len(nodes))
1752 self.ui.status(_("%d changesets found\n") % len(nodes))
1749 if self.ui.debugflag:
1753 if self.ui.debugflag:
1750 self.ui.debug("list of changesets:\n")
1754 self.ui.debug("list of changesets:\n")
1751 for node in nodes:
1755 for node in nodes:
1752 self.ui.debug("%s\n" % hex(node))
1756 self.ui.debug("%s\n" % hex(node))
1753
1757
1754 def changegroupsubset(self, bases, heads, source):
1758 def changegroupsubset(self, bases, heads, source):
1755 """Compute a changegroup consisting of all the nodes that are
1759 """Compute a changegroup consisting of all the nodes that are
1756 descendants of any of the bases and ancestors of any of the heads.
1760 descendants of any of the bases and ancestors of any of the heads.
1757 Return a chunkbuffer object whose read() method will return
1761 Return a chunkbuffer object whose read() method will return
1758 successive changegroup chunks.
1762 successive changegroup chunks.
1759
1763
1760 It is fairly complex as determining which filenodes and which
1764 It is fairly complex as determining which filenodes and which
1761 manifest nodes need to be included for the changeset to be complete
1765 manifest nodes need to be included for the changeset to be complete
1762 is non-trivial.
1766 is non-trivial.
1763
1767
1764 Another wrinkle is doing the reverse, figuring out which changeset in
1768 Another wrinkle is doing the reverse, figuring out which changeset in
1765 the changegroup a particular filenode or manifestnode belongs to.
1769 the changegroup a particular filenode or manifestnode belongs to.
1766 """
1770 """
1767 cl = self.changelog
1771 cl = self.changelog
1768 if not bases:
1772 if not bases:
1769 bases = [nullid]
1773 bases = [nullid]
1770 csets, bases, heads = cl.nodesbetween(bases, heads)
1774 csets, bases, heads = cl.nodesbetween(bases, heads)
1771 # We assume that all ancestors of bases are known
1775 # We assume that all ancestors of bases are known
1772 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1776 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1773 return self._changegroupsubset(common, csets, heads, source)
1777 return self._changegroupsubset(common, csets, heads, source)
1774
1778
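# Illustrative set-algebra sketch of the selection changegroupsubset()
# documents above: every node that is both an ancestor of some head and
# a descendant of some base, computed here by brute force on a tiny
# parent map (real repos use revlog indexes instead).
def ancestorsof(parents, node):
    seen, stack = set(), [node]
    while stack:
        n = stack.pop()
        if n not in seen:
            seen.add(n)
            stack.extend(parents.get(n, []))
    return seen

def nodesbetween(parents, bases, heads):
    anc = set()
    for h in heads:
        anc |= ancestorsof(parents, h)
    # a node descends from a base iff some base is among its ancestors
    return set(n for n in anc if ancestorsof(parents, n) & set(bases))

parents = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['b']}
assert nodesbetween(parents, ['b'], ['d']) == set(['b', 'd'])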
1775 def getlocalbundle(self, source, outgoing):
1779 def getlocalbundle(self, source, outgoing):
1776 """Like getbundle, but taking a discovery.outgoing as an argument.
1780 """Like getbundle, but taking a discovery.outgoing as an argument.
1777
1781
1778 This is only implemented for local repos and reuses potentially
1782 This is only implemented for local repos and reuses potentially
1779 precomputed sets in outgoing."""
1783 precomputed sets in outgoing."""
1780 if not outgoing.missing:
1784 if not outgoing.missing:
1781 return None
1785 return None
1782 return self._changegroupsubset(outgoing.common,
1786 return self._changegroupsubset(outgoing.common,
1783 outgoing.missing,
1787 outgoing.missing,
1784 outgoing.missingheads,
1788 outgoing.missingheads,
1785 source)
1789 source)
1786
1790
1787 def getbundle(self, source, heads=None, common=None):
1791 def getbundle(self, source, heads=None, common=None):
1788 """Like changegroupsubset, but returns the set difference between the
1792 """Like changegroupsubset, but returns the set difference between the
1789 ancestors of heads and the ancestors common.
1793 ancestors of heads and the ancestors common.
1790
1794
1791 If heads is None, use the local heads. If common is None, use [nullid].
1795 If heads is None, use the local heads. If common is None, use [nullid].
1792
1796
1793 The nodes in common might not all be known locally due to the way the
1797 The nodes in common might not all be known locally due to the way the
1794 current discovery protocol works.
1798 current discovery protocol works.
1795 """
1799 """
1796 cl = self.changelog
1800 cl = self.changelog
1797 if common:
1801 if common:
1798 nm = cl.nodemap
1802 nm = cl.nodemap
1799 common = [n for n in common if n in nm]
1803 common = [n for n in common if n in nm]
1800 else:
1804 else:
1801 common = [nullid]
1805 common = [nullid]
1802 if not heads:
1806 if not heads:
1803 heads = cl.heads()
1807 heads = cl.heads()
1804 return self.getlocalbundle(source,
1808 return self.getlocalbundle(source,
1805 discovery.outgoing(cl, common, heads))
1809 discovery.outgoing(cl, common, heads))
1806
1810
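    # Usage sketch for getbundle() above (illustrative only; the calling code
    # shown here is an assumption, not part of this file): a server-side
    # caller would typically do something like
    #     cg = repo.getbundle('serve', heads=clientheads, common=commonnodes)
    #     data = cg.read()   # successive changegroup chunks
    # and stream that data back to the requesting client.
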
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

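        # lookup() is the callback handed to the bundler: given a node of the
        # changelog, the manifest or a filelog, it returns the changelog node
        # that "owns" it, and as a side effect records which manifests and
        # file nodes still need to be bundled while reporting progress.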
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

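        # gengroup() streams the bundle in three passes: first the changesets,
        # then the manifests they reference, and finally the file revisions,
        # each file preceded by a header chunk and the whole stream terminated
        # by a closing chunk.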
        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

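        # gennodelst() picks, from a given revlog, the revisions whose linkrev
        # points at one of the outgoing changesets, i.e. exactly the entries
        # that have to be included in this changegroup.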
        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

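            # compute the change in head count; new heads that are already
            # closed do not count, and the result is mapped onto the return
            # value described in the docstring above (never 0).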
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes in
                # the bundle.
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
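        # The streaming clone data, as consumed below: the server first sends
        # a status line (0 = ok, 1 = forbidden, 2 = remote lock failed), then
        # a line with "<total_files> <total_bytes>", and then, for each file,
        # a "<name>\0<size>" header followed by <size> bytes of raw revlog
        # data.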
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

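    # pushkey/listkeys dispatch to the pushkey registry (used for bookmarks
    # and phases, among others); the surrounding hooks let extensions veto or
    # observe the operation.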
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True