localrepo: convert _updatebranchcache from nodespace to revspace...
Joshua Redstone
r17012:ea97744c default
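The substance of the change is in `_updatebranchcache` below: branch-head pruning is done entirely with integer revision numbers instead of node hashes, converting back to nodes only once per branch at the end. As a rough illustration, here is a minimal, self-contained sketch of that pruning step written against a toy parent table rather than Mercurial's changelog API; all names in it (`parents`, `ancestors`, `prune_heads`, `head_revs`, `new_revs`) are hypothetical, chosen only to mirror the shape of the new loop.

    # Toy illustration of head pruning in revision space (hypothetical
    # names, not Mercurial's API); a parent table stands in for the
    # changelog.

    def ancestors(parents, rev, stoprev):
        # all ancestors of rev (exclusive) numbered >= stoprev, mirroring
        # changelog.ancestors([rev], stoprev) in the new code
        seen = set()
        stack = [p for p in parents[rev] if p >= stoprev]
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(p for p in parents[r] if p >= stoprev)
        return seen

    def prune_heads(parents, head_revs, new_revs):
        # merge old and new candidate heads, then drop any candidate that
        # is an ancestor of a newer one -- all integer comparisons
        head_revs = sorted(head_revs + new_revs)
        for latest in sorted(new_revs, reverse=True):  # tip first
            if latest not in head_revs:
                continue
            reachable = ancestors(parents, latest, head_revs[0])
            head_revs = [r for r in head_revs if r not in reachable]
        return head_revs

    # linear history 0 <- 1 <- 2: only rev 2 survives as a head
    parents = {0: [], 1: [0], 2: [1]}
    print(prune_heads(parents, [0, 1], [2]))  # [2]

In the real method, `bheadrevs[0]` plays the role of the stop revision, bounding the ancestor walk at the oldest candidate head so it never descends further than necessary.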
@@ -1,2372 +1,2369 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
-            newbranches.setdefault(c.branch(), []).append(c.node())
+            newbranches.setdefault(c.branch(), []).append(c.rev())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
-        for branch, newnodes in newbranches.iteritems():
-            bheads = partial.setdefault(branch, [])
-            bheads.extend(newnodes)
-            if len(bheads) <= 1:
-                continue
-            bheads = sorted(bheads, key=lambda x: self[x].rev())
-            # starting from tip means fewer passes over reachable
-            while newnodes:
-                latest = newnodes.pop()
-                if latest not in bheads:
-                    continue
-                minbhnode = self[bheads[0]].node()
-                cl = self.changelog
-                ancestors = cl.ancestors([cl.rev(latest)],
-                                         cl.rev(minbhnode))
-                reachable = [cl.node(rev) for rev in ancestors]
-                if reachable:
-                    bheads = [b for b in bheads if b not in reachable]
-            partial[branch] = bheads
+        for branch, newrevs in newbranches.iteritems():
+            bheadrevs = [self.changelog.rev(node) for node in
+                         partial.setdefault(branch, [])]
+            bheadrevs.extend(newrevs)
+            bheadrevs.sort()
+            # starting from tip means fewer passes over ancestors
+            newrevs.sort()
+            while newrevs:
+                latest = newrevs.pop()
+                if latest not in bheadrevs:
+                    continue
+                ancestors = set(self.changelog.ancestors([latest],
+                                                         bheadrevs[0]))
+                if ancestors:
+                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
+            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

601 def lookup(self, key):
598 def lookup(self, key):
602 return self[key].node()
599 return self[key].node()
603
600
604 def lookupbranch(self, key, remote=None):
601 def lookupbranch(self, key, remote=None):
605 repo = remote or self
602 repo = remote or self
606 if key in repo.branchmap():
603 if key in repo.branchmap():
607 return key
604 return key
608
605
609 repo = (remote and remote.local()) and remote or self
606 repo = (remote and remote.local()) and remote or self
610 return repo[key].branch()
607 return repo[key].branch()
611
608
612 def known(self, nodes):
609 def known(self, nodes):
613 nm = self.changelog.nodemap
610 nm = self.changelog.nodemap
614 pc = self._phasecache
611 pc = self._phasecache
615 result = []
612 result = []
616 for n in nodes:
613 for n in nodes:
617 r = nm.get(n)
614 r = nm.get(n)
618 resp = not (r is None or pc.phase(self, r) >= phases.secret)
615 resp = not (r is None or pc.phase(self, r) >= phases.secret)
619 result.append(resp)
616 result.append(resp)
620 return result
617 return result
621
618
622 def local(self):
619 def local(self):
623 return self
620 return self
624
621
625 def join(self, f):
622 def join(self, f):
626 return os.path.join(self.path, f)
623 return os.path.join(self.path, f)
627
624
628 def wjoin(self, f):
625 def wjoin(self, f):
629 return os.path.join(self.root, f)
626 return os.path.join(self.root, f)
630
627
631 def file(self, f):
628 def file(self, f):
632 if f[0] == '/':
629 if f[0] == '/':
633 f = f[1:]
630 f = f[1:]
634 return filelog.filelog(self.sopener, f)
631 return filelog.filelog(self.sopener, f)
635
632
636 def changectx(self, changeid):
633 def changectx(self, changeid):
637 return self[changeid]
634 return self[changeid]
638
635
639 def parents(self, changeid=None):
636 def parents(self, changeid=None):
640 '''get list of changectxs for parents of changeid'''
637 '''get list of changectxs for parents of changeid'''
641 return self[changeid].parents()
638 return self[changeid].parents()
642
639
643 def setparents(self, p1, p2=nullid):
640 def setparents(self, p1, p2=nullid):
644 copies = self.dirstate.setparents(p1, p2)
641 copies = self.dirstate.setparents(p1, p2)
645 if copies:
642 if copies:
646 # Adjust copy records, the dirstate cannot do it, it
643 # Adjust copy records, the dirstate cannot do it, it
647 # requires access to parents manifests. Preserve them
644 # requires access to parents manifests. Preserve them
648 # only for entries added to first parent.
645 # only for entries added to first parent.
649 pctx = self[p1]
646 pctx = self[p1]
650 for f in copies:
647 for f in copies:
651 if f not in pctx and copies[f] in pctx:
648 if f not in pctx and copies[f] in pctx:
652 self.dirstate.copy(copies[f], f)
649 self.dirstate.copy(copies[f], f)
653
650
654 def filectx(self, path, changeid=None, fileid=None):
651 def filectx(self, path, changeid=None, fileid=None):
655 """changeid can be a changeset revision, node, or tag.
652 """changeid can be a changeset revision, node, or tag.
656 fileid can be a file revision or node."""
653 fileid can be a file revision or node."""
657 return context.filectx(self, path, changeid, fileid)
654 return context.filectx(self, path, changeid, fileid)
658
655
659 def getcwd(self):
656 def getcwd(self):
660 return self.dirstate.getcwd()
657 return self.dirstate.getcwd()
661
658
662 def pathto(self, f, cwd=None):
659 def pathto(self, f, cwd=None):
663 return self.dirstate.pathto(f, cwd)
660 return self.dirstate.pathto(f, cwd)
664
661
665 def wfile(self, f, mode='r'):
662 def wfile(self, f, mode='r'):
666 return self.wopener(f, mode)
663 return self.wopener(f, mode)
667
664
668 def _link(self, f):
665 def _link(self, f):
669 return os.path.islink(self.wjoin(f))
666 return os.path.islink(self.wjoin(f))
670
667
671 def _loadfilter(self, filter):
668 def _loadfilter(self, filter):
672 if filter not in self.filterpats:
669 if filter not in self.filterpats:
673 l = []
670 l = []
674 for pat, cmd in self.ui.configitems(filter):
671 for pat, cmd in self.ui.configitems(filter):
675 if cmd == '!':
672 if cmd == '!':
676 continue
673 continue
677 mf = matchmod.match(self.root, '', [pat])
674 mf = matchmod.match(self.root, '', [pat])
678 fn = None
675 fn = None
679 params = cmd
676 params = cmd
680 for name, filterfn in self._datafilters.iteritems():
677 for name, filterfn in self._datafilters.iteritems():
681 if cmd.startswith(name):
678 if cmd.startswith(name):
682 fn = filterfn
679 fn = filterfn
683 params = cmd[len(name):].lstrip()
680 params = cmd[len(name):].lstrip()
684 break
681 break
685 if not fn:
682 if not fn:
686 fn = lambda s, c, **kwargs: util.filter(s, c)
683 fn = lambda s, c, **kwargs: util.filter(s, c)
687 # Wrap old filters not supporting keyword arguments
684 # Wrap old filters not supporting keyword arguments
688 if not inspect.getargspec(fn)[2]:
685 if not inspect.getargspec(fn)[2]:
689 oldfn = fn
686 oldfn = fn
690 fn = lambda s, c, **kwargs: oldfn(s, c)
687 fn = lambda s, c, **kwargs: oldfn(s, c)
691 l.append((mf, fn, params))
688 l.append((mf, fn, params))
692 self.filterpats[filter] = l
689 self.filterpats[filter] = l
693 return self.filterpats[filter]
690 return self.filterpats[filter]
694
691
695 def _filter(self, filterpats, filename, data):
692 def _filter(self, filterpats, filename, data):
696 for mf, fn, cmd in filterpats:
693 for mf, fn, cmd in filterpats:
697 if mf(filename):
694 if mf(filename):
698 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
695 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
699 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
696 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
700 break
697 break
701
698
702 return data
699 return data
703
700
704 @propertycache
701 @propertycache
705 def _encodefilterpats(self):
702 def _encodefilterpats(self):
706 return self._loadfilter('encode')
703 return self._loadfilter('encode')
707
704
708 @propertycache
705 @propertycache
709 def _decodefilterpats(self):
706 def _decodefilterpats(self):
710 return self._loadfilter('decode')
707 return self._loadfilter('decode')
711
708
712 def adddatafilter(self, name, filter):
709 def adddatafilter(self, name, filter):
713 self._datafilters[name] = filter
710 self._datafilters[name] = filter
714
711
715 def wread(self, filename):
712 def wread(self, filename):
716 if self._link(filename):
713 if self._link(filename):
717 data = os.readlink(self.wjoin(filename))
714 data = os.readlink(self.wjoin(filename))
718 else:
715 else:
719 data = self.wopener.read(filename)
716 data = self.wopener.read(filename)
720 return self._filter(self._encodefilterpats, filename, data)
717 return self._filter(self._encodefilterpats, filename, data)
721
718
722 def wwrite(self, filename, data, flags):
719 def wwrite(self, filename, data, flags):
723 data = self._filter(self._decodefilterpats, filename, data)
720 data = self._filter(self._decodefilterpats, filename, data)
724 if 'l' in flags:
721 if 'l' in flags:
725 self.wopener.symlink(data, filename)
722 self.wopener.symlink(data, filename)
726 else:
723 else:
727 self.wopener.write(filename, data)
724 self.wopener.write(filename, data)
728 if 'x' in flags:
725 if 'x' in flags:
729 util.setflags(self.wjoin(filename), False, True)
726 util.setflags(self.wjoin(filename), False, True)
730
727
731 def wwritedata(self, filename, data):
728 def wwritedata(self, filename, data):
732 return self._filter(self._decodefilterpats, filename, data)
729 return self._filter(self._decodefilterpats, filename, data)
733
730
734 def transaction(self, desc):
731 def transaction(self, desc):
735 tr = self._transref and self._transref() or None
732 tr = self._transref and self._transref() or None
736 if tr and tr.running():
733 if tr and tr.running():
737 return tr.nest()
734 return tr.nest()
738
735
739 # abort here if the journal already exists
736 # abort here if the journal already exists
740 if os.path.exists(self.sjoin("journal")):
737 if os.path.exists(self.sjoin("journal")):
741 raise error.RepoError(
738 raise error.RepoError(
742 _("abandoned transaction found - run hg recover"))
739 _("abandoned transaction found - run hg recover"))
743
740
744 self._writejournal(desc)
741 self._writejournal(desc)
745 renames = [(x, undoname(x)) for x in self._journalfiles()]
742 renames = [(x, undoname(x)) for x in self._journalfiles()]
746
743
747 tr = transaction.transaction(self.ui.warn, self.sopener,
744 tr = transaction.transaction(self.ui.warn, self.sopener,
748 self.sjoin("journal"),
745 self.sjoin("journal"),
749 aftertrans(renames),
746 aftertrans(renames),
750 self.store.createmode)
747 self.store.createmode)
751 self._transref = weakref.ref(tr)
748 self._transref = weakref.ref(tr)
752 return tr
749 return tr
753
750
754 def _journalfiles(self):
751 def _journalfiles(self):
755 return (self.sjoin('journal'), self.join('journal.dirstate'),
752 return (self.sjoin('journal'), self.join('journal.dirstate'),
756 self.join('journal.branch'), self.join('journal.desc'),
753 self.join('journal.branch'), self.join('journal.desc'),
757 self.join('journal.bookmarks'),
754 self.join('journal.bookmarks'),
758 self.sjoin('journal.phaseroots'))
755 self.sjoin('journal.phaseroots'))
759
756
760 def undofiles(self):
757 def undofiles(self):
761 return [undoname(x) for x in self._journalfiles()]
758 return [undoname(x) for x in self._journalfiles()]
762
759
763 def _writejournal(self, desc):
760 def _writejournal(self, desc):
764 self.opener.write("journal.dirstate",
761 self.opener.write("journal.dirstate",
765 self.opener.tryread("dirstate"))
762 self.opener.tryread("dirstate"))
766 self.opener.write("journal.branch",
763 self.opener.write("journal.branch",
767 encoding.fromlocal(self.dirstate.branch()))
764 encoding.fromlocal(self.dirstate.branch()))
768 self.opener.write("journal.desc",
765 self.opener.write("journal.desc",
769 "%d\n%s\n" % (len(self), desc))
766 "%d\n%s\n" % (len(self), desc))
770 self.opener.write("journal.bookmarks",
767 self.opener.write("journal.bookmarks",
771 self.opener.tryread("bookmarks"))
768 self.opener.tryread("bookmarks"))
772 self.sopener.write("journal.phaseroots",
769 self.sopener.write("journal.phaseroots",
773 self.sopener.tryread("phaseroots"))
770 self.sopener.tryread("phaseroots"))
774
771
775 def recover(self):
772 def recover(self):
776 lock = self.lock()
773 lock = self.lock()
777 try:
774 try:
778 if os.path.exists(self.sjoin("journal")):
775 if os.path.exists(self.sjoin("journal")):
779 self.ui.status(_("rolling back interrupted transaction\n"))
776 self.ui.status(_("rolling back interrupted transaction\n"))
780 transaction.rollback(self.sopener, self.sjoin("journal"),
777 transaction.rollback(self.sopener, self.sjoin("journal"),
781 self.ui.warn)
778 self.ui.warn)
782 self.invalidate()
779 self.invalidate()
783 return True
780 return True
784 else:
781 else:
785 self.ui.warn(_("no interrupted transaction available\n"))
782 self.ui.warn(_("no interrupted transaction available\n"))
786 return False
783 return False
787 finally:
784 finally:
788 lock.release()
785 lock.release()
789
786
790 def rollback(self, dryrun=False, force=False):
787 def rollback(self, dryrun=False, force=False):
791 wlock = lock = None
788 wlock = lock = None
792 try:
789 try:
793 wlock = self.wlock()
790 wlock = self.wlock()
794 lock = self.lock()
791 lock = self.lock()
795 if os.path.exists(self.sjoin("undo")):
792 if os.path.exists(self.sjoin("undo")):
796 return self._rollback(dryrun, force)
793 return self._rollback(dryrun, force)
797 else:
794 else:
798 self.ui.warn(_("no rollback information available\n"))
795 self.ui.warn(_("no rollback information available\n"))
799 return 1
796 return 1
800 finally:
797 finally:
801 release(lock, wlock)
798 release(lock, wlock)
802
799
803 def _rollback(self, dryrun, force):
800 def _rollback(self, dryrun, force):
804 ui = self.ui
801 ui = self.ui
805 try:
802 try:
806 args = self.opener.read('undo.desc').splitlines()
803 args = self.opener.read('undo.desc').splitlines()
807 (oldlen, desc, detail) = (int(args[0]), args[1], None)
804 (oldlen, desc, detail) = (int(args[0]), args[1], None)
808 if len(args) >= 3:
805 if len(args) >= 3:
809 detail = args[2]
806 detail = args[2]
810 oldtip = oldlen - 1
807 oldtip = oldlen - 1
811
808
812 if detail and ui.verbose:
809 if detail and ui.verbose:
813 msg = (_('repository tip rolled back to revision %s'
810 msg = (_('repository tip rolled back to revision %s'
814 ' (undo %s: %s)\n')
811 ' (undo %s: %s)\n')
815 % (oldtip, desc, detail))
812 % (oldtip, desc, detail))
816 else:
813 else:
817 msg = (_('repository tip rolled back to revision %s'
814 msg = (_('repository tip rolled back to revision %s'
818 ' (undo %s)\n')
815 ' (undo %s)\n')
819 % (oldtip, desc))
816 % (oldtip, desc))
820 except IOError:
817 except IOError:
821 msg = _('rolling back unknown transaction\n')
818 msg = _('rolling back unknown transaction\n')
822 desc = None
819 desc = None
823
820
824 if not force and self['.'] != self['tip'] and desc == 'commit':
821 if not force and self['.'] != self['tip'] and desc == 'commit':
825 raise util.Abort(
822 raise util.Abort(
826 _('rollback of last commit while not checked out '
823 _('rollback of last commit while not checked out '
827 'may lose data'), hint=_('use -f to force'))
824 'may lose data'), hint=_('use -f to force'))
828
825
829 ui.status(msg)
826 ui.status(msg)
830 if dryrun:
827 if dryrun:
831 return 0
828 return 0
832
829
833 parents = self.dirstate.parents()
830 parents = self.dirstate.parents()
834 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
831 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
835 if os.path.exists(self.join('undo.bookmarks')):
832 if os.path.exists(self.join('undo.bookmarks')):
836 util.rename(self.join('undo.bookmarks'),
833 util.rename(self.join('undo.bookmarks'),
837 self.join('bookmarks'))
834 self.join('bookmarks'))
838 if os.path.exists(self.sjoin('undo.phaseroots')):
835 if os.path.exists(self.sjoin('undo.phaseroots')):
839 util.rename(self.sjoin('undo.phaseroots'),
836 util.rename(self.sjoin('undo.phaseroots'),
840 self.sjoin('phaseroots'))
837 self.sjoin('phaseroots'))
841 self.invalidate()
838 self.invalidate()
842
839
843 parentgone = (parents[0] not in self.changelog.nodemap or
840 parentgone = (parents[0] not in self.changelog.nodemap or
844 parents[1] not in self.changelog.nodemap)
841 parents[1] not in self.changelog.nodemap)
845 if parentgone:
842 if parentgone:
846 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
843 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
847 try:
844 try:
848 branch = self.opener.read('undo.branch')
845 branch = self.opener.read('undo.branch')
849 self.dirstate.setbranch(branch)
846 self.dirstate.setbranch(branch)
850 except IOError:
847 except IOError:
851 ui.warn(_('named branch could not be reset: '
848 ui.warn(_('named branch could not be reset: '
852 'current branch is still \'%s\'\n')
849 'current branch is still \'%s\'\n')
853 % self.dirstate.branch())
850 % self.dirstate.branch())
854
851
855 self.dirstate.invalidate()
852 self.dirstate.invalidate()
856 parents = tuple([p.rev() for p in self.parents()])
853 parents = tuple([p.rev() for p in self.parents()])
857 if len(parents) > 1:
854 if len(parents) > 1:
858 ui.status(_('working directory now based on '
855 ui.status(_('working directory now based on '
859 'revisions %d and %d\n') % parents)
856 'revisions %d and %d\n') % parents)
860 else:
857 else:
861 ui.status(_('working directory now based on '
858 ui.status(_('working directory now based on '
862 'revision %d\n') % parents)
859 'revision %d\n') % parents)
863 self.destroyed()
860 self.destroyed()
864 return 0
861 return 0
865
862
866 def invalidatecaches(self):
863 def invalidatecaches(self):
867 def delcache(name):
864 def delcache(name):
868 try:
865 try:
869 delattr(self, name)
866 delattr(self, name)
870 except AttributeError:
867 except AttributeError:
871 pass
868 pass
872
869
873 delcache('_tagscache')
870 delcache('_tagscache')
874
871
875 self._branchcache = None # in UTF-8
872 self._branchcache = None # in UTF-8
876 self._branchcachetip = None
873 self._branchcachetip = None
877
874
878 def invalidatedirstate(self):
875 def invalidatedirstate(self):
879 '''Invalidates the dirstate, causing the next call to dirstate
876 '''Invalidates the dirstate, causing the next call to dirstate
880 to check if it was modified since the last time it was read,
877 to check if it was modified since the last time it was read,
881 rereading it if it has.
878 rereading it if it has.
882
879
883 This is different to dirstate.invalidate() that it doesn't always
880 This is different to dirstate.invalidate() that it doesn't always
884 rereads the dirstate. Use dirstate.invalidate() if you want to
881 rereads the dirstate. Use dirstate.invalidate() if you want to
885 explicitly read the dirstate again (i.e. restoring it to a previous
882 explicitly read the dirstate again (i.e. restoring it to a previous
886 known good state).'''
883 known good state).'''
887 if 'dirstate' in self.__dict__:
884 if 'dirstate' in self.__dict__:
888 for k in self.dirstate._filecache:
885 for k in self.dirstate._filecache:
889 try:
886 try:
890 delattr(self.dirstate, k)
887 delattr(self.dirstate, k)
891 except AttributeError:
888 except AttributeError:
892 pass
889 pass
893 delattr(self, 'dirstate')
890 delattr(self, 'dirstate')
894
891
895 def invalidate(self):
892 def invalidate(self):
896 for k in self._filecache:
893 for k in self._filecache:
897 # dirstate is invalidated separately in invalidatedirstate()
894 # dirstate is invalidated separately in invalidatedirstate()
898 if k == 'dirstate':
895 if k == 'dirstate':
899 continue
896 continue
900
897
901 try:
898 try:
902 delattr(self, k)
899 delattr(self, k)
903 except AttributeError:
900 except AttributeError:
904 pass
901 pass
905 self.invalidatecaches()
902 self.invalidatecaches()
906
903
907 # Discard all cache entries to force reloading everything.
904 # Discard all cache entries to force reloading everything.
908 self._filecache.clear()
905 self._filecache.clear()
909
906
910 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
907 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
911 try:
908 try:
912 l = lock.lock(lockname, 0, releasefn, desc=desc)
909 l = lock.lock(lockname, 0, releasefn, desc=desc)
913 except error.LockHeld, inst:
910 except error.LockHeld, inst:
914 if not wait:
911 if not wait:
915 raise
912 raise
916 self.ui.warn(_("waiting for lock on %s held by %r\n") %
913 self.ui.warn(_("waiting for lock on %s held by %r\n") %
917 (desc, inst.locker))
914 (desc, inst.locker))
918 # default to 600 seconds timeout
915 # default to 600 seconds timeout
919 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
916 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
920 releasefn, desc=desc)
917 releasefn, desc=desc)
921 if acquirefn:
918 if acquirefn:
922 acquirefn()
919 acquirefn()
923 return l
920 return l
924
921
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

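    # Illustrative sketch (not part of the original module) of _afterlock:
    # commit() below registers its 'commit' hook this way so the hook only
    # fires once the store lock is released.
    #
    #     def callback():
    #         repo.ui.status('lock released\n')
    #     repo._afterlock(callback)   # runs immediately if no lock is held
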
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

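    # Illustrative usage sketch for the two locks ('repo' stands for any
    # localrepository instance; not part of the original module). Callers
    # take wlock before lock and release in reverse order, as the status()
    # fixup path below does with wlock:
    #
    #     wlock = repo.wlock()
    #     try:
    #         lock = repo.lock()
    #         try:
    #             pass  # modify the store and working-copy metadata here
    #         finally:
    #             lock.release()
    #     finally:
    #         wlock.release()
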
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

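    # Illustrative sketch (not part of the original module) of the copy
    # metadata recorded above. After 'hg copy foo bar' is committed, the
    # new filelog revision of 'bar' carries:
    #
    #     meta = {'copy': 'foo',          # path copied from
    #             'copyrev': hex(crev)}   # filenode of 'foo' being copied
    #
    # and fparent1 is set to nullid, which tells readers to consult the
    # copy metadata instead of the first parent.
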
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

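    # Illustrative usage sketch (file name and message are hypothetical;
    # not part of the original module) driving commit() with a matcher to
    # restrict the committed files:
    #
    #     m = matchmod.match(repo.root, '', ['src/foo.py'])
    #     node = repo.commit(text='fix foo', user='me <me@example.com>',
    #                        match=m)
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
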
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

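    # Illustrative note (not part of the original module) on the phase
    # retraction above. With the default configuration newcommitphase()
    # returns phases.draft, so for a freshly added node n:
    #
    #     phases.retractboundary(repo, phases.draft, [n])
    #
    # marks n as draft unless an ancestor already forces a higher phase
    # (e.g. secret, when phases.new-commit is configured to secret).
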
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

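    # Illustrative usage sketch (the pattern is hypothetical; not part of
    # the original module) for walk() with a glob matcher:
    #
    #     m = matchmod.match(repo.root, '', ['glob:**.py'])
    #     for f in repo.walk(m, node='tip'):
    #         repo.ui.write(f + '\n')
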
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

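    # Illustrative sketch (not part of the original module) of consuming
    # the 7-tuple returned by status():
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(unknown=True)
    #     for f in modified:
    #         repo.ui.write('M %s\n' % f)
    #     for f in unknown:
    #         repo.ui.write('? %s\n' % f)
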
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

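    # Illustrative note (not part of the original module) on the sampling
    # in between(): f doubles each time a node is recorded, so walking
    # first parents down from 'top' records the nodes at distances
    # 1, 2, 4, 8, ... from top. Each (top, bottom) pair therefore yields a
    # logarithmic number of waypoints for the old discovery protocol.
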
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # we pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # we pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result

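    # Illustrative usage sketch (the URL is hypothetical, and this assumes
    # the pre-peer hg.repository() API of this era for opening a remote;
    # not part of the original module):
    #
    #     other = hg.repository(repo.ui, 'http://example.com/repo')
    #     repo.pull(other)              # pull everything missing locally
    #     repo.pull(other, force=True)  # also allow unrelated history
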
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

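    # Illustrative sketch (not part of the original module) of an extension
    # overriding checkpush() via the usual reposetup subclassing pattern:
    #
    #     def reposetup(ui, repo):
    #         class vetorepo(repo.__class__):
    #             def checkpush(self, force, revs):
    #                 if revs and not force:
    #                     raise util.Abort('partial pushes are not allowed')
    #                 super(vetorepo, self).checkpush(force, revs)
    #         repo.__class__ = vetorepo
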
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return value:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push.  once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # all-out push failed, synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote that are
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

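    # Illustrative sketch (not part of the original module) of interpreting
    # push()'s return value at a call site, per the docstring above:
    #
    #     ret = repo.push(remote, newbranch=True)
    #     if ret is None:
    #         repo.ui.status('nothing to push\n')
    #     elif ret == 0:
    #         repo.ui.warn('push failed (HTTP error)\n')
    #     # other values follow the addchangegroup() convention
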
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1778 def changegroupsubset(self, bases, heads, source):
1775 def changegroupsubset(self, bases, heads, source):
1779 """Compute a changegroup consisting of all the nodes that are
1776 """Compute a changegroup consisting of all the nodes that are
1780 descendants of any of the bases and ancestors of any of the heads.
1777 descendants of any of the bases and ancestors of any of the heads.
1781 Return a chunkbuffer object whose read() method will return
1778 Return a chunkbuffer object whose read() method will return
1782 successive changegroup chunks.
1779 successive changegroup chunks.
1783
1780
1784 It is fairly complex as determining which filenodes and which
1781 It is fairly complex as determining which filenodes and which
1785 manifest nodes need to be included for the changeset to be complete
1782 manifest nodes need to be included for the changeset to be complete
1786 is non-trivial.
1783 is non-trivial.
1787
1784
1788 Another wrinkle is doing the reverse, figuring out which changeset in
1785 Another wrinkle is doing the reverse, figuring out which changeset in
1789 the changegroup a particular filenode or manifestnode belongs to.
1786 the changegroup a particular filenode or manifestnode belongs to.
1790 """
1787 """
1791 cl = self.changelog
1788 cl = self.changelog
1792 if not bases:
1789 if not bases:
1793 bases = [nullid]
1790 bases = [nullid]
1794 csets, bases, heads = cl.nodesbetween(bases, heads)
1791 csets, bases, heads = cl.nodesbetween(bases, heads)
1795 # We assume that all ancestors of bases are known
1792 # We assume that all ancestors of bases are known
1796 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1793 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1797 return self._changegroupsubset(common, csets, heads, source)
1794 return self._changegroupsubset(common, csets, heads, source)
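
    # Editorial usage sketch; 'base' and 'head' stand for hypothetical
    # binary changelog nodes. The returned chunkbuffer is read in chunks:
    #
    #   cg = repo.changegroupsubset([base], [head], 'bundle')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break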

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))
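
    # Editorial sketch of the defaults documented above: with heads=None and
    # common=None this bundles every local changeset, i.e. the difference
    # between ancestors(cl.heads()) and ancestors([nullid]); it returns None
    # when there is nothing to send:
    #
    #   cg = repo.getbundle('pull')   # full-repo changegroup, or None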

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)
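
        # Editorial note: 'bundle.reorder' comes from the user's config,
        # e.g. in hgrc (hypothetical snippet):
        #
        #   [bundle]
        #   reorder = auto          # or any value util.parsebool() accepts
        #
        # 'auto' (reorder=None) leaves the decision to the revlog layer.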

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
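        # Wire format consumed below (editorial summary):
        #   <status>\n                  -- 0 ok, 1 forbidden, 2 lock failed
        #   <total_files> <total_bytes>\n
        #   then, per file:
        #   <name>\0<size>\n            -- followed by <size> bytes of data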
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
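
        # Editorial sketch of the negotiation below: streaming is considered
        # only for full clones (no explicit heads) and only when the remote's
        # format requirements are compatible; otherwise this degrades to a
        # regular pull. Hypothetical caller:
        #
        #   repo.clone(remote, stream=True)   # may still fall back to pull()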

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
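
    # Editorial example: the bookmark push earlier in this file goes through
    # this same pushkey mechanism on the remote side; the key and values
    # below are hypothetical:
    #
    #   ok = repo.pushkey('bookmarks', 'feature', old_hex_node, new_hex_node)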

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
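
# Editorial sketch: aftertrans() returns a closure to run once a transaction
# is over; a hypothetical use renames the journal to the undo file:
#
#   post = aftertrans([('journal', 'undo')])
#   post()   # best effort: a missing journal file is silently skipped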

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
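
# Editorial example of the mapping performed by undoname():
#
#   undoname('.hg/store/journal')            -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'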

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True