localrepo: lowercase "unexpected response" message
Martin Geisler
r16941:a1eb17be default
@@ -1,2370 +1,2370 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

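# A minimal sketch of what the subclass above buys us: filecache watches a
# file under .hg/ and invalidates the decorated property when it changes,
# while storecache resolves the name through the store (obj.sjoin), i.e.
# .hg/store/. Both decorators are used further down in this file:
#
#     @filecache('bookmarks')        # tracks .hg/bookmarks
#     def _bookmarks(self):
#         return bookmarks.read(self)
#
#     @storecache('00changelog.i')   # tracks .hg/store/00changelog.i
#     def changelog(self):
#         return changelog.changelog(self.sopener)
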
class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

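    # Illustration of the indexing protocol above (hypothetical session):
    # None selects the working directory context, anything else a changectx.
    #
    #     wctx = repo[None]    # workingctx for the working directory
    #     tip = repo['tip']    # changectx by tag name
    #     ctx = repo[0]        # changectx by revision number
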
    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

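    # A sketch of the two revset helpers above (hypothetical results);
    # revset.formatspec quotes the extra arguments, so callers do not need
    # to escape them by hand:
    #
    #     repo.revs('branch(%s) and not merge()', 'default')  # -> [0, 2, 5]
    #     for ctx in repo.set('modifies(%s)', 'README'):
    #         print ctx.rev(), ctx.description()
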
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

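    # Hypothetical call matching the docstring above: tag the working
    # directory parent with two names in a single .hgtags commit (user and
    # date fall back to the usual commit defaults when None):
    #
    #     node = repo['.'].node()
    #     repo.tag(['v1.0', 'stable'],
    #              node, 'Added tag v1.0 for changeset %s' % short(node),
    #              False, None, None)
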
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

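    # For instance, with a hypothetical repository state:
    #
    #     repo.tagtype('v1.0')     # -> 'global' (committed in .hgtags)
    #     repo.tagtype('wip')      # -> 'local'  (from .hg/localtags)
    #     repo.tagtype('nosuch')   # -> None
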
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

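    # Shape of the returned mapping (hypothetical nodes): each branch name
    # maps to all of its heads, not just the tipmost one, and closed heads
    # are included:
    #
    #     repo.branchmap()
    #     # -> {'default': [node1, node2], 'stable': [node3]}
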
    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhnode = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhnode)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

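    # known() backs the wire-protocol capability of the same name: for each
    # node a peer asks about, answer True only if the node is present and
    # not in the secret phase. A hypothetical exchange:
    #
    #     repo.known([publicnode, secretnode, missingnode])
    #     # -> [True, False, False]
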
    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records: the dirstate cannot do it, since it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

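    # The filter machinery above is driven by [encode]/[decode] sections in
    # hgrc; a hypothetical configuration piping files through an external
    # command on checkin would look like:
    #
    #     [encode]
    #     **.txt = tr '\r' '\n'
    #
    # Filters registered with adddatafilter() are matched by name prefix in
    # the same sections and run in-process instead.
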
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

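    # The conventional caller-side pattern (a sketch; real callers also take
    # the store lock, see lock() below):
    #
    #     tr = repo.transaction('my-operation')
    #     try:
    #         # ... append to revlogs ...
    #         tr.close()      # commit the journal
    #     finally:
    #         tr.release()    # rolls the journal back if close() never ran
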
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

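    # The retry above honors the ui.timeout setting, so a contended lock is
    # waited on for a bounded time, e.g. in hgrc:
    #
    #     [ui]
    #     timeout = 600    # seconds to wait for a lock held elsewhere
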
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

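    # Illustrative sketch (not part of the original file): how a caller
    # might defer work until the store lock is released. The `repo` name
    # is a hypothetical localrepository instance; commit() below uses the
    # same pattern for its "commit" hook.
    #
    #   def notify():
    #       repo.ui.status("lock released, running deferred work\n")
    #   repo._afterlock(notify)  # runs immediately if no lock is held
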
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

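    # Illustrative sketch (not part of the original file): the conventional
    # acquisition order is wlock() before lock(), released in reverse order
    # with release() (imported from lock at the top of this module). `repo`
    # is a hypothetical localrepository instance.
    #
    #   wlock = lck = None
    #   try:
    #       wlock = repo.wlock()
    #       lck = repo.lock()
    #       # ... modify the store and the working copy ...
    #   finally:
    #       release(lck, wlock)
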
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

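    # Illustrative sketch (not part of the original file): for a commit of
    # 'bar' that was renamed from 'foo', the metadata built above would be
    # roughly as follows (the hex filenode value is made up):
    #
    #   meta = {'copy': 'foo',
    #           'copyrev': '6f4310b00b9a147241b071a60c28a650827fb03d'}
    #
    # and the filelog parents become (nullid, newfparent), which readers
    # interpret as "look up the copy data".
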
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

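    # Illustrative sketch (not part of the original file): a minimal
    # caller of commit(), assuming `repo` is a localrepository whose
    # working directory has pending changes:
    #
    #   node = repo.commit(text="fix parser bug", user="alice <a@b.org>")
    #   if node is None:
    #       repo.ui.status("nothing changed\n")
    #   else:
    #       repo.ui.status("committed %s\n" % short(node))
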
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent already has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

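    # Illustrative sketch (not part of the original file): walking all
    # Python files in the working directory (node=None) with a matcher;
    # `repo` is a hypothetical localrepository instance.
    #
    #   m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #   for f in repo.walk(m):
    #       repo.ui.write("%s\n" % f)
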
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

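    # Illustrative sketch (not part of the original file): unpacking the
    # seven lists returned by status(); each is sorted, and the ignored,
    # clean and unknown lists are only populated when requested.
    #
    #   st = repo.status(ignored=True, clean=True, unknown=True)
    #   modified, added, removed, deleted, unknown, ignored, clean = st
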
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

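    # Illustrative sketch (not part of the original file): listing the
    # open heads of the 'default' branch, newest first; `repo` is a
    # hypothetical localrepository instance.
    #
    #   for h in repo.branchheads('default'):
    #       repo.ui.write("%s\n" % short(h))
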
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

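    # Illustrative sketch (not part of the original file): between()
    # samples the chain from top towards bottom at exponentially growing
    # distances, so for a long linear range it returns the nodes 1, 2, 4,
    # 8, ... steps back from top. The legacy discovery protocol uses this
    # to narrow a range down with O(log n) data per round trip.
    #
    #   pairs = [(topnode, bottomnode)]    # hypothetical node ids
    #   samples = repo.between(pairs)[0]   # nodes at steps 1, 2, 4, 8, ...
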
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result

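    # Illustrative sketch (not part of the original file): pulling
    # everything from a peer; `other` is a hypothetical remote repository
    # object such as one returned by hg.peer()/hg.repository().
    #
    #   result = repo.pull(other)      # heads=None pulls all remote heads
    #   if result == 0:
    #       repo.ui.status("nothing was fetched\n")
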
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

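    # Illustrative sketch (not part of the original file): an extension
    # might replace the repository class to add its own pre-push check.
    # The class-swapping shown here is the usual extension idiom, but this
    # particular veto is hypothetical.
    #
    #   class vetorepo(repo.__class__):
    #       def checkpush(self, force, revs):
    #           super(vetorepo, self).checkpush(force, revs)
    #           for ctx in self.set('secret() and head()'):
    #               raise util.Abort('refusing to push secret head %s' % ctx)
    #   repo.__class__ = vetorepo
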
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # the entire push failed, synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a set of roots; we may want to ensure it
                    # XXX is, but that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

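    # Illustrative sketch (not part of the original file): interpreting
    # the return value of push() according to the docstring above.
    #
    #   ret = repo.push(other)
    #   if ret is None:
    #       repo.ui.status("nothing to push\n")
    #   elif ret == 0:
    #       repo.ui.warn("push failed (HTTP error)\n")
    #   # ret == 1: remote head count unchanged, or the push was refused
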
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1777 """Compute a changegroup consisting of all the nodes that are
1777 """Compute a changegroup consisting of all the nodes that are
1778 descendants of any of the bases and ancestors of any of the heads.
1778 descendants of any of the bases and ancestors of any of the heads.
1779 Return a chunkbuffer object whose read() method will return
1779 Return a chunkbuffer object whose read() method will return
1780 successive changegroup chunks.
1780 successive changegroup chunks.
1781
1781
1782 It is fairly complex as determining which filenodes and which
1782 It is fairly complex as determining which filenodes and which
1783 manifest nodes need to be included for the changeset to be complete
1783 manifest nodes need to be included for the changeset to be complete
1784 is non-trivial.
1784 is non-trivial.
1785
1785
1786 Another wrinkle is doing the reverse, figuring out which changeset in
1786 Another wrinkle is doing the reverse, figuring out which changeset in
1787 the changegroup a particular filenode or manifestnode belongs to.
1787 the changegroup a particular filenode or manifestnode belongs to.
1788 """
1788 """
1789 cl = self.changelog
1789 cl = self.changelog
1790 if not bases:
1790 if not bases:
1791 bases = [nullid]
1791 bases = [nullid]
1792 csets, bases, heads = cl.nodesbetween(bases, heads)
1792 csets, bases, heads = cl.nodesbetween(bases, heads)
1793 # We assume that all ancestors of bases are known
1793 # We assume that all ancestors of bases are known
1794 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1794 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1795 return self._changegroupsubset(common, csets, heads, source)
1795 return self._changegroupsubset(common, csets, heads, source)
1796
1796
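    # A minimal usage sketch, assuming 'repo' is a localrepository and
    # 'base' and 'head' are changelog nodes it already knows about:
    #
    #   cg = repo.changegroupsubset([base], [head], 'bundle')
    #   chunk = cg.read(4096)  # successive changegroup chunks
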
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

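    # A minimal sketch of the defaulting behaviour documented above,
    # assuming 'repo' is a localrepository and h, c are known nodes:
    #
    #   cg = repo.getbundle('pull')                 # all local heads,
    #                                               # common = [nullid]
    #   cg = repo.getbundle('pull', heads=[h], common=[c])
    #
    # Unknown nodes in 'common' are filtered out before the request is
    # handed to getlocalbundle() as a discovery.outgoing object.
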
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

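        # A sketch of what prune() keeps, assuming 'n' is a node in the
        # revlog being filtered:
        #
        #   keep = revlog.linkrev(revlog.rev(n)) not in commonrevs
        #
        # i.e. only nodes introduced by the changesets being transmitted.
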
        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

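    # Note that _changegroupsubset() above delegates to this method when
    # the requested heads are exactly the local heads -- roughly:
    #
    #   if sorted(heads) == sorted(repo.heads()):
    #       return repo._changegroup(csets, source)
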
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use 'added' here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
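
            # The stream_out wire format parsed below, as this code implies
            # (a sketch, not an authoritative protocol description):
            #
            #   <resp>\n                    e.g. "0" on success
            #   <filecount> <bytecount>\n   e.g. "3 123456"
            #   <name>\0<size>\n            per-file header, followed by
            #   <size> raw bytes            that file's data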
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

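    # A minimal usage sketch, assuming 'repo' is a freshly created local
    # repository and 'other' is a remote peer:
    #
    #   repo.clone(other, stream=True)  # streams when the server allows it
    #   repo.clone(other, heads=[h])    # requesting heads forces a pull
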
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

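    # A hedged round-trip sketch over the pushkey namespaces, mirroring the
    # bookmark push earlier in this file ('@' is a hypothetical bookmark,
    # 'newnode' a hex node the repository already has):
    #
    #   marks = repo.listkeys('bookmarks')          # {name: hex node}
    #   repo.pushkey('bookmarks', '@', marks.get('@', ''), newnode)
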
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

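    # A minimal usage sketch, assuming 'repo' is a localrepository:
    #
    #   path = repo.savecommitmessage('WIP: draft message\n')
    #   # 'path' points at the saved file, typically '.hg/last-message.txt'
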
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
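
# For example (a sketch): undoname('.hg/journal.dirstate') returns
# '.hg/undo.dirstate'; a name that does not start with 'journal' trips
# the assertion.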

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True