phases: only synchronize on common changeset when push fails...
Pierre-Yves David
r15933:b8696a66 default
@@ -1,2283 +1,2281 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=False):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        openerreqs = set(('revlogv1', 'generaldelta'))
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

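    # Illustrative sketch, not part of the original file: for a repository
    # created with default settings, _writerequirements produces a requires
    # file with one entry per line, e.g.:
    #
    #   revlogv1
    #   store
    #   fncache
    #   dotencode
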
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

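    # Illustrative example, not in the original: with a working copy whose
    # ctx.substate contains 'sub', _checknested('<root>/sub/dir') walks the
    # candidate prefixes 'sub/dir' and then 'sub', and delegates the
    # remaining 'dir' component to the subrepo via sub.checknested('dir').
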
    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    @filecache('phaseroots')
    def _phaseroots(self):
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        phases.filterunknown(self, phaseroots)
        return phaseroots

    @propertycache
    def _phaserev(self):
        cache = [phases.public] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache

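    # Illustrative sketch, not in the original: _phaserev maps a revision
    # number to its phase, so the phase of the working directory parent can
    # be read as, e.g.:
    #
    #   phase = repo._phaserev[repo['.'].rev()]
    #   # one of phases.public, phases.draft, phases.secret
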
    @filecache('00changelog.i', True)
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @filecache('00manifest.i', True)
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

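    # Illustrative usage, not in the original: revset.formatspec quotes the
    # extra arguments into the expression (e.g. %d for an int, %ln for a
    # list of nodes), so callers can write things like:
    #
    #   for ctx in repo.set('%d:: and not %ln', startrev, excluded):
    #       ...
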
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

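    # Illustrative call, not in the original: create a committed (global)
    # tag on the working directory parent; the argument order follows the
    # signature above:
    #
    #   repo.tag(['v1.0'], repo['.'].node(), 'Added tag v1.0', False,
    #            user, date)
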
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}  # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch; open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

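    # Illustrative layout, derived from the code above but not part of the
    # original file: .hg/cache/branchheads as written by _writebranchcache
    # and parsed back by _readbranchcache looks like:
    #
    #   <tip hex node> <tip rev>
    #   <head hex node> <branch name>
    #   <head hex node> <branch name>
    #   ...
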
    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

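    # Summary of the resolution order above, for reference (not in the
    # original): integer rev -> '.' -> 'null' -> 'tip' -> exact node ->
    # bookmark -> tag -> branch -> unambiguous node prefix, e.g.:
    #
    #   repo.lookup('tip') == repo.changelog.tip()
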
    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or self._phaserev[r] >= phases.secret)
            result.append(resp)
        return result

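    # Illustrative behavior, not in the original: secret changesets are
    # reported as unknown so that discovery never exchanges them, e.g.:
    #
    #   repo.known([draftnode, secretnode, missingnode])
    #   # -> [True, False, False]
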
    def local(self):
        return self

    def cancopy(self):
        return (repo.repository.cancopy(self)
                and not self._phaseroots[phases.secret])

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

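    # Illustrative hgrc configuration consumed by _loadfilter above (not
    # part of this file); each section maps a file pattern to a filter
    # command or a registered data filter:
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #   [decode]
    #   **.txt = dos2unix
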
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

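    # Typical caller pattern, illustrative and not in the original:
    #
    #   tr = repo.transaction('commit')
    #   try:
    #       # ... append to revlogs via tr ...
    #       tr.close()
    #   finally:
    #       tr.release()
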
762 def _writejournal(self, desc):
762 def _writejournal(self, desc):
763 # save dirstate for rollback
763 # save dirstate for rollback
764 try:
764 try:
765 ds = self.opener.read("dirstate")
765 ds = self.opener.read("dirstate")
766 except IOError:
766 except IOError:
767 ds = ""
767 ds = ""
768 self.opener.write("journal.dirstate", ds)
768 self.opener.write("journal.dirstate", ds)
769 self.opener.write("journal.branch",
769 self.opener.write("journal.branch",
770 encoding.fromlocal(self.dirstate.branch()))
770 encoding.fromlocal(self.dirstate.branch()))
771 self.opener.write("journal.desc",
771 self.opener.write("journal.desc",
772 "%d\n%s\n" % (len(self), desc))
772 "%d\n%s\n" % (len(self), desc))
773
773
774 bkname = self.join('bookmarks')
774 bkname = self.join('bookmarks')
775 if os.path.exists(bkname):
775 if os.path.exists(bkname):
776 util.copyfile(bkname, self.join('journal.bookmarks'))
776 util.copyfile(bkname, self.join('journal.bookmarks'))
777 else:
777 else:
778 self.opener.write('journal.bookmarks', '')
778 self.opener.write('journal.bookmarks', '')
779 phasesname = self.sjoin('phaseroots')
779 phasesname = self.sjoin('phaseroots')
780 if os.path.exists(phasesname):
780 if os.path.exists(phasesname):
781 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
781 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
782 else:
782 else:
783 self.sopener.write('journal.phaseroots', '')
783 self.sopener.write('journal.phaseroots', '')
784
784
785 return (self.sjoin('journal'), self.join('journal.dirstate'),
785 return (self.sjoin('journal'), self.join('journal.dirstate'),
786 self.join('journal.branch'), self.join('journal.desc'),
786 self.join('journal.branch'), self.join('journal.desc'),
787 self.join('journal.bookmarks'),
787 self.join('journal.bookmarks'),
788 self.sjoin('journal.phaseroots'))
788 self.sjoin('journal.phaseroots'))
789
789
790 def recover(self):
790 def recover(self):
791 lock = self.lock()
791 lock = self.lock()
792 try:
792 try:
793 if os.path.exists(self.sjoin("journal")):
793 if os.path.exists(self.sjoin("journal")):
794 self.ui.status(_("rolling back interrupted transaction\n"))
794 self.ui.status(_("rolling back interrupted transaction\n"))
795 transaction.rollback(self.sopener, self.sjoin("journal"),
795 transaction.rollback(self.sopener, self.sjoin("journal"),
796 self.ui.warn)
796 self.ui.warn)
797 self.invalidate()
797 self.invalidate()
798 return True
798 return True
799 else:
799 else:
800 self.ui.warn(_("no interrupted transaction available\n"))
800 self.ui.warn(_("no interrupted transaction available\n"))
801 return False
801 return False
802 finally:
802 finally:
803 lock.release()
803 lock.release()
804
804
805 def rollback(self, dryrun=False, force=False):
805 def rollback(self, dryrun=False, force=False):
806 wlock = lock = None
806 wlock = lock = None
807 try:
807 try:
808 wlock = self.wlock()
808 wlock = self.wlock()
809 lock = self.lock()
809 lock = self.lock()
810 if os.path.exists(self.sjoin("undo")):
810 if os.path.exists(self.sjoin("undo")):
811 return self._rollback(dryrun, force)
811 return self._rollback(dryrun, force)
812 else:
812 else:
813 self.ui.warn(_("no rollback information available\n"))
813 self.ui.warn(_("no rollback information available\n"))
814 return 1
814 return 1
815 finally:
815 finally:
816 release(lock, wlock)
816 release(lock, wlock)
817
817
818 def _rollback(self, dryrun, force):
818 def _rollback(self, dryrun, force):
819 ui = self.ui
819 ui = self.ui
820 try:
820 try:
821 args = self.opener.read('undo.desc').splitlines()
821 args = self.opener.read('undo.desc').splitlines()
822 (oldlen, desc, detail) = (int(args[0]), args[1], None)
822 (oldlen, desc, detail) = (int(args[0]), args[1], None)
823 if len(args) >= 3:
823 if len(args) >= 3:
824 detail = args[2]
824 detail = args[2]
825 oldtip = oldlen - 1
825 oldtip = oldlen - 1
826
826
827 if detail and ui.verbose:
827 if detail and ui.verbose:
828 msg = (_('repository tip rolled back to revision %s'
828 msg = (_('repository tip rolled back to revision %s'
829 ' (undo %s: %s)\n')
829 ' (undo %s: %s)\n')
830 % (oldtip, desc, detail))
830 % (oldtip, desc, detail))
831 else:
831 else:
832 msg = (_('repository tip rolled back to revision %s'
832 msg = (_('repository tip rolled back to revision %s'
833 ' (undo %s)\n')
833 ' (undo %s)\n')
834 % (oldtip, desc))
834 % (oldtip, desc))
835 except IOError:
835 except IOError:
836 msg = _('rolling back unknown transaction\n')
836 msg = _('rolling back unknown transaction\n')
837 desc = None
837 desc = None
838
838
839 if not force and self['.'] != self['tip'] and desc == 'commit':
839 if not force and self['.'] != self['tip'] and desc == 'commit':
840 raise util.Abort(
840 raise util.Abort(
841 _('rollback of last commit while not checked out '
841 _('rollback of last commit while not checked out '
842 'may lose data'), hint=_('use -f to force'))
842 'may lose data'), hint=_('use -f to force'))
843
843
844 ui.status(msg)
844 ui.status(msg)
845 if dryrun:
845 if dryrun:
846 return 0
846 return 0
847
847
848 parents = self.dirstate.parents()
848 parents = self.dirstate.parents()
849 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
849 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
850 if os.path.exists(self.join('undo.bookmarks')):
850 if os.path.exists(self.join('undo.bookmarks')):
851 util.rename(self.join('undo.bookmarks'),
851 util.rename(self.join('undo.bookmarks'),
852 self.join('bookmarks'))
852 self.join('bookmarks'))
853 if os.path.exists(self.sjoin('undo.phaseroots')):
853 if os.path.exists(self.sjoin('undo.phaseroots')):
854 util.rename(self.sjoin('undo.phaseroots'),
854 util.rename(self.sjoin('undo.phaseroots'),
855 self.sjoin('phaseroots'))
855 self.sjoin('phaseroots'))
856 self.invalidate()
856 self.invalidate()
857
857
858 parentgone = (parents[0] not in self.changelog.nodemap or
858 parentgone = (parents[0] not in self.changelog.nodemap or
859 parents[1] not in self.changelog.nodemap)
859 parents[1] not in self.changelog.nodemap)
860 if parentgone:
860 if parentgone:
861 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
861 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
862 try:
862 try:
863 branch = self.opener.read('undo.branch')
863 branch = self.opener.read('undo.branch')
864 self.dirstate.setbranch(branch)
864 self.dirstate.setbranch(branch)
865 except IOError:
865 except IOError:
866 ui.warn(_('named branch could not be reset: '
866 ui.warn(_('named branch could not be reset: '
867 'current branch is still \'%s\'\n')
867 'current branch is still \'%s\'\n')
868 % self.dirstate.branch())
868 % self.dirstate.branch())
869
869
870 self.dirstate.invalidate()
870 self.dirstate.invalidate()
871 parents = tuple([p.rev() for p in self.parents()])
871 parents = tuple([p.rev() for p in self.parents()])
872 if len(parents) > 1:
872 if len(parents) > 1:
873 ui.status(_('working directory now based on '
873 ui.status(_('working directory now based on '
874 'revisions %d and %d\n') % parents)
874 'revisions %d and %d\n') % parents)
875 else:
875 else:
876 ui.status(_('working directory now based on '
876 ui.status(_('working directory now based on '
877 'revision %d\n') % parents)
877 'revision %d\n') % parents)
878 self.destroyed()
878 self.destroyed()
879 return 0
879 return 0
880
880
881 def invalidatecaches(self):
881 def invalidatecaches(self):
882 try:
882 try:
883 delattr(self, '_tagscache')
883 delattr(self, '_tagscache')
884 except AttributeError:
884 except AttributeError:
885 pass
885 pass
886
886
887 self._branchcache = None # in UTF-8
887 self._branchcache = None # in UTF-8
888 self._branchcachetip = None
888 self._branchcachetip = None
889
889
890 def invalidatedirstate(self):
890 def invalidatedirstate(self):
891 '''Invalidates the dirstate, causing the next call to dirstate
891 '''Invalidates the dirstate, causing the next call to dirstate
892 to check if it was modified since the last time it was read,
892 to check if it was modified since the last time it was read,
893 rereading it if it has.
893 rereading it if it has.
894
894
895 This is different to dirstate.invalidate() that it doesn't always
895 This is different to dirstate.invalidate() that it doesn't always
896 rereads the dirstate. Use dirstate.invalidate() if you want to
896 rereads the dirstate. Use dirstate.invalidate() if you want to
897 explicitly read the dirstate again (i.e. restoring it to a previous
897 explicitly read the dirstate again (i.e. restoring it to a previous
898 known good state).'''
898 known good state).'''
899 try:
899 try:
900 delattr(self, 'dirstate')
900 delattr(self, 'dirstate')
901 except AttributeError:
901 except AttributeError:
902 pass
902 pass
903
903
904 def invalidate(self):
904 def invalidate(self):
905 for k in self._filecache:
905 for k in self._filecache:
906 # dirstate is invalidated separately in invalidatedirstate()
906 # dirstate is invalidated separately in invalidatedirstate()
907 if k == 'dirstate':
907 if k == 'dirstate':
908 continue
908 continue
909
909
910 try:
910 try:
911 delattr(self, k)
911 delattr(self, k)
912 except AttributeError:
912 except AttributeError:
913 pass
913 pass
914 self.invalidatecaches()
914 self.invalidatecaches()
915
915
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

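    # Configuration sketch (illustrative): the 600 second default above can
    # be overridden from an hgrc:
    #
    #   [ui]
    #   timeout = 60
    #
    # A caller that must not block passes wait=False and handles the
    # resulting error.LockHeld itself.
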
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

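    # Usage sketch (illustrative, mirroring commit() and commitctx() below):
    # callers take wlock before lock, and open a transaction only while the
    # store lock is held.
    #
    #   wlock = repo.wlock()
    #   try:
    #       lock = repo.lock()
    #       try:
    #           tr = repo.transaction("example")
    #           try:
    #               # ... write to the store ...
    #               tr.close()
    #           finally:
    #               tr.release()
    #       finally:
    #           lock.release()
    #   finally:
    #       wlock.release()
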
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

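    # Data-shape sketch (illustrative): in the rename case above, the new
    # filelog revision is stored with nullid as its first parent plus copy
    # metadata, roughly:
    #
    #   meta = {"copy": cfname, "copyrev": hex(crev)}
    #   flog.add(text, meta, tr, linkrev, nullid, newfparent)
    #
    # so readers following history across the rename consult the metadata
    # instead of the parent link.
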
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

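    # Usage sketch (illustrative, hypothetical extension code): committing a
    # subset of the working directory programmatically; the matcher and the
    # file name are assumptions, not from the original file.
    #
    #   m = matchmod.match(repo.root, '', ['src/foo.py'])
    #   node = repo.commit(text='fix foo', user='me <me@example.com>',
    #                      date='0 0', match=m)
    #   # node is None when there was nothing to commit
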
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = self.ui.configint('phases', 'new-commit',
                                            phases.draft)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent already has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

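    # Configuration sketch (illustrative): the 'phases.new-commit' setting
    # read above selects the starting phase of new commits, e.g.
    #
    #   [phases]
    #   new-commit = secret
    #
    # retractboundary() only moves changesets to a *higher* phase, so it is
    # a no-op for any changeset already at or above the target phase.
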
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

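    # Usage sketch (illustrative): the seven lists come back in a fixed
    # order, so callers unpack them positionally.
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
    #
    # The last three lists stay empty unless the matching keyword argument
    # is True.
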
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

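    # Worked example (illustrative): walking first parents from 'top'
    # towards 'bottom', a node is recorded whenever the step counter i
    # equals f, and f doubles after each hit. The kept nodes therefore sit
    # at distances 1, 2, 4, 8, ... from top, a logarithmically sized sample
    # of the path used by the old discovery protocol.
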
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                subset = common + added
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, common + added)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, common + added)
        finally:
            lock.release()

        return result

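    # Behaviour sketch (illustrative): what the phase synchronization above
    # does in each case for a changeset X known to both sides.
    #
    #   remote is publishing (or predates phases):
    #       advanceboundary(public, common + added)  # X becomes public
    #   remote is non-publishing:
    #       advanceboundary(public, pheads)          # only heads the remote
    #                                                # reports as public
    #       advanceboundary(draft, common + added)   # X stays draft unless
    #                                                # covered by pheads
    #
    # advanceboundary() only ever moves changesets toward public; it never
    # retracts a phase, so a pull cannot turn public changesets back into
    # drafts.
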
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            # get local lock as we might write phase data
            locallock = self.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    if outgoing.excluded:
                        msg = "no changes to push but %i secret changesets\n"
                        self.ui.status(_(msg) % len(outgoing.excluded))
                    else:
                        self.ui.status(_("no changes found\n"))
                    ret = 1
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch)

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                cheads = outgoing.commonheads[:]
                if ret:
                    # push succeeded, synchronize common + pushed;
                    # this is a no-op if there was nothing to push
                    cheads += outgoing.missingheads
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                locallock.release()
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

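    # Behaviour sketch (illustrative): the effect of computing 'cheads'
    # conditionally above, which is the point of this changeset.
    #
    #   push succeeded (ret truthy):
    #       cheads = commonheads + missingheads  # synchronize phases on
    #                                            # everything the remote
    #                                            # now has
    #   push failed or was refused (ret falsy):
    #       cheads = commonheads                 # only synchronize phase
    #                                            # data on changesets both
    #                                            # sides already share
    #
    # This avoids pushing phase data for changesets the remote never
    # actually received.
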
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

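    # Usage sketch (illustrative, names are assumptions): bundling
    # everything a remote is missing from the sets discovery produced.
    #
    #   common, _inc, _rheads = discovery.findcommonincoming(repo, remote)
    #   cg = repo.getbundle('push', heads=None, common=common)
    #   # cg is None when there is nothing the remote lacks
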
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

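    # Note (added commentary): the generator above emits a bundle10 stream:
    # changelog chunks, then manifest chunks, then, per changed file, a
    # fileheader followed by that filelog's chunks, terminated by
    # bundler.close().
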
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

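    # Note (added commentary): this is the fast path that _changegroupsubset
    # takes when all local heads are requested; the per-file fnodes
    # bookkeeping can be skipped because the recipient is assumed to have
    # every changenode that is not being sent.
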
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

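    # Illustrative sketch (not part of the original file): decoding the
    # return value at a hypothetical call site, per the docstring above:
    #
    #     ret = repo.addchangegroup(cg, 'pull', url)
    #     # ret == 0: nothing added        ret == 1: added, head count same
    #     # ret >= 2: ret - 1 new heads    ret <= -2: -ret - 1 heads removed
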
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

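    # Note (added commentary): the stream consumed above is, informally,
    # "<status>\n", then "<filecount> <bytecount>\n", then for each file a
    # "<name>\0<size>\n" header followed by exactly <size> bytes of raw
    # revlog data, which is why filechunkiter is bounded by limit=size.
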
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

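    # Note (added commentary): a streaming clone is only attempted when no
    # subset of heads was requested; a remote that merely advertises
    # 'streamreqs' is streamed from only if every advertised format is in
    # self.supportedformats, otherwise we fall back to a regular pull.
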
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

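    # Illustrative (not part of the original file): callers can stash a
    # commit message before a risky operation so the user can recover it:
    #
    #     repo.savecommitmessage(text)   # lands in .hg/last-message.txt
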
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,121 +1,121 @@
  $ "$TESTDIR/hghave" serve || exit 80

  $ hg init test
  $ cd test
  $ echo a > a
  $ hg ci -Ama
  adding a
  $ cd ..
  $ hg clone test test2
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd test2
  $ echo a >> a
  $ hg ci -mb
  $ req() {
  >     hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
  >     cat hg.pid >> $DAEMON_PIDS
  >     hg --cwd ../test2 push http://localhost:$HGPORT/
  >     "$TESTDIR/killdaemons.py"
  >     echo % serve errors
  >     cat errors.log
  > }
  $ cd ../test

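The req helper above starts a fresh server, attempts the push from test2,
kills the daemon, and prints anything the server wrote to errors.log; the
scenarios below only vary the server's .hg/hgrc between calls.
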
expect ssl error

  $ req
  pushing to http://localhost:$HGPORT/
  searching for changes
  remote: ssl required
  remote: ssl required
-  updating ba677d0156c1 to public failed!
+  updating cb9a9f314b8b to public failed!
  % serve errors

expect authorization error

  $ echo '[web]' > .hg/hgrc
  $ echo 'push_ssl = false' >> .hg/hgrc
  $ req
  pushing to http://localhost:$HGPORT/
  searching for changes
  abort: authorization failed
  % serve errors

expect authorization error: must have authorized user

  $ echo 'allow_push = unperson' >> .hg/hgrc
  $ req
  pushing to http://localhost:$HGPORT/
  searching for changes
  abort: authorization failed
  % serve errors

expect success

  $ echo 'allow_push = *' >> .hg/hgrc
  $ echo '[hooks]' >> .hg/hgrc
  $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup 0' >> .hg/hgrc
  $ req
  pushing to http://localhost:$HGPORT/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
  % serve errors
  $ hg rollback
  repository tip rolled back to revision 0 (undo serve)

expect success, server lacks the httpheader capability

  $ CAP=httpheader
  $ . "$TESTDIR/notcapable"
  $ req
  pushing to http://localhost:$HGPORT/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
  % serve errors
  $ hg rollback
  repository tip rolled back to revision 0 (undo serve)

expect success, server lacks the unbundlehash capability

  $ CAP=unbundlehash
  $ . "$TESTDIR/notcapable"
  $ req
  pushing to http://localhost:$HGPORT/
  searching for changes
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  remote: added 1 changesets with 1 changes to 1 files
  remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
  % serve errors
  $ hg rollback
  repository tip rolled back to revision 0 (undo serve)

expect authorization error: all users denied

  $ echo '[web]' > .hg/hgrc
  $ echo 'push_ssl = false' >> .hg/hgrc
  $ echo 'deny_push = *' >> .hg/hgrc
  $ req
  pushing to http://localhost:$HGPORT/
  searching for changes
  abort: authorization failed
  % serve errors

expect authorization error: some users denied, users must be authenticated

  $ echo 'deny_push = unperson' >> .hg/hgrc
  $ req
  pushing to http://localhost:$HGPORT/
  searching for changes
  abort: authorization failed
  % serve errors