localrepo: introduce bookmarkheads...
David Soria Parra
r16707:f8dee1a8 default
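The changeset adds a single method, bookmarkheads(), to localrepository; it
is the block of '+' lines in the hunk below. As a rough usage sketch (not
part of the changeset): assume a repository in the current directory that
carries a bookmark 'work' and a divergent copy such as 'work@default'.
Names are compared after stripping any '@' suffix, so the nodes of both
heads are returned. The bookmark name and repository path here are
illustrative assumptions.

    # Illustrative sketch only, Python 2 as in Mercurial 2.x.
    from mercurial import ui, hg

    repo = hg.repository(ui.ui(), '.')        # assumed local repository
    for node in repo.bookmarkheads('work'):   # matches 'work' and 'work@...'
        print repo[node].rev()                # changectx lookup by node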
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2347 +1,2355 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup, subrepo, discovery, pushkey
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 filecache = scmutil.filecache
 
 class storecache(filecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)
 
 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                         'known', 'getbundle'))
     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
 
     def __init__(self, baseui, path=None, create=False):
         repo.repository.__init__(self)
         self.root = os.path.realpath(util.expandpath(path))
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.opener = scmutil.opener(self.path)
         self.wopener = scmutil.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
 
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass
 
         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     util.makedirs(path)
                 util.makedir(self.path, notindexed=True)
                 requirements = ["revlogv1"]
                 if self.ui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                 # create an invalid changelog
                 self.opener.append(
                     "00changelog.i",
                     '\0\0\0\2' # represents revlogv2
                     ' dummy changelog to prevent using the old repo layout'
                 )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.opener, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()
 
         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise
 
         self.store = store.store(requirements, self.sharedpath, scmutil.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()
 
 
         self._branchcache = None
         self._branchcachetip = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None
 
         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}
 
     def _applyrequirements(self, requirements):
         self.requirements = requirements
         openerreqs = set(('revlogv1', 'generaldelta'))
         self.sopener.options = dict((r, 1) for r in requirements
                                     if r in openerreqs)
 
     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()
 
     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)
 
         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False
 
     @filecache('bookmarks')
     def _bookmarks(self):
         return bookmarks.read(self)
 
     @filecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)
 
     def _writebookmarks(self, marks):
         bookmarks.write(self)
 
+    def bookmarkheads(self, bookmark):
+        name = bookmark.split('@', 1)[0]
+        heads = []
+        for mark, n in self._bookmarks.iteritems():
+            if mark.split('@', 1)[0] == name:
+                heads.append(n)
+        return heads
+
     @storecache('phaseroots')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)
 
     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c
 
     @storecache('00manifest.i')
     def manifest(self):
         return manifest.manifest(self.sopener)
 
     @filecache('dirstate')
     def dirstate(self):
         warned = [0]
         def validate(node):
             try:
                 self.changelog.rev(node)
                 return node
             except error.LookupError:
                 if not warned[0]:
                     warned[0] = True
                     self.ui.warn(_("warning: ignoring unknown"
                                    " working parent %s!\n") % short(node))
                 return nullid
 
         return dirstate.dirstate(self.opener, self.ui, self.root, validate)
 
     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)
 
     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False
 
     def __nonzero__(self):
         return True
 
     def __len__(self):
         return len(self.changelog)
 
     def __iter__(self):
         for i in xrange(len(self)):
             yield i
 
     def revs(self, expr, *args):
         '''Return a list of revisions matching the given revset'''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
         return [r for r in m(self, range(len(self)))]
 
     def set(self, expr, *args):
         '''
         Yield a context for each matching revision, after doing arg
         replacement via revset.formatspec
         '''
         for r in self.revs(expr, *args):
             yield self[r]
 
     def url(self):
         return 'file:' + self.root
 
     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)
 
     tag_disallowed = ':\r\n'
 
     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)
 
         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)
 
         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if (self._tagscache.tagtypes and
                     name in self._tagscache.tagtypes):
                     old = self.tags().get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()
 
         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()
 
             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return
 
         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError, e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()
 
         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)
 
         fp.close()
 
         self.invalidatecaches()
 
         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])
 
         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)
 
         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)
 
         return tagnode
 
     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.
 
         names is a list of strings or, when adding a single tag, names may be a
         string.
 
         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.
 
         keyword arguments:
 
         local: whether to store tags in non-version-controlled file
                (default False)
 
         message: commit message to use if committing
 
         user: name of user to use if committing
 
         date: date tuple to use if committing'''
 
         if not local:
             for x in self.status()[:5]:
                 if '.hgtags' in x:
                     raise util.Abort(_('working copy of .hgtags is changed '
                                        '(please commit .hgtags manually)'))
 
         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)
 
     @propertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''
 
         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None
 
                 self.nodetagscache = self.tagslist = None
 
         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()
 
         return cache
 
     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
         for k, v in self._tagscache.tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t
 
     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''
 
         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?
 
         alltags = {} # map tag name to (node, hist)
         tagtypes = {}
 
         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
 
         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)
 
     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:
 
         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''
 
         return self._tagscache.tagtypes.get(tagname)
 
     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 r = self.changelog.rev(n)
                 l.append((r, t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
 
         return self._tagscache.tagslist
 
     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])
 
     def nodebookmarks(self, node):
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)
 
     def _branchtags(self, partial, lrev):
         # TODO: rename this function?
         tiprev = len(self) - 1
         if lrev != tiprev:
             ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
             self._updatebranchcache(partial, ctxgen)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)
 
         return partial
 
     def updatebranchcache(self):
         tip = self.changelog.tip()
         if self._branchcache is not None and self._branchcachetip == tip:
             return
 
         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._branchcache
 
         self._branchtags(partial, lrev)
         # this private cache holds all heads (not just the branch tips)
         self._branchcache = partial
 
     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         self.updatebranchcache()
         return self._branchcache
 
     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             tip = heads[-1]
             for h in reversed(heads):
                 if 'close' not in self.changelog.read(h)[5]:
                     tip = h
                     break
             bt[bn] = tip
         return bt
 
     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("cache/branchheads")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev
 
         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if lrev >= len(self) or self[lrev].node() != last:
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l:
                     continue
                 node, label = l.split(" ", 1)
                 label = encoding.tolocal(label.strip())
                 partial.setdefault(label, []).append(bin(node))
         except KeyboardInterrupt:
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev
 
     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("cache/branchheads", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, nodes in branches.iteritems():
                 for node in nodes:
                     f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
             f.close()
         except (IOError, OSError):
             pass
 
     def _updatebranchcache(self, partial, ctxgen):
         # collect new branch entries
         newbranches = {}
         for c in ctxgen:
             newbranches.setdefault(c.branch(), []).append(c.node())
         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newnodes in newbranches.iteritems():
             bheads = partial.setdefault(branch, [])
             bheads.extend(newnodes)
             if len(bheads) <= 1:
                 continue
             bheads = sorted(bheads, key=lambda x: self[x].rev())
             # starting from tip means fewer passes over reachable
             while newnodes:
                 latest = newnodes.pop()
                 if latest not in bheads:
                     continue
                 minbhnode = self[bheads[0]].node()
                 reachable = self.changelog.reachable(latest, minbhnode)
                 reachable.remove(latest)
                 if reachable:
                     bheads = [b for b in bheads if b not in reachable]
             partial[branch] = bheads
 
     def lookup(self, key):
         return self[key].node()
 
     def lookupbranch(self, key, remote=None):
         repo = remote or self
         if key in repo.branchmap():
             return key
 
         repo = (remote and remote.local()) and remote or self
         return repo[key].branch()
 
     def known(self, nodes):
         nm = self.changelog.nodemap
         pc = self._phasecache
         result = []
         for n in nodes:
             r = nm.get(n)
             resp = not (r is None or pc.phase(self, r) >= phases.secret)
             result.append(resp)
         return result
 
     def local(self):
         return self
 
     def join(self, f):
         return os.path.join(self.path, f)
 
     def wjoin(self, f):
         return os.path.join(self.root, f)
 
     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)
 
     def changectx(self, changeid):
         return self[changeid]
 
     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()
 
     def setparents(self, p1, p2=nullid):
         copies = self.dirstate.setparents(p1, p2)
         if copies:
             # Adjust copy records, the dirstate cannot do it, it
             # requires access to parents manifests. Preserve them
             # only for entries added to first parent.
             pctx = self[p1]
             for f in copies:
                 if f not in pctx and copies[f] in pctx:
                     self.dirstate.copy(copies[f], f)
 
     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
         fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)
 
     def getcwd(self):
         return self.dirstate.getcwd()
 
     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)
 
     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)
 
     def _link(self, f):
         return os.path.islink(self.wjoin(f))
 
     def _loadfilter(self, filter):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = matchmod.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l
         return self.filterpats[filter]
 
     def _filter(self, filterpats, filename, data):
         for mf, fn, cmd in filterpats:
             if mf(filename):
                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break
 
         return data
 
     @propertycache
     def _encodefilterpats(self):
         return self._loadfilter('encode')
 
     @propertycache
     def _decodefilterpats(self):
         return self._loadfilter('decode')
 
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
 
     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener.read(filename)
         return self._filter(self._encodefilterpats, filename, data)
 
     def wwrite(self, filename, data, flags):
         data = self._filter(self._decodefilterpats, filename, data)
         if 'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             self.wopener.write(filename, data)
             if 'x' in flags:
                 util.setflags(self.wjoin(filename), False, True)
 
     def wwritedata(self, filename, data):
         return self._filter(self._decodefilterpats, filename, data)
 
     def transaction(self, desc):
         tr = self._transref and self._transref() or None
         if tr and tr.running():
             return tr.nest()
 
         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise error.RepoError(
                 _("abandoned transaction found - run hg recover"))
 
         self._writejournal(desc)
         renames = [(x, undoname(x)) for x in self._journalfiles()]
 
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self.store.createmode)
         self._transref = weakref.ref(tr)
         return tr
 
     def _journalfiles(self):
         return (self.sjoin('journal'), self.join('journal.dirstate'),
                 self.join('journal.branch'), self.join('journal.desc'),
                 self.join('journal.bookmarks'),
                 self.sjoin('journal.phaseroots'))
 
     def undofiles(self):
         return [undoname(x) for x in self._journalfiles()]
 
     def _writejournal(self, desc):
         self.opener.write("journal.dirstate",
                           self.opener.tryread("dirstate"))
         self.opener.write("journal.branch",
                           encoding.fromlocal(self.dirstate.branch()))
         self.opener.write("journal.desc",
                           "%d\n%s\n" % (len(self), desc))
         self.opener.write("journal.bookmarks",
                           self.opener.tryread("bookmarks"))
         self.sopener.write("journal.phaseroots",
                            self.sopener.tryread("phaseroots"))
 
     def recover(self):
         lock = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"),
                                      self.ui.warn)
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             lock.release()
 
     def rollback(self, dryrun=False, force=False):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 return self._rollback(dryrun, force)
             else:
                 self.ui.warn(_("no rollback information available\n"))
                 return 1
         finally:
             release(lock, wlock)
 
     def _rollback(self, dryrun, force):
         ui = self.ui
         try:
             args = self.opener.read('undo.desc').splitlines()
             (oldlen, desc, detail) = (int(args[0]), args[1], None)
             if len(args) >= 3:
                 detail = args[2]
             oldtip = oldlen - 1
 
             if detail and ui.verbose:
                 msg = (_('repository tip rolled back to revision %s'
                          ' (undo %s: %s)\n')
                        % (oldtip, desc, detail))
             else:
                 msg = (_('repository tip rolled back to revision %s'
                          ' (undo %s)\n')
                        % (oldtip, desc))
         except IOError:
             msg = _('rolling back unknown transaction\n')
             desc = None
 
         if not force and self['.'] != self['tip'] and desc == 'commit':
             raise util.Abort(
                 _('rollback of last commit while not checked out '
                   'may lose data'), hint=_('use -f to force'))
 
         ui.status(msg)
         if dryrun:
             return 0
 
         parents = self.dirstate.parents()
         transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
         if os.path.exists(self.join('undo.bookmarks')):
             util.rename(self.join('undo.bookmarks'),
                         self.join('bookmarks'))
         if os.path.exists(self.sjoin('undo.phaseroots')):
             util.rename(self.sjoin('undo.phaseroots'),
                         self.sjoin('phaseroots'))
         self.invalidate()
 
         parentgone = (parents[0] not in self.changelog.nodemap or
                       parents[1] not in self.changelog.nodemap)
         if parentgone:
             util.rename(self.join('undo.dirstate'), self.join('dirstate'))
             try:
                 branch = self.opener.read('undo.branch')
                 self.dirstate.setbranch(branch)
             except IOError:
                 ui.warn(_('named branch could not be reset: '
                           'current branch is still \'%s\'\n')
                         % self.dirstate.branch())
 
             self.dirstate.invalidate()
             parents = tuple([p.rev() for p in self.parents()])
             if len(parents) > 1:
                 ui.status(_('working directory now based on '
                             'revisions %d and %d\n') % parents)
             else:
                 ui.status(_('working directory now based on '
                             'revision %d\n') % parents)
         self.destroyed()
         return 0
 
     def invalidatecaches(self):
         def delcache(name):
             try:
                 delattr(self, name)
             except AttributeError:
                 pass
 
         delcache('_tagscache')
 
         self._branchcache = None # in UTF-8
         self._branchcachetip = None
 
     def invalidatedirstate(self):
         '''Invalidates the dirstate, causing the next call to dirstate
         to check if it was modified since the last time it was read,
         rereading it if it has.
 
         This is different to dirstate.invalidate() that it doesn't always
         rereads the dirstate. Use dirstate.invalidate() if you want to
         explicitly read the dirstate again (i.e. restoring it to a previous
         known good state).'''
         if 'dirstate' in self.__dict__:
             for k in self.dirstate._filecache:
                 try:
                     delattr(self.dirstate, k)
                 except AttributeError:
                     pass
             delattr(self, 'dirstate')
 
     def invalidate(self):
         for k in self._filecache:
             # dirstate is invalidated separately in invalidatedirstate()
             if k == 'dirstate':
                 continue
 
             try:
                 delattr(self, k)
             except AttributeError:
                 pass
         self.invalidatecaches()
 
     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except error.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l
 
     def _afterlock(self, callback):
         """add a callback to the current repository lock.
 
         The callback will be executed on lock release."""
         l = self._lockref and self._lockref()
         if l:
             l.postrelease.append(callback)
         else:
             callback()
 
     def lock(self, wait=True):
         '''Lock the repository store (.hg/store) and return a weak reference
         to the lock. Use this before modifying the store (e.g. committing or
         stripping). If you are opening a transaction, get a lock as well.)'''
         l = self._lockref and self._lockref()
         if l is not None and l.held:
             l.lock()
             return l
920
928
921 def unlock():
929 def unlock():
922 self.store.write()
930 self.store.write()
923 if '_phasecache' in vars(self):
931 if '_phasecache' in vars(self):
924 self._phasecache.write()
932 self._phasecache.write()
925 for k, ce in self._filecache.items():
933 for k, ce in self._filecache.items():
926 if k == 'dirstate':
934 if k == 'dirstate':
927 continue
935 continue
928 ce.refresh()
936 ce.refresh()
929
937
930 l = self._lock(self.sjoin("lock"), wait, unlock,
938 l = self._lock(self.sjoin("lock"), wait, unlock,
931 self.invalidate, _('repository %s') % self.origroot)
939 self.invalidate, _('repository %s') % self.origroot)
932 self._lockref = weakref.ref(l)
940 self._lockref = weakref.ref(l)
933 return l
941 return l
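
    # Usage sketch (assumes an existing localrepository instance 'repo');
    # the lock must be explicitly released, typically in a finally block:
    #
    #     l = repo.lock()
    #     try:
    #         ...            # modify the store
    #     finally:
    #         l.release()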

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
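
    # Note (a sketch of the calling convention, as used by commit() below,
    # which takes wlock before commitctx() takes the store lock): callers
    # that need both locks take wlock first:
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         ...
    #     finally:
    #         lock.release()
    #         wlock.release()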

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
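
    # Worked example of the copy metadata above (a sketch, not executed):
    # if 'foo' was renamed to 'bar', the new filelog revision of 'bar' is
    # stored with
    #
    #     meta = {"copy": "foo", "copyrev": hex(<filenode of foo>)}
    #
    # and fparent1 set to nullid, which tells readers to follow the copy
    # metadata instead of the usual first parent.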

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
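
    # Usage sketch (assumes 'repo' and a working directory with changes);
    # returns the new changeset node, or None if there was nothing to
    # commit:
    #
    #     node = repo.commit(text="fix encoding bug",
    #                        user="someone <someone@example.com>")
    #     if node is None:
    #         repo.ui.status("nothing changed\n")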

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the phase boundary does not alter parent
                # changesets. if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
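
    # Usage sketch: walking all Python files in the working directory
    # (node=None means the working directory, as elsewhere in this class):
    #
    #     m = matchmod.match(repo.root, '', ['glob:**.py'])
    #     for f in repo.walk(m):
    #         repo.ui.write("%s\n" % f)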

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
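
    # Usage sketch: the seven lists unpack in a fixed order; comparing the
    # working directory against its first parent is the default:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True)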

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads
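
    # Usage sketch: open heads of the branch named "default", newest first
    # ('short' is already imported from node at the top of this module):
    #
    #     for h in repo.branchheads('default'):
    #         repo.ui.write("%s\n" % short(h))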

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
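
    # Worked example of the sampling above: i counts steps taken from 'top'
    # and a node is kept whenever i equals f, with f doubling after each
    # keep. So for a long chain the kept nodes sit 1, 2, 4, 8, ... steps
    # away from 'top', giving the caller an exponentially spaced sample of
    # each top-bottom range.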

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result
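
    # Usage sketch (from calling code, not this module; assumes
    # 'mercurial.hg' is importable there and the URL is hypothetical):
    #
    #     other = hg.repository(repo.ui, 'http://example.com/repo')
    #     repo.pull(other)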

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # the entire push failed, synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
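
    # Sketch of interpreting the return value, per the docstring above:
    #
    #     ret = repo.push(other)
    #     if ret is None:
    #         repo.ui.status("nothing to push\n")
    #     elif ret == 0:
    #         repo.ui.warn("push failed (HTTP error)\n")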

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)
1779
1787
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

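    # The unbundle10 object returned above reads like a file; gengroup()
    # runs lazily as it is drained. A hypothetical consumer streaming the
    # bundle out in fixed-size pieces:
    #
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       outfp.write(chunk)
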
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset already
                # existed locally as secret.
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

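    # Decoding addchangegroup's return value, per its docstring: the head
    # delta dh is shifted away from 0 so callers can tell "nothing added"
    # from "head count unchanged". A sketch:
    #
    #   ret = repo.addchangegroup(source, 'pull', url)
    #   if ret == 0:
    #       pass                     # no changesets added
    #   elif ret > 0:
    #       addedheads = ret - 1     # ret == 1: same number of heads
    #   else:
    #       removedheads = -ret - 1
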
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

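    # The stream_out wire format parsed above, summarized from this
    # implementation (not from a separate spec):
    #
    #   <status>\n                     0 ok, 1 forbidden, 2 remote lock failed
    #   <total_files> <total_bytes>\n
    #   then, repeated total_files times:
    #     <store path>\0<size>\n followed by exactly <size> bytes of file data
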
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

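    # Clone negotiation in short: an explicit heads list always forces pull;
    # otherwise we stream when the server offers plain 'stream' (revlogv1)
    # or when every entry in its 'streamreqs' list is a format we support.
    # E.g. a server advertising 'streamreqs=revlogv1,generaldelta' is only
    # streamed from if this client also supports generaldelta.
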
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

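    # pushkey is a generic string-keyed compare-and-swap store; core
    # namespaces include 'bookmarks' and 'phases'. A hypothetical direct
    # call (the hex node strings are illustrative):
    #
    #   ok = repo.pushkey('bookmarks', 'mybook', oldhexnode, newhexnode)
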
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

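    # Sketch of the intended use of savecommitmessage (e.g. from commit
    # error handling): stash the message, then point the user at it:
    #
    #   msgfn = repo.savecommitmessage(message)
    #   raise util.Abort(_("commit failed"),
    #                    hint=_("commit message saved in %s") % msgfn)
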
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

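# aftertrans() and undoname() cooperate: transaction setup (elsewhere in
# this class) hands aftertrans a list of (journal, undo) path pairs built
# with undoname(), and the returned closure holds only plain tuples, so no
# repository reference keeps the transaction alive. For example:
#
#   undoname('.hg/store/journal')  ->  '.hg/store/undo'
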
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True