localrepo: introduce method for explicit branch cache update...
Georg Brandl
r12066:d01e2865 default
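For context: this commit splits the old branchmap() in two. The cache refresh that branchmap() used to perform inline becomes the new, explicitly callable updatebranchcache(), and branchmap() is reduced to a thin wrapper that refreshes and then returns self._branchcache. A minimal usage sketch follows; it is not part of the commit, and it assumes a Mercurial build contemporary with r12066 (the mercurial.hg/mercurial.ui calls shown are that era's standard way to open a repository):

    from mercurial import hg, ui

    repo = hg.repository(ui.ui(), '.')  # open the repository in the cwd

    # New in this commit: refresh the branch head cache as a named,
    # explicit operation instead of calling branchmap() for its side effect.
    repo.updatebranchcache()

    # branchmap() still returns {branch: [branchheads]}; it now simply
    # delegates the refresh to updatebranchcache() first.
    heads = repo.branchmap()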
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,1803 +1,1805 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 import url as urlmod
17 import url as urlmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
24 supported = set('revlogv1 store fncache shared parentdelta'.split())
25
25
26 def __init__(self, baseui, path=None, create=0):
26 def __init__(self, baseui, path=None, create=0):
27 repo.repository.__init__(self)
27 repo.repository.__init__(self)
28 self.root = os.path.realpath(util.expandpath(path))
28 self.root = os.path.realpath(util.expandpath(path))
29 self.path = os.path.join(self.root, ".hg")
29 self.path = os.path.join(self.root, ".hg")
30 self.origroot = path
30 self.origroot = path
31 self.opener = util.opener(self.path)
31 self.opener = util.opener(self.path)
32 self.wopener = util.opener(self.root)
32 self.wopener = util.opener(self.root)
33 self.baseui = baseui
33 self.baseui = baseui
34 self.ui = baseui.copy()
34 self.ui = baseui.copy()
35
35
36 try:
36 try:
37 self.ui.readconfig(self.join("hgrc"), self.root)
37 self.ui.readconfig(self.join("hgrc"), self.root)
38 extensions.loadall(self.ui)
38 extensions.loadall(self.ui)
39 except IOError:
39 except IOError:
40 pass
40 pass
41
41
42 if not os.path.isdir(self.path):
42 if not os.path.isdir(self.path):
43 if create:
43 if create:
44 if not os.path.exists(path):
44 if not os.path.exists(path):
45 util.makedirs(path)
45 util.makedirs(path)
46 os.mkdir(self.path)
46 os.mkdir(self.path)
47 requirements = ["revlogv1"]
47 requirements = ["revlogv1"]
48 if self.ui.configbool('format', 'usestore', True):
48 if self.ui.configbool('format', 'usestore', True):
49 os.mkdir(os.path.join(self.path, "store"))
49 os.mkdir(os.path.join(self.path, "store"))
50 requirements.append("store")
50 requirements.append("store")
51 if self.ui.configbool('format', 'usefncache', True):
51 if self.ui.configbool('format', 'usefncache', True):
52 requirements.append("fncache")
52 requirements.append("fncache")
53 # create an invalid changelog
53 # create an invalid changelog
54 self.opener("00changelog.i", "a").write(
54 self.opener("00changelog.i", "a").write(
55 '\0\0\0\2' # represents revlogv2
55 '\0\0\0\2' # represents revlogv2
56 ' dummy changelog to prevent using the old repo layout'
56 ' dummy changelog to prevent using the old repo layout'
57 )
57 )
58 if self.ui.configbool('format', 'parentdelta', False):
58 if self.ui.configbool('format', 'parentdelta', False):
59 requirements.append("parentdelta")
59 requirements.append("parentdelta")
60 reqfile = self.opener("requires", "w")
60 reqfile = self.opener("requires", "w")
61 for r in requirements:
61 for r in requirements:
62 reqfile.write("%s\n" % r)
62 reqfile.write("%s\n" % r)
63 reqfile.close()
63 reqfile.close()
64 else:
64 else:
65 raise error.RepoError(_("repository %s not found") % path)
65 raise error.RepoError(_("repository %s not found") % path)
66 elif create:
66 elif create:
67 raise error.RepoError(_("repository %s already exists") % path)
67 raise error.RepoError(_("repository %s already exists") % path)
68 else:
68 else:
69 # find requirements
69 # find requirements
70 requirements = set()
70 requirements = set()
71 try:
71 try:
72 requirements = set(self.opener("requires").read().splitlines())
72 requirements = set(self.opener("requires").read().splitlines())
73 except IOError, inst:
73 except IOError, inst:
74 if inst.errno != errno.ENOENT:
74 if inst.errno != errno.ENOENT:
75 raise
75 raise
76 for r in requirements - self.supported:
76 for r in requirements - self.supported:
77 raise error.RepoError(_("requirement '%s' not supported") % r)
77 raise error.RepoError(_("requirement '%s' not supported") % r)
78
78
79 self.sharedpath = self.path
79 self.sharedpath = self.path
80 try:
80 try:
81 s = os.path.realpath(self.opener("sharedpath").read())
81 s = os.path.realpath(self.opener("sharedpath").read())
82 if not os.path.exists(s):
82 if not os.path.exists(s):
83 raise error.RepoError(
83 raise error.RepoError(
84 _('.hg/sharedpath points to nonexistent directory %s') % s)
84 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 self.sharedpath = s
85 self.sharedpath = s
86 except IOError, inst:
86 except IOError, inst:
87 if inst.errno != errno.ENOENT:
87 if inst.errno != errno.ENOENT:
88 raise
88 raise
89
89
90 self.store = store.store(requirements, self.sharedpath, util.opener)
90 self.store = store.store(requirements, self.sharedpath, util.opener)
91 self.spath = self.store.path
91 self.spath = self.store.path
92 self.sopener = self.store.opener
92 self.sopener = self.store.opener
93 self.sjoin = self.store.join
93 self.sjoin = self.store.join
94 self.opener.createmode = self.store.createmode
94 self.opener.createmode = self.store.createmode
95 self.sopener.options = {}
95 self.sopener.options = {}
96 if 'parentdelta' in requirements:
96 if 'parentdelta' in requirements:
97 self.sopener.options['parentdelta'] = 1
97 self.sopener.options['parentdelta'] = 1
98
98
99 # These two define the set of tags for this repository. _tags
99 # These two define the set of tags for this repository. _tags
100 # maps tag name to node; _tagtypes maps tag name to 'global' or
100 # maps tag name to node; _tagtypes maps tag name to 'global' or
101 # 'local'. (Global tags are defined by .hgtags across all
101 # 'local'. (Global tags are defined by .hgtags across all
102 # heads, and local tags are defined in .hg/localtags.) They
102 # heads, and local tags are defined in .hg/localtags.) They
103 # constitute the in-memory cache of tags.
103 # constitute the in-memory cache of tags.
104 self._tags = None
104 self._tags = None
105 self._tagtypes = None
105 self._tagtypes = None
106
106
107 self._branchcache = None # in UTF-8
107 self._branchcache = None # in UTF-8
108 self._branchcachetip = None
108 self._branchcachetip = None
109 self.nodetagscache = None
109 self.nodetagscache = None
110 self.filterpats = {}
110 self.filterpats = {}
111 self._datafilters = {}
111 self._datafilters = {}
112 self._transref = self._lockref = self._wlockref = None
112 self._transref = self._lockref = self._wlockref = None
113
113
114 @propertycache
114 @propertycache
115 def changelog(self):
115 def changelog(self):
116 c = changelog.changelog(self.sopener)
116 c = changelog.changelog(self.sopener)
117 if 'HG_PENDING' in os.environ:
117 if 'HG_PENDING' in os.environ:
118 p = os.environ['HG_PENDING']
118 p = os.environ['HG_PENDING']
119 if p.startswith(self.root):
119 if p.startswith(self.root):
120 c.readpending('00changelog.i.a')
120 c.readpending('00changelog.i.a')
121 self.sopener.options['defversion'] = c.version
121 self.sopener.options['defversion'] = c.version
122 return c
122 return c
123
123
124 @propertycache
124 @propertycache
125 def manifest(self):
125 def manifest(self):
126 return manifest.manifest(self.sopener)
126 return manifest.manifest(self.sopener)
127
127
128 @propertycache
128 @propertycache
129 def dirstate(self):
129 def dirstate(self):
130 return dirstate.dirstate(self.opener, self.ui, self.root)
130 return dirstate.dirstate(self.opener, self.ui, self.root)
131
131
132 def __getitem__(self, changeid):
132 def __getitem__(self, changeid):
133 if changeid is None:
133 if changeid is None:
134 return context.workingctx(self)
134 return context.workingctx(self)
135 return context.changectx(self, changeid)
135 return context.changectx(self, changeid)
136
136
137 def __contains__(self, changeid):
137 def __contains__(self, changeid):
138 try:
138 try:
139 return bool(self.lookup(changeid))
139 return bool(self.lookup(changeid))
140 except error.RepoLookupError:
140 except error.RepoLookupError:
141 return False
141 return False
142
142
143 def __nonzero__(self):
143 def __nonzero__(self):
144 return True
144 return True
145
145
146 def __len__(self):
146 def __len__(self):
147 return len(self.changelog)
147 return len(self.changelog)
148
148
149 def __iter__(self):
149 def __iter__(self):
150 for i in xrange(len(self)):
150 for i in xrange(len(self)):
151 yield i
151 yield i
152
152
153 def url(self):
153 def url(self):
154 return 'file:' + self.root
154 return 'file:' + self.root
155
155
156 def hook(self, name, throw=False, **args):
156 def hook(self, name, throw=False, **args):
157 return hook.hook(self.ui, self, name, throw, **args)
157 return hook.hook(self.ui, self, name, throw, **args)
158
158
159 tag_disallowed = ':\r\n'
159 tag_disallowed = ':\r\n'
160
160
161 def _tag(self, names, node, message, local, user, date, extra={}):
161 def _tag(self, names, node, message, local, user, date, extra={}):
162 if isinstance(names, str):
162 if isinstance(names, str):
163 allchars = names
163 allchars = names
164 names = (names,)
164 names = (names,)
165 else:
165 else:
166 allchars = ''.join(names)
166 allchars = ''.join(names)
167 for c in self.tag_disallowed:
167 for c in self.tag_disallowed:
168 if c in allchars:
168 if c in allchars:
169 raise util.Abort(_('%r cannot be used in a tag name') % c)
169 raise util.Abort(_('%r cannot be used in a tag name') % c)
170
170
171 branches = self.branchmap()
171 branches = self.branchmap()
172 for name in names:
172 for name in names:
173 self.hook('pretag', throw=True, node=hex(node), tag=name,
173 self.hook('pretag', throw=True, node=hex(node), tag=name,
174 local=local)
174 local=local)
175 if name in branches:
175 if name in branches:
176 self.ui.warn(_("warning: tag %s conflicts with existing"
176 self.ui.warn(_("warning: tag %s conflicts with existing"
177 " branch name\n") % name)
177 " branch name\n") % name)
178
178
179 def writetags(fp, names, munge, prevtags):
179 def writetags(fp, names, munge, prevtags):
180 fp.seek(0, 2)
180 fp.seek(0, 2)
181 if prevtags and prevtags[-1] != '\n':
181 if prevtags and prevtags[-1] != '\n':
182 fp.write('\n')
182 fp.write('\n')
183 for name in names:
183 for name in names:
184 m = munge and munge(name) or name
184 m = munge and munge(name) or name
185 if self._tagtypes and name in self._tagtypes:
185 if self._tagtypes and name in self._tagtypes:
186 old = self._tags.get(name, nullid)
186 old = self._tags.get(name, nullid)
187 fp.write('%s %s\n' % (hex(old), m))
187 fp.write('%s %s\n' % (hex(old), m))
188 fp.write('%s %s\n' % (hex(node), m))
188 fp.write('%s %s\n' % (hex(node), m))
189 fp.close()
189 fp.close()
190
190
191 prevtags = ''
191 prevtags = ''
192 if local:
192 if local:
193 try:
193 try:
194 fp = self.opener('localtags', 'r+')
194 fp = self.opener('localtags', 'r+')
195 except IOError:
195 except IOError:
196 fp = self.opener('localtags', 'a')
196 fp = self.opener('localtags', 'a')
197 else:
197 else:
198 prevtags = fp.read()
198 prevtags = fp.read()
199
199
200 # local tags are stored in the current charset
200 # local tags are stored in the current charset
201 writetags(fp, names, None, prevtags)
201 writetags(fp, names, None, prevtags)
202 for name in names:
202 for name in names:
203 self.hook('tag', node=hex(node), tag=name, local=local)
203 self.hook('tag', node=hex(node), tag=name, local=local)
204 return
204 return
205
205
206 try:
206 try:
207 fp = self.wfile('.hgtags', 'rb+')
207 fp = self.wfile('.hgtags', 'rb+')
208 except IOError:
208 except IOError:
209 fp = self.wfile('.hgtags', 'ab')
209 fp = self.wfile('.hgtags', 'ab')
210 else:
210 else:
211 prevtags = fp.read()
211 prevtags = fp.read()
212
212
213 # committed tags are stored in UTF-8
213 # committed tags are stored in UTF-8
214 writetags(fp, names, encoding.fromlocal, prevtags)
214 writetags(fp, names, encoding.fromlocal, prevtags)
215
215
216 if '.hgtags' not in self.dirstate:
216 if '.hgtags' not in self.dirstate:
217 self[None].add(['.hgtags'])
217 self[None].add(['.hgtags'])
218
218
219 m = matchmod.exact(self.root, '', ['.hgtags'])
219 m = matchmod.exact(self.root, '', ['.hgtags'])
220 tagnode = self.commit(message, user, date, extra=extra, match=m)
220 tagnode = self.commit(message, user, date, extra=extra, match=m)
221
221
222 for name in names:
222 for name in names:
223 self.hook('tag', node=hex(node), tag=name, local=local)
223 self.hook('tag', node=hex(node), tag=name, local=local)
224
224
225 return tagnode
225 return tagnode
226
226
227 def tag(self, names, node, message, local, user, date):
227 def tag(self, names, node, message, local, user, date):
228 '''tag a revision with one or more symbolic names.
228 '''tag a revision with one or more symbolic names.
229
229
230 names is a list of strings or, when adding a single tag, names may be a
230 names is a list of strings or, when adding a single tag, names may be a
231 string.
231 string.
232
232
233 if local is True, the tags are stored in a per-repository file.
233 if local is True, the tags are stored in a per-repository file.
234 otherwise, they are stored in the .hgtags file, and a new
234 otherwise, they are stored in the .hgtags file, and a new
235 changeset is committed with the change.
235 changeset is committed with the change.
236
236
237 keyword arguments:
237 keyword arguments:
238
238
239 local: whether to store tags in non-version-controlled file
239 local: whether to store tags in non-version-controlled file
240 (default False)
240 (default False)
241
241
242 message: commit message to use if committing
242 message: commit message to use if committing
243
243
244 user: name of user to use if committing
244 user: name of user to use if committing
245
245
246 date: date tuple to use if committing'''
246 date: date tuple to use if committing'''
247
247
248 for x in self.status()[:5]:
248 for x in self.status()[:5]:
249 if '.hgtags' in x:
249 if '.hgtags' in x:
250 raise util.Abort(_('working copy of .hgtags is changed '
250 raise util.Abort(_('working copy of .hgtags is changed '
251 '(please commit .hgtags manually)'))
251 '(please commit .hgtags manually)'))
252
252
253 self.tags() # instantiate the cache
253 self.tags() # instantiate the cache
254 self._tag(names, node, message, local, user, date)
254 self._tag(names, node, message, local, user, date)
255
255
256 def tags(self):
256 def tags(self):
257 '''return a mapping of tag to node'''
257 '''return a mapping of tag to node'''
258 if self._tags is None:
258 if self._tags is None:
259 (self._tags, self._tagtypes) = self._findtags()
259 (self._tags, self._tagtypes) = self._findtags()
260
260
261 return self._tags
261 return self._tags
262
262
263 def _findtags(self):
263 def _findtags(self):
264 '''Do the hard work of finding tags. Return a pair of dicts
264 '''Do the hard work of finding tags. Return a pair of dicts
265 (tags, tagtypes) where tags maps tag name to node, and tagtypes
265 (tags, tagtypes) where tags maps tag name to node, and tagtypes
266 maps tag name to a string like \'global\' or \'local\'.
266 maps tag name to a string like \'global\' or \'local\'.
267 Subclasses or extensions are free to add their own tags, but
267 Subclasses or extensions are free to add their own tags, but
268 should be aware that the returned dicts will be retained for the
268 should be aware that the returned dicts will be retained for the
269 duration of the localrepo object.'''
269 duration of the localrepo object.'''
270
270
271 # XXX what tagtype should subclasses/extensions use? Currently
271 # XXX what tagtype should subclasses/extensions use? Currently
272 # mq and bookmarks add tags, but do not set the tagtype at all.
272 # mq and bookmarks add tags, but do not set the tagtype at all.
273 # Should each extension invent its own tag type? Should there
273 # Should each extension invent its own tag type? Should there
274 # be one tagtype for all such "virtual" tags? Or is the status
274 # be one tagtype for all such "virtual" tags? Or is the status
275 # quo fine?
275 # quo fine?
276
276
277 alltags = {} # map tag name to (node, hist)
277 alltags = {} # map tag name to (node, hist)
278 tagtypes = {}
278 tagtypes = {}
279
279
280 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
280 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
281 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
281 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
282
282
283 # Build the return dicts. Have to re-encode tag names because
283 # Build the return dicts. Have to re-encode tag names because
284 # the tags module always uses UTF-8 (in order not to lose info
284 # the tags module always uses UTF-8 (in order not to lose info
285 # writing to the cache), but the rest of Mercurial wants them in
285 # writing to the cache), but the rest of Mercurial wants them in
286 # local encoding.
286 # local encoding.
287 tags = {}
287 tags = {}
288 for (name, (node, hist)) in alltags.iteritems():
288 for (name, (node, hist)) in alltags.iteritems():
289 if node != nullid:
289 if node != nullid:
290 tags[encoding.tolocal(name)] = node
290 tags[encoding.tolocal(name)] = node
291 tags['tip'] = self.changelog.tip()
291 tags['tip'] = self.changelog.tip()
292 tagtypes = dict([(encoding.tolocal(name), value)
292 tagtypes = dict([(encoding.tolocal(name), value)
293 for (name, value) in tagtypes.iteritems()])
293 for (name, value) in tagtypes.iteritems()])
294 return (tags, tagtypes)
294 return (tags, tagtypes)
295
295
296 def tagtype(self, tagname):
296 def tagtype(self, tagname):
297 '''
297 '''
298 return the type of the given tag. result can be:
298 return the type of the given tag. result can be:
299
299
300 'local' : a local tag
300 'local' : a local tag
301 'global' : a global tag
301 'global' : a global tag
302 None : tag does not exist
302 None : tag does not exist
303 '''
303 '''
304
304
305 self.tags()
305 self.tags()
306
306
307 return self._tagtypes.get(tagname)
307 return self._tagtypes.get(tagname)
308
308
309 def tagslist(self):
309 def tagslist(self):
310 '''return a list of tags ordered by revision'''
310 '''return a list of tags ordered by revision'''
311 l = []
311 l = []
312 for t, n in self.tags().iteritems():
312 for t, n in self.tags().iteritems():
313 try:
313 try:
314 r = self.changelog.rev(n)
314 r = self.changelog.rev(n)
315 except:
315 except:
316 r = -2 # sort to the beginning of the list if unknown
316 r = -2 # sort to the beginning of the list if unknown
317 l.append((r, t, n))
317 l.append((r, t, n))
318 return [(t, n) for r, t, n in sorted(l)]
318 return [(t, n) for r, t, n in sorted(l)]
319
319
320 def nodetags(self, node):
320 def nodetags(self, node):
321 '''return the tags associated with a node'''
321 '''return the tags associated with a node'''
322 if not self.nodetagscache:
322 if not self.nodetagscache:
323 self.nodetagscache = {}
323 self.nodetagscache = {}
324 for t, n in self.tags().iteritems():
324 for t, n in self.tags().iteritems():
325 self.nodetagscache.setdefault(n, []).append(t)
325 self.nodetagscache.setdefault(n, []).append(t)
326 for tags in self.nodetagscache.itervalues():
326 for tags in self.nodetagscache.itervalues():
327 tags.sort()
327 tags.sort()
328 return self.nodetagscache.get(node, [])
328 return self.nodetagscache.get(node, [])
329
329
330 def _branchtags(self, partial, lrev):
330 def _branchtags(self, partial, lrev):
331 # TODO: rename this function?
331 # TODO: rename this function?
332 tiprev = len(self) - 1
332 tiprev = len(self) - 1
333 if lrev != tiprev:
333 if lrev != tiprev:
334 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
334 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
335 self._updatebranchcache(partial, ctxgen)
335 self._updatebranchcache(partial, ctxgen)
336 self._writebranchcache(partial, self.changelog.tip(), tiprev)
336 self._writebranchcache(partial, self.changelog.tip(), tiprev)
337
337
338 return partial
338 return partial
339
339
340 def branchmap(self):
340 def updatebranchcache(self):
341 '''returns a dictionary {branch: [branchheads]}'''
342 tip = self.changelog.tip()
341 tip = self.changelog.tip()
343 if self._branchcache is not None and self._branchcachetip == tip:
342 if self._branchcache is not None and self._branchcachetip == tip:
344 return self._branchcache
343 return self._branchcache
345
344
346 oldtip = self._branchcachetip
345 oldtip = self._branchcachetip
347 self._branchcachetip = tip
346 self._branchcachetip = tip
348 if oldtip is None or oldtip not in self.changelog.nodemap:
347 if oldtip is None or oldtip not in self.changelog.nodemap:
349 partial, last, lrev = self._readbranchcache()
348 partial, last, lrev = self._readbranchcache()
350 else:
349 else:
351 lrev = self.changelog.rev(oldtip)
350 lrev = self.changelog.rev(oldtip)
352 partial = self._branchcache
351 partial = self._branchcache
353
352
354 self._branchtags(partial, lrev)
353 self._branchtags(partial, lrev)
355 # this private cache holds all heads (not just tips)
354 # this private cache holds all heads (not just tips)
356 self._branchcache = partial
355 self._branchcache = partial
357
356
357 def branchmap(self):
358 '''returns a dictionary {branch: [branchheads]}'''
359 self.updatebranchcache()
358 return self._branchcache
360 return self._branchcache
359
361
360 def branchtags(self):
362 def branchtags(self):
361 '''return a dict where branch names map to the tipmost head of
363 '''return a dict where branch names map to the tipmost head of
362 the branch, open heads come before closed'''
364 the branch, open heads come before closed'''
363 bt = {}
365 bt = {}
364 for bn, heads in self.branchmap().iteritems():
366 for bn, heads in self.branchmap().iteritems():
365 tip = heads[-1]
367 tip = heads[-1]
366 for h in reversed(heads):
368 for h in reversed(heads):
367 if 'close' not in self.changelog.read(h)[5]:
369 if 'close' not in self.changelog.read(h)[5]:
368 tip = h
370 tip = h
369 break
371 break
370 bt[bn] = tip
372 bt[bn] = tip
371 return bt
373 return bt
372
374
373
375
374 def _readbranchcache(self):
376 def _readbranchcache(self):
375 partial = {}
377 partial = {}
376 try:
378 try:
377 f = self.opener("branchheads.cache")
379 f = self.opener("branchheads.cache")
378 lines = f.read().split('\n')
380 lines = f.read().split('\n')
379 f.close()
381 f.close()
380 except (IOError, OSError):
382 except (IOError, OSError):
381 return {}, nullid, nullrev
383 return {}, nullid, nullrev
382
384
383 try:
385 try:
384 last, lrev = lines.pop(0).split(" ", 1)
386 last, lrev = lines.pop(0).split(" ", 1)
385 last, lrev = bin(last), int(lrev)
387 last, lrev = bin(last), int(lrev)
386 if lrev >= len(self) or self[lrev].node() != last:
388 if lrev >= len(self) or self[lrev].node() != last:
387 # invalidate the cache
389 # invalidate the cache
388 raise ValueError('invalidating branch cache (tip differs)')
390 raise ValueError('invalidating branch cache (tip differs)')
389 for l in lines:
391 for l in lines:
390 if not l:
392 if not l:
391 continue
393 continue
392 node, label = l.split(" ", 1)
394 node, label = l.split(" ", 1)
393 partial.setdefault(label.strip(), []).append(bin(node))
395 partial.setdefault(label.strip(), []).append(bin(node))
394 except KeyboardInterrupt:
396 except KeyboardInterrupt:
395 raise
397 raise
396 except Exception, inst:
398 except Exception, inst:
397 if self.ui.debugflag:
399 if self.ui.debugflag:
398 self.ui.warn(str(inst), '\n')
400 self.ui.warn(str(inst), '\n')
399 partial, last, lrev = {}, nullid, nullrev
401 partial, last, lrev = {}, nullid, nullrev
400 return partial, last, lrev
402 return partial, last, lrev
401
403
402 def _writebranchcache(self, branches, tip, tiprev):
404 def _writebranchcache(self, branches, tip, tiprev):
403 try:
405 try:
404 f = self.opener("branchheads.cache", "w", atomictemp=True)
406 f = self.opener("branchheads.cache", "w", atomictemp=True)
405 f.write("%s %s\n" % (hex(tip), tiprev))
407 f.write("%s %s\n" % (hex(tip), tiprev))
406 for label, nodes in branches.iteritems():
408 for label, nodes in branches.iteritems():
407 for node in nodes:
409 for node in nodes:
408 f.write("%s %s\n" % (hex(node), label))
410 f.write("%s %s\n" % (hex(node), label))
409 f.rename()
411 f.rename()
410 except (IOError, OSError):
412 except (IOError, OSError):
411 pass
413 pass
412
414
413 def _updatebranchcache(self, partial, ctxgen):
415 def _updatebranchcache(self, partial, ctxgen):
414 # collect new branch entries
416 # collect new branch entries
415 newbranches = {}
417 newbranches = {}
416 for c in ctxgen:
418 for c in ctxgen:
417 newbranches.setdefault(c.branch(), []).append(c.node())
419 newbranches.setdefault(c.branch(), []).append(c.node())
418 # if older branchheads are reachable from new ones, they aren't
420 # if older branchheads are reachable from new ones, they aren't
419 # really branchheads. Note checking parents is insufficient:
421 # really branchheads. Note checking parents is insufficient:
420 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
422 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
421 for branch, newnodes in newbranches.iteritems():
423 for branch, newnodes in newbranches.iteritems():
422 bheads = partial.setdefault(branch, [])
424 bheads = partial.setdefault(branch, [])
423 bheads.extend(newnodes)
425 bheads.extend(newnodes)
424 if len(bheads) <= 1:
426 if len(bheads) <= 1:
425 continue
427 continue
426 # starting from tip means fewer passes over reachable
428 # starting from tip means fewer passes over reachable
427 while newnodes:
429 while newnodes:
428 latest = newnodes.pop()
430 latest = newnodes.pop()
429 if latest not in bheads:
431 if latest not in bheads:
430 continue
432 continue
431 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
433 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
432 reachable = self.changelog.reachable(latest, minbhrev)
434 reachable = self.changelog.reachable(latest, minbhrev)
433 reachable.remove(latest)
435 reachable.remove(latest)
434 bheads = [b for b in bheads if b not in reachable]
436 bheads = [b for b in bheads if b not in reachable]
435 partial[branch] = bheads
437 partial[branch] = bheads
436
438
437 def lookup(self, key):
439 def lookup(self, key):
438 if isinstance(key, int):
440 if isinstance(key, int):
439 return self.changelog.node(key)
441 return self.changelog.node(key)
440 elif key == '.':
442 elif key == '.':
441 return self.dirstate.parents()[0]
443 return self.dirstate.parents()[0]
442 elif key == 'null':
444 elif key == 'null':
443 return nullid
445 return nullid
444 elif key == 'tip':
446 elif key == 'tip':
445 return self.changelog.tip()
447 return self.changelog.tip()
446 n = self.changelog._match(key)
448 n = self.changelog._match(key)
447 if n:
449 if n:
448 return n
450 return n
449 if key in self.tags():
451 if key in self.tags():
450 return self.tags()[key]
452 return self.tags()[key]
451 if key in self.branchtags():
453 if key in self.branchtags():
452 return self.branchtags()[key]
454 return self.branchtags()[key]
453 n = self.changelog._partialmatch(key)
455 n = self.changelog._partialmatch(key)
454 if n:
456 if n:
455 return n
457 return n
456
458
457 # can't find key, check if it might have come from damaged dirstate
459 # can't find key, check if it might have come from damaged dirstate
458 if key in self.dirstate.parents():
460 if key in self.dirstate.parents():
459 raise error.Abort(_("working directory has unknown parent '%s'!")
461 raise error.Abort(_("working directory has unknown parent '%s'!")
460 % short(key))
462 % short(key))
461 try:
463 try:
462 if len(key) == 20:
464 if len(key) == 20:
463 key = hex(key)
465 key = hex(key)
464 except:
466 except:
465 pass
467 pass
466 raise error.RepoLookupError(_("unknown revision '%s'") % key)
468 raise error.RepoLookupError(_("unknown revision '%s'") % key)
467
469
468 def lookupbranch(self, key, remote=None):
470 def lookupbranch(self, key, remote=None):
469 repo = remote or self
471 repo = remote or self
470 if key in repo.branchmap():
472 if key in repo.branchmap():
471 return key
473 return key
472
474
473 repo = (remote and remote.local()) and remote or self
475 repo = (remote and remote.local()) and remote or self
474 return repo[key].branch()
476 return repo[key].branch()
475
477
476 def local(self):
478 def local(self):
477 return True
479 return True
478
480
479 def join(self, f):
481 def join(self, f):
480 return os.path.join(self.path, f)
482 return os.path.join(self.path, f)
481
483
482 def wjoin(self, f):
484 def wjoin(self, f):
483 return os.path.join(self.root, f)
485 return os.path.join(self.root, f)
484
486
485 def file(self, f):
487 def file(self, f):
486 if f[0] == '/':
488 if f[0] == '/':
487 f = f[1:]
489 f = f[1:]
488 return filelog.filelog(self.sopener, f)
490 return filelog.filelog(self.sopener, f)
489
491
490 def changectx(self, changeid):
492 def changectx(self, changeid):
491 return self[changeid]
493 return self[changeid]
492
494
493 def parents(self, changeid=None):
495 def parents(self, changeid=None):
494 '''get list of changectxs for parents of changeid'''
496 '''get list of changectxs for parents of changeid'''
495 return self[changeid].parents()
497 return self[changeid].parents()
496
498
497 def filectx(self, path, changeid=None, fileid=None):
499 def filectx(self, path, changeid=None, fileid=None):
498 """changeid can be a changeset revision, node, or tag.
500 """changeid can be a changeset revision, node, or tag.
499 fileid can be a file revision or node."""
501 fileid can be a file revision or node."""
500 return context.filectx(self, path, changeid, fileid)
502 return context.filectx(self, path, changeid, fileid)
501
503
502 def getcwd(self):
504 def getcwd(self):
503 return self.dirstate.getcwd()
505 return self.dirstate.getcwd()
504
506
505 def pathto(self, f, cwd=None):
507 def pathto(self, f, cwd=None):
506 return self.dirstate.pathto(f, cwd)
508 return self.dirstate.pathto(f, cwd)
507
509
508 def wfile(self, f, mode='r'):
510 def wfile(self, f, mode='r'):
509 return self.wopener(f, mode)
511 return self.wopener(f, mode)
510
512
511 def _link(self, f):
513 def _link(self, f):
512 return os.path.islink(self.wjoin(f))
514 return os.path.islink(self.wjoin(f))
513
515
514 def _loadfilter(self, filter):
516 def _loadfilter(self, filter):
515 if filter not in self.filterpats:
517 if filter not in self.filterpats:
516 l = []
518 l = []
517 for pat, cmd in self.ui.configitems(filter):
519 for pat, cmd in self.ui.configitems(filter):
518 if cmd == '!':
520 if cmd == '!':
519 continue
521 continue
520 mf = matchmod.match(self.root, '', [pat])
522 mf = matchmod.match(self.root, '', [pat])
521 fn = None
523 fn = None
522 params = cmd
524 params = cmd
523 for name, filterfn in self._datafilters.iteritems():
525 for name, filterfn in self._datafilters.iteritems():
524 if cmd.startswith(name):
526 if cmd.startswith(name):
525 fn = filterfn
527 fn = filterfn
526 params = cmd[len(name):].lstrip()
528 params = cmd[len(name):].lstrip()
527 break
529 break
528 if not fn:
530 if not fn:
529 fn = lambda s, c, **kwargs: util.filter(s, c)
531 fn = lambda s, c, **kwargs: util.filter(s, c)
530 # Wrap old filters not supporting keyword arguments
532 # Wrap old filters not supporting keyword arguments
531 if not inspect.getargspec(fn)[2]:
533 if not inspect.getargspec(fn)[2]:
532 oldfn = fn
534 oldfn = fn
533 fn = lambda s, c, **kwargs: oldfn(s, c)
535 fn = lambda s, c, **kwargs: oldfn(s, c)
534 l.append((mf, fn, params))
536 l.append((mf, fn, params))
535 self.filterpats[filter] = l
537 self.filterpats[filter] = l
536
538
537 def _filter(self, filter, filename, data):
539 def _filter(self, filter, filename, data):
538 self._loadfilter(filter)
540 self._loadfilter(filter)
539
541
540 for mf, fn, cmd in self.filterpats[filter]:
542 for mf, fn, cmd in self.filterpats[filter]:
541 if mf(filename):
543 if mf(filename):
542 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
544 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
543 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
545 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
544 break
546 break
545
547
546 return data
548 return data
547
549
548 def adddatafilter(self, name, filter):
550 def adddatafilter(self, name, filter):
549 self._datafilters[name] = filter
551 self._datafilters[name] = filter
550
552
551 def wread(self, filename):
553 def wread(self, filename):
552 if self._link(filename):
554 if self._link(filename):
553 data = os.readlink(self.wjoin(filename))
555 data = os.readlink(self.wjoin(filename))
554 else:
556 else:
555 data = self.wopener(filename, 'r').read()
557 data = self.wopener(filename, 'r').read()
556 return self._filter("encode", filename, data)
558 return self._filter("encode", filename, data)
557
559
558 def wwrite(self, filename, data, flags):
560 def wwrite(self, filename, data, flags):
559 data = self._filter("decode", filename, data)
561 data = self._filter("decode", filename, data)
560 try:
562 try:
561 os.unlink(self.wjoin(filename))
563 os.unlink(self.wjoin(filename))
562 except OSError:
564 except OSError:
563 pass
565 pass
564 if 'l' in flags:
566 if 'l' in flags:
565 self.wopener.symlink(data, filename)
567 self.wopener.symlink(data, filename)
566 else:
568 else:
567 self.wopener(filename, 'w').write(data)
569 self.wopener(filename, 'w').write(data)
568 if 'x' in flags:
570 if 'x' in flags:
569 util.set_flags(self.wjoin(filename), False, True)
571 util.set_flags(self.wjoin(filename), False, True)
570
572
571 def wwritedata(self, filename, data):
573 def wwritedata(self, filename, data):
572 return self._filter("decode", filename, data)
574 return self._filter("decode", filename, data)
573
575
574 def transaction(self, desc):
576 def transaction(self, desc):
575 tr = self._transref and self._transref() or None
577 tr = self._transref and self._transref() or None
576 if tr and tr.running():
578 if tr and tr.running():
577 return tr.nest()
579 return tr.nest()
578
580
579 # abort here if the journal already exists
581 # abort here if the journal already exists
580 if os.path.exists(self.sjoin("journal")):
582 if os.path.exists(self.sjoin("journal")):
581 raise error.RepoError(
583 raise error.RepoError(
582 _("abandoned transaction found - run hg recover"))
584 _("abandoned transaction found - run hg recover"))
583
585
584 # save dirstate for rollback
586 # save dirstate for rollback
585 try:
587 try:
586 ds = self.opener("dirstate").read()
588 ds = self.opener("dirstate").read()
587 except IOError:
589 except IOError:
588 ds = ""
590 ds = ""
589 self.opener("journal.dirstate", "w").write(ds)
591 self.opener("journal.dirstate", "w").write(ds)
590 self.opener("journal.branch", "w").write(self.dirstate.branch())
592 self.opener("journal.branch", "w").write(self.dirstate.branch())
591 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
593 self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))
592
594
593 renames = [(self.sjoin("journal"), self.sjoin("undo")),
595 renames = [(self.sjoin("journal"), self.sjoin("undo")),
594 (self.join("journal.dirstate"), self.join("undo.dirstate")),
596 (self.join("journal.dirstate"), self.join("undo.dirstate")),
595 (self.join("journal.branch"), self.join("undo.branch")),
597 (self.join("journal.branch"), self.join("undo.branch")),
596 (self.join("journal.desc"), self.join("undo.desc"))]
598 (self.join("journal.desc"), self.join("undo.desc"))]
597 tr = transaction.transaction(self.ui.warn, self.sopener,
599 tr = transaction.transaction(self.ui.warn, self.sopener,
598 self.sjoin("journal"),
600 self.sjoin("journal"),
599 aftertrans(renames),
601 aftertrans(renames),
600 self.store.createmode)
602 self.store.createmode)
601 self._transref = weakref.ref(tr)
603 self._transref = weakref.ref(tr)
602 return tr
604 return tr
603
605
604 def recover(self):
606 def recover(self):
605 lock = self.lock()
607 lock = self.lock()
606 try:
608 try:
607 if os.path.exists(self.sjoin("journal")):
609 if os.path.exists(self.sjoin("journal")):
608 self.ui.status(_("rolling back interrupted transaction\n"))
610 self.ui.status(_("rolling back interrupted transaction\n"))
609 transaction.rollback(self.sopener, self.sjoin("journal"),
611 transaction.rollback(self.sopener, self.sjoin("journal"),
610 self.ui.warn)
612 self.ui.warn)
611 self.invalidate()
613 self.invalidate()
612 return True
614 return True
613 else:
615 else:
614 self.ui.warn(_("no interrupted transaction available\n"))
616 self.ui.warn(_("no interrupted transaction available\n"))
615 return False
617 return False
616 finally:
618 finally:
617 lock.release()
619 lock.release()
618
620
619 def rollback(self, dryrun=False):
621 def rollback(self, dryrun=False):
620 wlock = lock = None
622 wlock = lock = None
621 try:
623 try:
622 wlock = self.wlock()
624 wlock = self.wlock()
623 lock = self.lock()
625 lock = self.lock()
624 if os.path.exists(self.sjoin("undo")):
626 if os.path.exists(self.sjoin("undo")):
625 try:
627 try:
626 args = self.opener("undo.desc", "r").read().splitlines()
628 args = self.opener("undo.desc", "r").read().splitlines()
627 if len(args) >= 3 and self.ui.verbose:
629 if len(args) >= 3 and self.ui.verbose:
628 desc = _("rolling back to revision %s"
630 desc = _("rolling back to revision %s"
629 " (undo %s: %s)\n") % (
631 " (undo %s: %s)\n") % (
630 int(args[0]) - 1, args[1], args[2])
632 int(args[0]) - 1, args[1], args[2])
631 elif len(args) >= 2:
633 elif len(args) >= 2:
632 desc = _("rolling back to revision %s (undo %s)\n") % (
634 desc = _("rolling back to revision %s (undo %s)\n") % (
633 int(args[0]) - 1, args[1])
635 int(args[0]) - 1, args[1])
634 except IOError:
636 except IOError:
635 desc = _("rolling back unknown transaction\n")
637 desc = _("rolling back unknown transaction\n")
636 self.ui.status(desc)
638 self.ui.status(desc)
637 if dryrun:
639 if dryrun:
638 return
640 return
639 transaction.rollback(self.sopener, self.sjoin("undo"),
641 transaction.rollback(self.sopener, self.sjoin("undo"),
640 self.ui.warn)
642 self.ui.warn)
641 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
643 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
642 try:
644 try:
643 branch = self.opener("undo.branch").read()
645 branch = self.opener("undo.branch").read()
644 self.dirstate.setbranch(branch)
646 self.dirstate.setbranch(branch)
645 except IOError:
647 except IOError:
646 self.ui.warn(_("Named branch could not be reset, "
648 self.ui.warn(_("Named branch could not be reset, "
647 "current branch still is: %s\n")
649 "current branch still is: %s\n")
648 % encoding.tolocal(self.dirstate.branch()))
650 % encoding.tolocal(self.dirstate.branch()))
649 self.invalidate()
651 self.invalidate()
650 self.dirstate.invalidate()
652 self.dirstate.invalidate()
651 self.destroyed()
653 self.destroyed()
652 else:
654 else:
653 self.ui.warn(_("no rollback information available\n"))
655 self.ui.warn(_("no rollback information available\n"))
654 return 1
656 return 1
655 finally:
657 finally:
656 release(lock, wlock)
658 release(lock, wlock)
657
659
658 def invalidatecaches(self):
660 def invalidatecaches(self):
659 self._tags = None
661 self._tags = None
660 self._tagtypes = None
662 self._tagtypes = None
661 self.nodetagscache = None
663 self.nodetagscache = None
662 self._branchcache = None # in UTF-8
664 self._branchcache = None # in UTF-8
663 self._branchcachetip = None
665 self._branchcachetip = None
664
666
665 def invalidate(self):
667 def invalidate(self):
666 for a in "changelog manifest".split():
668 for a in "changelog manifest".split():
667 if a in self.__dict__:
669 if a in self.__dict__:
668 delattr(self, a)
670 delattr(self, a)
669 self.invalidatecaches()
671 self.invalidatecaches()
670
672
671 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
673 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
672 try:
674 try:
673 l = lock.lock(lockname, 0, releasefn, desc=desc)
675 l = lock.lock(lockname, 0, releasefn, desc=desc)
674 except error.LockHeld, inst:
676 except error.LockHeld, inst:
675 if not wait:
677 if not wait:
676 raise
678 raise
677 self.ui.warn(_("waiting for lock on %s held by %r\n") %
679 self.ui.warn(_("waiting for lock on %s held by %r\n") %
678 (desc, inst.locker))
680 (desc, inst.locker))
679 # default to 600 seconds timeout
681 # default to 600 seconds timeout
680 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
682 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
681 releasefn, desc=desc)
683 releasefn, desc=desc)
682 if acquirefn:
684 if acquirefn:
683 acquirefn()
685 acquirefn()
684 return l
686 return l
685
687
686 def lock(self, wait=True):
688 def lock(self, wait=True):
687 '''Lock the repository store (.hg/store) and return a weak reference
689 '''Lock the repository store (.hg/store) and return a weak reference
688 to the lock. Use this before modifying the store (e.g. committing or
690 to the lock. Use this before modifying the store (e.g. committing or
689 stripping). If you are opening a transaction, get a lock as well.)'''
691 stripping). If you are opening a transaction, get a lock as well.)'''
690 l = self._lockref and self._lockref()
692 l = self._lockref and self._lockref()
691 if l is not None and l.held:
693 if l is not None and l.held:
692 l.lock()
694 l.lock()
693 return l
695 return l
694
696
695 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
697 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
696 _('repository %s') % self.origroot)
698 _('repository %s') % self.origroot)
697 self._lockref = weakref.ref(l)
699 self._lockref = weakref.ref(l)
698 return l
700 return l
699
701
700 def wlock(self, wait=True):
702 def wlock(self, wait=True):
701 '''Lock the non-store parts of the repository (everything under
703 '''Lock the non-store parts of the repository (everything under
702 .hg except .hg/store) and return a weak reference to the lock.
704 .hg except .hg/store) and return a weak reference to the lock.
703 Use this before modifying files in .hg.'''
705 Use this before modifying files in .hg.'''
704 l = self._wlockref and self._wlockref()
706 l = self._wlockref and self._wlockref()
705 if l is not None and l.held:
707 if l is not None and l.held:
706 l.lock()
708 l.lock()
707 return l
709 return l
708
710
709 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
711 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
710 self.dirstate.invalidate, _('working directory of %s') %
712 self.dirstate.invalidate, _('working directory of %s') %
711 self.origroot)
713 self.origroot)
712 self._wlockref = weakref.ref(l)
714 self._wlockref = weakref.ref(l)
713 return l
715 return l
714
716
715 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
717 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
716 """
718 """
717 commit an individual file as part of a larger transaction
719 commit an individual file as part of a larger transaction
718 """
720 """
719
721
720 fname = fctx.path()
722 fname = fctx.path()
721 text = fctx.data()
723 text = fctx.data()
722 flog = self.file(fname)
724 flog = self.file(fname)
723 fparent1 = manifest1.get(fname, nullid)
725 fparent1 = manifest1.get(fname, nullid)
724 fparent2 = fparent2o = manifest2.get(fname, nullid)
726 fparent2 = fparent2o = manifest2.get(fname, nullid)
725
727
726 meta = {}
728 meta = {}
727 copy = fctx.renamed()
729 copy = fctx.renamed()
728 if copy and copy[0] != fname:
730 if copy and copy[0] != fname:
729 # Mark the new revision of this file as a copy of another
731 # Mark the new revision of this file as a copy of another
730 # file. This copy data will effectively act as a parent
732 # file. This copy data will effectively act as a parent
731 # of this new revision. If this is a merge, the first
733 # of this new revision. If this is a merge, the first
732 # parent will be the nullid (meaning "look up the copy data")
734 # parent will be the nullid (meaning "look up the copy data")
733 # and the second one will be the other parent. For example:
735 # and the second one will be the other parent. For example:
734 #
736 #
735 # 0 --- 1 --- 3 rev1 changes file foo
737 # 0 --- 1 --- 3 rev1 changes file foo
736 # \ / rev2 renames foo to bar and changes it
738 # \ / rev2 renames foo to bar and changes it
737 # \- 2 -/ rev3 should have bar with all changes and
739 # \- 2 -/ rev3 should have bar with all changes and
738 # should record that bar descends from
740 # should record that bar descends from
739 # bar in rev2 and foo in rev1
741 # bar in rev2 and foo in rev1
740 #
742 #
741 # this allows this merge to succeed:
743 # this allows this merge to succeed:
742 #
744 #
743 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
745 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
744 # \ / merging rev3 and rev4 should use bar@rev2
746 # \ / merging rev3 and rev4 should use bar@rev2
745 # \- 2 --- 4 as the merge base
747 # \- 2 --- 4 as the merge base
746 #
748 #
747
749
748 cfname = copy[0]
750 cfname = copy[0]
749 crev = manifest1.get(cfname)
751 crev = manifest1.get(cfname)
750 newfparent = fparent2
752 newfparent = fparent2
751
753
752 if manifest2: # branch merge
754 if manifest2: # branch merge
753 if fparent2 == nullid or crev is None: # copied on remote side
755 if fparent2 == nullid or crev is None: # copied on remote side
754 if cfname in manifest2:
756 if cfname in manifest2:
755 crev = manifest2[cfname]
757 crev = manifest2[cfname]
756 newfparent = fparent1
758 newfparent = fparent1
757
759
758 # find source in nearest ancestor if we've lost track
760 # find source in nearest ancestor if we've lost track
759 if not crev:
761 if not crev:
760 self.ui.debug(" %s: searching for copy revision for %s\n" %
762 self.ui.debug(" %s: searching for copy revision for %s\n" %
761 (fname, cfname))
763 (fname, cfname))
762 for ancestor in self['.'].ancestors():
764 for ancestor in self['.'].ancestors():
763 if cfname in ancestor:
765 if cfname in ancestor:
764 crev = ancestor[cfname].filenode()
766 crev = ancestor[cfname].filenode()
765 break
767 break
766
768
767 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
769 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
768 meta["copy"] = cfname
770 meta["copy"] = cfname
769 meta["copyrev"] = hex(crev)
771 meta["copyrev"] = hex(crev)
770 fparent1, fparent2 = nullid, newfparent
772 fparent1, fparent2 = nullid, newfparent
771 elif fparent2 != nullid:
773 elif fparent2 != nullid:
772 # is one parent an ancestor of the other?
774 # is one parent an ancestor of the other?
773 fparentancestor = flog.ancestor(fparent1, fparent2)
775 fparentancestor = flog.ancestor(fparent1, fparent2)
774 if fparentancestor == fparent1:
776 if fparentancestor == fparent1:
775 fparent1, fparent2 = fparent2, nullid
777 fparent1, fparent2 = fparent2, nullid
776 elif fparentancestor == fparent2:
778 elif fparentancestor == fparent2:
777 fparent2 = nullid
779 fparent2 = nullid
778
780
779 # is the file changed?
781 # is the file changed?
780 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
782 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
781 changelist.append(fname)
783 changelist.append(fname)
782 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
784 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
783
785
784 # are just the flags changed during merge?
786 # are just the flags changed during merge?
785 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
787 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
786 changelist.append(fname)
788 changelist.append(fname)
787
789
788 return fparent1
790 return fparent1
789
791
790 def commit(self, text="", user=None, date=None, match=None, force=False,
792 def commit(self, text="", user=None, date=None, match=None, force=False,
791 editor=False, extra={}):
793 editor=False, extra={}):
792 """Add a new revision to current repository.
794 """Add a new revision to current repository.
793
795
794 Revision information is gathered from the working directory,
796 Revision information is gathered from the working directory,
795 match can be used to filter the committed files. If editor is
797 match can be used to filter the committed files. If editor is
796 supplied, it is called to get a commit message.
798 supplied, it is called to get a commit message.
797 """
799 """
798
800
799 def fail(f, msg):
801 def fail(f, msg):
800 raise util.Abort('%s: %s' % (f, msg))
802 raise util.Abort('%s: %s' % (f, msg))
801
803
802 if not match:
804 if not match:
803 match = matchmod.always(self.root, '')
805 match = matchmod.always(self.root, '')
804
806
805 if not force:
807 if not force:
806 vdirs = []
808 vdirs = []
807 match.dir = vdirs.append
809 match.dir = vdirs.append
808 match.bad = fail
810 match.bad = fail
809
811
810 wlock = self.wlock()
812 wlock = self.wlock()
811 try:
813 try:
812 wctx = self[None]
814 wctx = self[None]
813 merge = len(wctx.parents()) > 1
815 merge = len(wctx.parents()) > 1
814
816
815 if (not force and merge and match and
817 if (not force and merge and match and
816 (match.files() or match.anypats())):
818 (match.files() or match.anypats())):
817 raise util.Abort(_('cannot partially commit a merge '
819 raise util.Abort(_('cannot partially commit a merge '
818 '(do not specify files or patterns)'))
820 '(do not specify files or patterns)'))
819
821
820 changes = self.status(match=match, clean=force)
822 changes = self.status(match=match, clean=force)
821 if force:
823 if force:
822 changes[0].extend(changes[6]) # mq may commit unchanged files
824 changes[0].extend(changes[6]) # mq may commit unchanged files
823
825
824 # check subrepos
826 # check subrepos
825 subs = []
827 subs = []
826 removedsubs = set()
828 removedsubs = set()
827 for p in wctx.parents():
829 for p in wctx.parents():
828 removedsubs.update(s for s in p.substate if match(s))
830 removedsubs.update(s for s in p.substate if match(s))
829 for s in wctx.substate:
831 for s in wctx.substate:
830 removedsubs.discard(s)
832 removedsubs.discard(s)
831 if match(s) and wctx.sub(s).dirty():
833 if match(s) and wctx.sub(s).dirty():
832 subs.append(s)
834 subs.append(s)
833 if (subs or removedsubs):
835 if (subs or removedsubs):
834 if (not match('.hgsub') and
836 if (not match('.hgsub') and
835 '.hgsub' in (wctx.modified() + wctx.added())):
837 '.hgsub' in (wctx.modified() + wctx.added())):
836 raise util.Abort(_("can't commit subrepos without .hgsub"))
838 raise util.Abort(_("can't commit subrepos without .hgsub"))
837 if '.hgsubstate' not in changes[0]:
839 if '.hgsubstate' not in changes[0]:
838 changes[0].insert(0, '.hgsubstate')
840 changes[0].insert(0, '.hgsubstate')
839
841
840 # make sure all explicit patterns are matched
842 # make sure all explicit patterns are matched
841 if not force and match.files():
843 if not force and match.files():
842 matched = set(changes[0] + changes[1] + changes[2])
844 matched = set(changes[0] + changes[1] + changes[2])
843
845
844 for f in match.files():
846 for f in match.files():
845 if f == '.' or f in matched or f in wctx.substate:
847 if f == '.' or f in matched or f in wctx.substate:
846 continue
848 continue
847 if f in changes[3]: # missing
849 if f in changes[3]: # missing
848 fail(f, _('file not found!'))
850 fail(f, _('file not found!'))
849 if f in vdirs: # visited directory
851 if f in vdirs: # visited directory
850 d = f + '/'
852 d = f + '/'
851 for mf in matched:
853 for mf in matched:
852 if mf.startswith(d):
854 if mf.startswith(d):
853 break
855 break
854 else:
856 else:
855 fail(f, _("no match under directory!"))
857 fail(f, _("no match under directory!"))
856 elif f not in self.dirstate:
858 elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in subs:
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.relpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

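    # Illustrative usage sketch (not part of this module, and it assumes the
    # usual commit() keyword signature with text= and user= defined earlier
    # in this class). commit() returns the new changelog node, or None when
    # there is nothing to commit (the early "return None" above):
    #
    #   node = repo.commit(text='fix a bug', user='alice')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
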
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

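    # How commitctx() is typically driven, mirroring commit() above (a
    # sketch, not an additional API): build a context carrying the revision
    # data, then hand it over. Passing error=True keeps going past files
    # that vanished from the working directory (the ENOENT branch above):
    #
    #   cctx = context.workingctx(self, text, user, date, extra, changes)
    #   node = self.commitctx(cctx, True)
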
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

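    # Example of consuming the seven sorted lists returned by status()
    # (a hypothetical caller, not part of this module):
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True, clean=True)
    #   for f in modified:
    #       repo.ui.write('M %s\n' % f)
    #
    # unknown, ignored and clean stay empty unless the matching keyword
    # argument is set to True.
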
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

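    # Usage sketch (illustrative only): heads of the branch the working
    # directory is on, newest first; heads marked closed via 'close' in
    # the changeset extra are filtered out unless closed=True:
    #
    #   for node in repo.branchheads():
    #       repo.ui.write('%s\n' % hex(node))
    #
    #   allheads = repo.branchheads('default', closed=True)
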
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

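    # between() samples the first-parent ancestry at exponentially growing
    # distances (appending when i == f, with f doubling), so each returned
    # list holds the ancestors 1, 2, 4, 8, ... steps below top, stopping at
    # bottom. For a linear history with revisions 10 (top) down to 0
    # (bottom), roughly (node names here are hypothetical):
    #
    #   repo.between([(node10, node0)])  # -> [[node9, node8, node6, node2]]
    #
    # The discovery protocol uses this logarithmically spaced sample to
    # narrow down common ancestors without transferring whole histories.
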
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it must
                # push. once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

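    # Interpreting push()'s return value per the docstring above (sketch of
    # a hypothetical caller):
    #
    #   r = repo.push(remote)
    #   if r == 0:
    #       pass  # HTTP error, or nothing to push
    #   elif r == 1:
    #       pass  # pushed with head count unchanged, or refused to push
    #   else:
    #       pass  # head count change, as reported by addchangegroup()
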
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
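
        # Shape of the extranodes argument described above (the values here
        # are placeholders, not real nodes):
        #
        #   extranodes = {
        #       'some/file': [(filenode, linknode)],
        #       1: [(manifestnode, linknode)],  # key 1 means the manifest
        #   }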

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # Compute the list of changesets in this changegroup.
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)

        # We assume that all ancestors of bases are known
        commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases]))

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
                clrev = revlog.linkrev(revlog.rev(n))
                if clrev in commonrevs:
                    hasset.add(n)
            for n in hasset:
                missingnodes.pop(n, None)
            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
                missingnodes.pop(revlog.node(r), None)

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling changes'), None)

            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            for cnt, chnk in enumerate(group):
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            for cnt, chnk in enumerate(cl.group(nodes, identity, collect)):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for cnt, chnk in enumerate(mnfst.group(nodeiter,
                                                   lookuplinkrev_func(mnfst))):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

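    # The generators above produce the raw changegroup stream: a changelog
    # group, a manifest group, then one group per changed file (each
    # preceded by chunkheader(len(fname)) and fname), terminated by
    # changegroup.closechunk(). A caller drains the returned chunkbuffer
    # like any file-ish object (illustrative sketch; 'out' stands in for
    # any writable file object):
    #
    #   cg = repo._changegroup(nodes, 'push')
    #   while True:
    #       data = cg.read(4096)
    #       if not data:
    #           break
    #       out.write(data)
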
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            chunkiter = changegroup.chunkiter(source, progress=pr)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            chunkiter = changegroup.chunkiter(source, progress=pr)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

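    # Decoding addchangegroup()'s return value per the docstring above
    # (a hypothetical caller):
    #
    #   r = repo.addchangegroup(cg, 'pull', url)
    #   if r == 0:
    #       pass              # no source, or nothing changed
    #   elif r > 1:
    #       added = r - 1     # head count grew by r - 1
    #   elif r < 0:
    #       removed = -r - 1  # head count shrank by -r - 1
    #   else:
    #       pass              # r == 1: head count unchanged
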
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

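    # The stream_out wire format parsed above, line by line (a reading of
    # this method, shown for orientation rather than as a protocol spec):
    #
    #   "0\n"                            # response code: 0 = OK, 1/2 = errors
    #   "<total_files> <total_bytes>\n"  # space-separated integers
    #   then, per file:
    #   "<store path>\0<size>\n"         # followed by exactly <size> raw bytes
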
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

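# A sketch of how such a callback might be used (the file names here are
# illustrative, not a claim about the transaction machinery's exact inputs):
#
#   onclose = aftertrans([('journal', 'undo')])
#   onclose()   # renames journal -> undo without holding a repo reference
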
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True