changegroupsubset(): refactor the prune() functions
Benoit Boissinot
r11659:deecf195 default
@@ -1,1840 +1,1824 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
import url as urlmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self.sopener.options = {}

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

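    # Illustrative sketch, not part of the original file: tags() returns a
    # {tagname: node} dict in local encoding, always including 'tip'.  A
    # caller holding a localrepository instance 'repo' might do:
    #
    #     from node import hex
    #     for name, n in repo.tags().iteritems():
    #         print "%s -> %s" % (name, hex(n))
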
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

        return self._branchcache

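    # Illustrative sketch, not part of the original file: branchmap() maps
    # each branch name to its list of head nodes, while branchtags() below
    # reduces that to a single tipmost (preferably open) head, e.g.:
    #
    #     heads = repo.branchmap().get('default', [])
    #     tipmost = repo.branchtags()['default']
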
    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

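    # Illustrative sketch, not part of the original file: lookup() accepts a
    # revision number, '.', 'null', 'tip', a full or partial hex node, a tag
    # or a branch name, and returns a binary node:
    #
    #     node = repo.lookup('tip')
    #     ctx = repo[node]   # the same changeset via __getitem__
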
    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())
        self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc))

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch")),
                   (self.join("journal.desc"), self.join("undo.desc"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

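    # Illustrative sketch, not part of the original file, of the journal/
    # transaction pattern used by commitctx() further down:
    #
    #     tr = repo.transaction("example")
    #     try:
    #         ... write to the revlogs through tr ...
    #         tr.close()    # commit the transaction
    #     finally:
    #         tr.release()  # rolls back if close() was never reached
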
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener("undo.desc", "r").read().splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("rolling back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("rolling back to revision %s (undo %s)\n") % (
                               int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

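    # Illustrative sketch, not part of the original file: callers take the
    # wlock before touching the working directory and the store lock before
    # writing to .hg/store, releasing in a finally block exactly as commit()
    # below does:
    #
    #     wlock = repo.wlock()
    #     try:
    #         ... modify dirstate / working copy ...
    #     finally:
    #         wlock.release()
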
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in subs:
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.relpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
980 lock.release()
981
981
982 def destroyed(self):
982 def destroyed(self):
983 '''Inform the repository that nodes have been destroyed.
983 '''Inform the repository that nodes have been destroyed.
984 Intended for use by strip and rollback, so there's a common
984 Intended for use by strip and rollback, so there's a common
985 place for anything that has to be done after destroying history.'''
985 place for anything that has to be done after destroying history.'''
986 # XXX it might be nice if we could take the list of destroyed
986 # XXX it might be nice if we could take the list of destroyed
987 # nodes, but I don't see an easy way for rollback() to do that
987 # nodes, but I don't see an easy way for rollback() to do that
988
988
989 # Ensure the persistent tag cache is updated. Doing it now
989 # Ensure the persistent tag cache is updated. Doing it now
990 # means that the tag cache only has to worry about destroyed
990 # means that the tag cache only has to worry about destroyed
991 # heads immediately after a strip/rollback. That in turn
991 # heads immediately after a strip/rollback. That in turn
992 # guarantees that "cachetip == currenttip" (comparing both rev
992 # guarantees that "cachetip == currenttip" (comparing both rev
993 # and node) always means no nodes have been added or destroyed.
993 # and node) always means no nodes have been added or destroyed.
994
994
995 # XXX this is suboptimal when qrefresh'ing: we strip the current
995 # XXX this is suboptimal when qrefresh'ing: we strip the current
996 # head, refresh the tag cache, then immediately add a new head.
996 # head, refresh the tag cache, then immediately add a new head.
997 # But I think doing it this way is necessary for the "instant
997 # But I think doing it this way is necessary for the "instant
998 # tag cache retrieval" case to work.
998 # tag cache retrieval" case to work.
999 self.invalidatecaches()
999 self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
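        # (editor's note, not in the original file) For each starting node
        # this follows first parents until it hits a merge (second parent
        # set) or the root, then records (start, branch node, parent1,
        # parent2) -- a helper for the legacy wire-protocol discovery,
        # like between() below.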
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
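        # (editor's note, not in the original file) For each (top, bottom)
        # pair, walk towards bottom along first parents and record nodes at
        # exponentially growing distances from top (1, 2, 4, 8, ...); the
        # discovery protocol uses these sparse samples to narrow down the
        # common ancestor without transferring the whole chain.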
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "the other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url(), lock=lock)
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            ret = discovery.prepush(self, remote, force, revs, newbranch)
            if ret[0] is None:
                # and here we return 0 for "nothing to push" or 1 for
                # "something to push but I refuse"
                return ret[1]

            cg, remote_heads = ret
            if unbundle:
                # local repo finds heads on server, finds out what revs it
                # must push.  once revs transferred, if server finds it has
                # different heads (someone else won commit/push race), server
                # aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                return remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                return remote.addchangegroup(cg, 'push', self.url(), lock=lock)
        finally:
            if lock is not None:
                lock.release()

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
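        # (editor's note, illustrative) Both call sites are visible in this
        # file: pull() requests the remote side's version with
        #     cg = remote.changegroupsubset(fetch, heads, 'pull')
        # and changegroup() below delegates to it with every local head:
        #     return self.changegroupsubset(basenodes, self.heads(), source)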

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        if not bases:
            bases = [nullid]
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                return self._changegroup(msng_cl_lst, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)

        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

-        # If we determine that a particular file or manifest node must be a
-        # node that the recipient of the changegroup will already have, we can
-        # also assume the recipient will have all the parents.  This function
-        # prunes them from the set of missing nodes.
-        def prune_parents(revlog, hasset, msngset):
-            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
-                msngset.pop(revlog.node(r), None)
-
        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to: it
            # assumes that a filenode belongs to the changenode of the first
            # manifest that references it.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r - 1 in mnfst.parentrevs(r):
                    # If the previous rev is one of the parents,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        # And if the file is in the list of files we care
                        # about.
                        if f in changedfiles:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
            return collect_msng_filenodes

-        # We have a list of filenodes we think we need for a file, lets remove
-        # all those we know the recipient must have.
-        def prune_filenodes(f, filerevlog, missingnodes):
+        # If we determine that a particular file or manifest node must be a
+        # node that the recipient of the changegroup will already have, we can
+        # also assume the recipient will have all the parents.  This function
+        # prunes them from the set of missing nodes.
+        # XXX is it even useful? the testsuite doesn't trigger it
+        def prune(revlog, missingnodes):
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in missingnodes:
-                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
+                clnode = cl.node(revlog.linkrev(revlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
-            prune_parents(filerevlog, hasset, missingnodes)
+            for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]):
+                missingnodes.pop(revlog.node(r), None)

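        # (editor's note, not in the original file) The hunk above, with the
        # two below, is the refactoring this revision performs: the former
        # prune_parents()/prune_filenodes() pair is folded into the single
        # prune(revlog, missingnodes) helper, which now serves the manifest
        # revlog and the file revlogs alike.
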
        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = set()
            collect = changegroup.collector(cl, msng_mnfst_set, changedfiles)

            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity, collect)
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                cnt += 1
            self.ui.progress(_('bundling changes'), None)

-
-            # Figure out which manifest nodes (of the ones we think might be
-            # part of the changegroup) the recipient must know about and
-            # remove them from the changegroup.
-            has_mnfst_set = set()
-            for n in msng_mnfst_set:
-                # If a 'missing' manifest thinks it belongs to a changenode
-                # the recipient is assumed to have, obviously the recipient
-                # must have that manifest.
-                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
-                if linknode in has_cl_set:
-                    has_mnfst_set.add(n)
-            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
+            prune(mnfst, msng_mnfst_set)
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(key=mnfst.rev)
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst,
                                lambda mnode: msng_mnfst_set[mnode],
                                filenode_collector(changedfiles))
            cnt = 0
            for chnk in group:
                yield chnk
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                cnt += 1
            self.ui.progress(_('bundling manifests'), None)

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles.add(fname)
            # Go through all our files in order sorted by name.
            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                missingfnodes = msng_filenode_set.pop(fname, {})
-                prune_filenodes(fname, filerevlog, missingfnodes)
+                prune(filerevlog, missingfnodes)
                add_extra_nodes(fname, missingfnodes)
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if missingfnodes:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision # (topological order)
                    nodeiter = list(missingfnodes)
                    nodeiter.sort(key=filerevlog.rev)
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(nodeiter,
                                             lambda fnode: missingfnodes[fnode])
                    for chnk in group:
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            # Signal that no more groups are left.
            yield changegroup.closechunk()
            self.ui.progress(_('bundling files'), None)

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't.  Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""
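        # (editor's note, not in the original file) "That a recipient
        # doesn't": a revlog revision is sent exactly when its linkrev
        # points at one of the outgoing changesets; gennodelst() below
        # implements that filter.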

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookuplinkrev_func(revlog):
            def lookuplinkrev(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuplinkrev

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files
            changedfiles = set()
            mmfs = {}
            collect = changegroup.collector(cl, mmfs, changedfiles)

            cnt = 0
            for chnk in cl.group(nodes, identity, collect):
                self.ui.progress(_('bundling changes'), cnt, unit=_('chunks'))
                cnt += 1
                yield chnk
            self.ui.progress(_('bundling changes'), None)

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            cnt = 0
            for chnk in mnfst.group(nodeiter, lookuplinkrev_func(mnfst)):
                self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks'))
                cnt += 1
                yield chnk
            self.ui.progress(_('bundling manifests'), None)

            cnt = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuplinkrev_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        self.ui.progress(
                            _('bundling files'), cnt, item=fname, unit=_('chunks'))
                        cnt += 1
                        yield chnk
            self.ui.progress(_('bundling files'), None)

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'.  url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
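        # (editor's note, worked example) One head before and three after
        # gives 3 - 1 + 1 = 3; three before and two after gives
        # 2 - 3 - 1 = -2; an unchanged head count gives 0 + 1 = 1.  See the
        # return statements at the end of this method.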
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            chunkiter = changegroup.chunkiter(source, progress=pr)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            chunkiter = changegroup.chunkiter(source, progress=pr)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
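        # The remainder of the stream is a header line of
        # "<total files> <total bytes>", then, per file, a line of
        # "<store path>\0<size>" followed by exactly <size> bytes of raw
        # store data.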
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
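        # Report every head as new: the store was populated wholesale, so
        # this mirrors the "never return 0" convention of addchangegroup.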
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
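
    # A streaming clone is only attempted for a full clone from a server
    # that advertises the 'stream' capability; a hypothetical caller:
    #
    #   repo.clone(remote, stream=True)   # may stream the whole store
    #   repo.clone(remote, heads=[node])  # partial clone always pulls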

    def pushkey(self, namespace, key, old, new):
        return pushkey.push(self, namespace, key, old, new)

    def listkeys(self, namespace):
        return pushkey.list(self, namespace)

# used to avoid circular references so destructors work
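# (the closure captures only a plain list of (src, dest) pairs, never the
# transaction or repository objects; it is installed as the transaction's
# post-close callback, e.g. to rename journal files to their undo.* names)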
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True