commit: move lots of commitctx outside of the repo lock
Matt Mackall
r8411:4d591635 default
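The change narrows the scope of the repository lock in commitctx(): the read-only preparation (collecting the modified/added file lists, reading the parent changelog and manifest entries, and running the precommit hook) now happens before self.lock() is taken, and the lock is acquired only just before the transaction is opened. The following is a minimal illustrative sketch of that locking pattern only, not the real localrepo code; all names in it (repo_lock, store, run_precommit_hook, commit_sketch) are hypothetical stand-ins.

    # Illustrative sketch only (not the real localrepo.commitctx).
    # Pattern: do read-only preparation and the precommit hook first,
    # then hold the repository lock only around the write transaction.
    import threading

    repo_lock = threading.Lock()      # stands in for localrepository.lock()
    store = []                        # stands in for the on-disk store

    def run_precommit_hook(files, message):
        # read-only check; raising here aborts before any lock is taken
        if not message:
            raise ValueError("empty commit message")

    def commit_sketch(modified, added, message):
        # read-only preparation: no lock needed yet
        files = sorted(modified + added)
        run_precommit_hook(files, message)

        # take the lock only for the part that writes to the store
        with repo_lock:
            store.append((files, message))   # "transaction" body
            return len(store) - 1            # new revision number

    print(commit_sketch(["a.txt"], ["b.txt"], "example commit"))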
@@ -1,2103 +1,2103 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 from lock import release
16 from lock import release
17 import weakref, stat, errno, os, time, inspect
17 import weakref, stat, errno, os, time, inspect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class localrepository(repo.repository):
20 class localrepository(repo.repository):
21 capabilities = set(('lookup', 'changegroupsubset'))
21 capabilities = set(('lookup', 'changegroupsubset'))
22 supported = set('revlogv1 store fncache'.split())
22 supported = set('revlogv1 store fncache'.split())
23
23
24 def __init__(self, baseui, path=None, create=0):
24 def __init__(self, baseui, path=None, create=0):
25 repo.repository.__init__(self)
25 repo.repository.__init__(self)
26 self.root = os.path.realpath(path)
26 self.root = os.path.realpath(path)
27 self.path = os.path.join(self.root, ".hg")
27 self.path = os.path.join(self.root, ".hg")
28 self.origroot = path
28 self.origroot = path
29 self.opener = util.opener(self.path)
29 self.opener = util.opener(self.path)
30 self.wopener = util.opener(self.root)
30 self.wopener = util.opener(self.root)
31
31
32 if not os.path.isdir(self.path):
32 if not os.path.isdir(self.path):
33 if create:
33 if create:
34 if not os.path.exists(path):
34 if not os.path.exists(path):
35 os.mkdir(path)
35 os.mkdir(path)
36 os.mkdir(self.path)
36 os.mkdir(self.path)
37 requirements = ["revlogv1"]
37 requirements = ["revlogv1"]
38 if baseui.configbool('format', 'usestore', True):
38 if baseui.configbool('format', 'usestore', True):
39 os.mkdir(os.path.join(self.path, "store"))
39 os.mkdir(os.path.join(self.path, "store"))
40 requirements.append("store")
40 requirements.append("store")
41 if baseui.configbool('format', 'usefncache', True):
41 if baseui.configbool('format', 'usefncache', True):
42 requirements.append("fncache")
42 requirements.append("fncache")
43 # create an invalid changelog
43 # create an invalid changelog
44 self.opener("00changelog.i", "a").write(
44 self.opener("00changelog.i", "a").write(
45 '\0\0\0\2' # represents revlogv2
45 '\0\0\0\2' # represents revlogv2
46 ' dummy changelog to prevent using the old repo layout'
46 ' dummy changelog to prevent using the old repo layout'
47 )
47 )
48 reqfile = self.opener("requires", "w")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
49 for r in requirements:
50 reqfile.write("%s\n" % r)
50 reqfile.write("%s\n" % r)
51 reqfile.close()
51 reqfile.close()
52 else:
52 else:
53 raise error.RepoError(_("repository %s not found") % path)
53 raise error.RepoError(_("repository %s not found") % path)
54 elif create:
54 elif create:
55 raise error.RepoError(_("repository %s already exists") % path)
55 raise error.RepoError(_("repository %s already exists") % path)
56 else:
56 else:
57 # find requirements
57 # find requirements
58 requirements = set()
58 requirements = set()
59 try:
59 try:
60 requirements = set(self.opener("requires").read().splitlines())
60 requirements = set(self.opener("requires").read().splitlines())
61 except IOError, inst:
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64 for r in requirements - self.supported:
64 for r in requirements - self.supported:
65 raise error.RepoError(_("requirement '%s' not supported") % r)
65 raise error.RepoError(_("requirement '%s' not supported") % r)
66
66
67 self.store = store.store(requirements, self.path, util.opener)
67 self.store = store.store(requirements, self.path, util.opener)
68 self.spath = self.store.path
68 self.spath = self.store.path
69 self.sopener = self.store.opener
69 self.sopener = self.store.opener
70 self.sjoin = self.store.join
70 self.sjoin = self.store.join
71 self.opener.createmode = self.store.createmode
71 self.opener.createmode = self.store.createmode
72
72
73 self.baseui = baseui
73 self.baseui = baseui
74 self.ui = baseui.copy()
74 self.ui = baseui.copy()
75 try:
75 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
76 self.ui.readconfig(self.join("hgrc"), self.root)
77 extensions.loadall(self.ui)
77 extensions.loadall(self.ui)
78 except IOError:
78 except IOError:
79 pass
79 pass
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self._tagstypecache = None
82 self._tagstypecache = None
83 self.branchcache = None
83 self.branchcache = None
84 self._ubranchcache = None # UTF-8 version of branchcache
84 self._ubranchcache = None # UTF-8 version of branchcache
85 self._branchcachetip = None
85 self._branchcachetip = None
86 self.nodetagscache = None
86 self.nodetagscache = None
87 self.filterpats = {}
87 self.filterpats = {}
88 self._datafilters = {}
88 self._datafilters = {}
89 self._transref = self._lockref = self._wlockref = None
89 self._transref = self._lockref = self._wlockref = None
90
90
91 @propertycache
91 @propertycache
92 def changelog(self):
92 def changelog(self):
93 c = changelog.changelog(self.sopener)
93 c = changelog.changelog(self.sopener)
94 if 'HG_PENDING' in os.environ:
94 if 'HG_PENDING' in os.environ:
95 p = os.environ['HG_PENDING']
95 p = os.environ['HG_PENDING']
96 if p.startswith(self.root):
96 if p.startswith(self.root):
97 c.readpending('00changelog.i.a')
97 c.readpending('00changelog.i.a')
98 self.sopener.defversion = c.version
98 self.sopener.defversion = c.version
99 return c
99 return c
100
100
101 @propertycache
101 @propertycache
102 def manifest(self):
102 def manifest(self):
103 return manifest.manifest(self.sopener)
103 return manifest.manifest(self.sopener)
104
104
105 @propertycache
105 @propertycache
106 def dirstate(self):
106 def dirstate(self):
107 return dirstate.dirstate(self.opener, self.ui, self.root)
107 return dirstate.dirstate(self.opener, self.ui, self.root)
108
108
109 def __getitem__(self, changeid):
109 def __getitem__(self, changeid):
110 if changeid == None:
110 if changeid == None:
111 return context.workingctx(self)
111 return context.workingctx(self)
112 return context.changectx(self, changeid)
112 return context.changectx(self, changeid)
113
113
114 def __nonzero__(self):
114 def __nonzero__(self):
115 return True
115 return True
116
116
117 def __len__(self):
117 def __len__(self):
118 return len(self.changelog)
118 return len(self.changelog)
119
119
120 def __iter__(self):
120 def __iter__(self):
121 for i in xrange(len(self)):
121 for i in xrange(len(self)):
122 yield i
122 yield i
123
123
124 def url(self):
124 def url(self):
125 return 'file:' + self.root
125 return 'file:' + self.root
126
126
127 def hook(self, name, throw=False, **args):
127 def hook(self, name, throw=False, **args):
128 return hook.hook(self.ui, self, name, throw, **args)
128 return hook.hook(self.ui, self, name, throw, **args)
129
129
130 tag_disallowed = ':\r\n'
130 tag_disallowed = ':\r\n'
131
131
132 def _tag(self, names, node, message, local, user, date, extra={}):
132 def _tag(self, names, node, message, local, user, date, extra={}):
133 if isinstance(names, str):
133 if isinstance(names, str):
134 allchars = names
134 allchars = names
135 names = (names,)
135 names = (names,)
136 else:
136 else:
137 allchars = ''.join(names)
137 allchars = ''.join(names)
138 for c in self.tag_disallowed:
138 for c in self.tag_disallowed:
139 if c in allchars:
139 if c in allchars:
140 raise util.Abort(_('%r cannot be used in a tag name') % c)
140 raise util.Abort(_('%r cannot be used in a tag name') % c)
141
141
142 for name in names:
142 for name in names:
143 self.hook('pretag', throw=True, node=hex(node), tag=name,
143 self.hook('pretag', throw=True, node=hex(node), tag=name,
144 local=local)
144 local=local)
145
145
146 def writetags(fp, names, munge, prevtags):
146 def writetags(fp, names, munge, prevtags):
147 fp.seek(0, 2)
147 fp.seek(0, 2)
148 if prevtags and prevtags[-1] != '\n':
148 if prevtags and prevtags[-1] != '\n':
149 fp.write('\n')
149 fp.write('\n')
150 for name in names:
150 for name in names:
151 m = munge and munge(name) or name
151 m = munge and munge(name) or name
152 if self._tagstypecache and name in self._tagstypecache:
152 if self._tagstypecache and name in self._tagstypecache:
153 old = self.tagscache.get(name, nullid)
153 old = self.tagscache.get(name, nullid)
154 fp.write('%s %s\n' % (hex(old), m))
154 fp.write('%s %s\n' % (hex(old), m))
155 fp.write('%s %s\n' % (hex(node), m))
155 fp.write('%s %s\n' % (hex(node), m))
156 fp.close()
156 fp.close()
157
157
158 prevtags = ''
158 prevtags = ''
159 if local:
159 if local:
160 try:
160 try:
161 fp = self.opener('localtags', 'r+')
161 fp = self.opener('localtags', 'r+')
162 except IOError:
162 except IOError:
163 fp = self.opener('localtags', 'a')
163 fp = self.opener('localtags', 'a')
164 else:
164 else:
165 prevtags = fp.read()
165 prevtags = fp.read()
166
166
167 # local tags are stored in the current charset
167 # local tags are stored in the current charset
168 writetags(fp, names, None, prevtags)
168 writetags(fp, names, None, prevtags)
169 for name in names:
169 for name in names:
170 self.hook('tag', node=hex(node), tag=name, local=local)
170 self.hook('tag', node=hex(node), tag=name, local=local)
171 return
171 return
172
172
173 try:
173 try:
174 fp = self.wfile('.hgtags', 'rb+')
174 fp = self.wfile('.hgtags', 'rb+')
175 except IOError:
175 except IOError:
176 fp = self.wfile('.hgtags', 'ab')
176 fp = self.wfile('.hgtags', 'ab')
177 else:
177 else:
178 prevtags = fp.read()
178 prevtags = fp.read()
179
179
180 # committed tags are stored in UTF-8
180 # committed tags are stored in UTF-8
181 writetags(fp, names, encoding.fromlocal, prevtags)
181 writetags(fp, names, encoding.fromlocal, prevtags)
182
182
183 if '.hgtags' not in self.dirstate:
183 if '.hgtags' not in self.dirstate:
184 self.add(['.hgtags'])
184 self.add(['.hgtags'])
185
185
186 tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)
186 tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)
187
187
188 for name in names:
188 for name in names:
189 self.hook('tag', node=hex(node), tag=name, local=local)
189 self.hook('tag', node=hex(node), tag=name, local=local)
190
190
191 return tagnode
191 return tagnode
192
192
193 def tag(self, names, node, message, local, user, date):
193 def tag(self, names, node, message, local, user, date):
194 '''tag a revision with one or more symbolic names.
194 '''tag a revision with one or more symbolic names.
195
195
196 names is a list of strings or, when adding a single tag, names may be a
196 names is a list of strings or, when adding a single tag, names may be a
197 string.
197 string.
198
198
199 if local is True, the tags are stored in a per-repository file.
199 if local is True, the tags are stored in a per-repository file.
200 otherwise, they are stored in the .hgtags file, and a new
200 otherwise, they are stored in the .hgtags file, and a new
201 changeset is committed with the change.
201 changeset is committed with the change.
202
202
203 keyword arguments:
203 keyword arguments:
204
204
205 local: whether to store tags in non-version-controlled file
205 local: whether to store tags in non-version-controlled file
206 (default False)
206 (default False)
207
207
208 message: commit message to use if committing
208 message: commit message to use if committing
209
209
210 user: name of user to use if committing
210 user: name of user to use if committing
211
211
212 date: date tuple to use if committing'''
212 date: date tuple to use if committing'''
213
213
214 for x in self.status()[:5]:
214 for x in self.status()[:5]:
215 if '.hgtags' in x:
215 if '.hgtags' in x:
216 raise util.Abort(_('working copy of .hgtags is changed '
216 raise util.Abort(_('working copy of .hgtags is changed '
217 '(please commit .hgtags manually)'))
217 '(please commit .hgtags manually)'))
218
218
219 self.tags() # instantiate the cache
219 self.tags() # instantiate the cache
220 self._tag(names, node, message, local, user, date)
220 self._tag(names, node, message, local, user, date)
221
221
222 def tags(self):
222 def tags(self):
223 '''return a mapping of tag to node'''
223 '''return a mapping of tag to node'''
224 if self.tagscache:
224 if self.tagscache:
225 return self.tagscache
225 return self.tagscache
226
226
227 globaltags = {}
227 globaltags = {}
228 tagtypes = {}
228 tagtypes = {}
229
229
230 def readtags(lines, fn, tagtype):
230 def readtags(lines, fn, tagtype):
231 filetags = {}
231 filetags = {}
232 count = 0
232 count = 0
233
233
234 def warn(msg):
234 def warn(msg):
235 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
235 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
236
236
237 for l in lines:
237 for l in lines:
238 count += 1
238 count += 1
239 if not l:
239 if not l:
240 continue
240 continue
241 s = l.split(" ", 1)
241 s = l.split(" ", 1)
242 if len(s) != 2:
242 if len(s) != 2:
243 warn(_("cannot parse entry"))
243 warn(_("cannot parse entry"))
244 continue
244 continue
245 node, key = s
245 node, key = s
246 key = encoding.tolocal(key.strip()) # stored in UTF-8
246 key = encoding.tolocal(key.strip()) # stored in UTF-8
247 try:
247 try:
248 bin_n = bin(node)
248 bin_n = bin(node)
249 except TypeError:
249 except TypeError:
250 warn(_("node '%s' is not well formed") % node)
250 warn(_("node '%s' is not well formed") % node)
251 continue
251 continue
252 if bin_n not in self.changelog.nodemap:
252 if bin_n not in self.changelog.nodemap:
253 warn(_("tag '%s' refers to unknown node") % key)
253 warn(_("tag '%s' refers to unknown node") % key)
254 continue
254 continue
255
255
256 h = []
256 h = []
257 if key in filetags:
257 if key in filetags:
258 n, h = filetags[key]
258 n, h = filetags[key]
259 h.append(n)
259 h.append(n)
260 filetags[key] = (bin_n, h)
260 filetags[key] = (bin_n, h)
261
261
262 for k, nh in filetags.iteritems():
262 for k, nh in filetags.iteritems():
263 if k not in globaltags:
263 if k not in globaltags:
264 globaltags[k] = nh
264 globaltags[k] = nh
265 tagtypes[k] = tagtype
265 tagtypes[k] = tagtype
266 continue
266 continue
267
267
268 # we prefer the global tag if:
268 # we prefer the global tag if:
269 # it supercedes us OR
269 # it supercedes us OR
270 # mutual supercedes and it has a higher rank
270 # mutual supercedes and it has a higher rank
271 # otherwise we win because we're tip-most
271 # otherwise we win because we're tip-most
272 an, ah = nh
272 an, ah = nh
273 bn, bh = globaltags[k]
273 bn, bh = globaltags[k]
274 if (bn != an and an in bh and
274 if (bn != an and an in bh and
275 (bn not in ah or len(bh) > len(ah))):
275 (bn not in ah or len(bh) > len(ah))):
276 an = bn
276 an = bn
277 ah.extend([n for n in bh if n not in ah])
277 ah.extend([n for n in bh if n not in ah])
278 globaltags[k] = an, ah
278 globaltags[k] = an, ah
279 tagtypes[k] = tagtype
279 tagtypes[k] = tagtype
280
280
281 # read the tags file from each head, ending with the tip
281 # read the tags file from each head, ending with the tip
282 f = None
282 f = None
283 for rev, node, fnode in self._hgtagsnodes():
283 for rev, node, fnode in self._hgtagsnodes():
284 f = (f and f.filectx(fnode) or
284 f = (f and f.filectx(fnode) or
285 self.filectx('.hgtags', fileid=fnode))
285 self.filectx('.hgtags', fileid=fnode))
286 readtags(f.data().splitlines(), f, "global")
286 readtags(f.data().splitlines(), f, "global")
287
287
288 try:
288 try:
289 data = encoding.fromlocal(self.opener("localtags").read())
289 data = encoding.fromlocal(self.opener("localtags").read())
290 # localtags are stored in the local character set
290 # localtags are stored in the local character set
291 # while the internal tag table is stored in UTF-8
291 # while the internal tag table is stored in UTF-8
292 readtags(data.splitlines(), "localtags", "local")
292 readtags(data.splitlines(), "localtags", "local")
293 except IOError:
293 except IOError:
294 pass
294 pass
295
295
296 self.tagscache = {}
296 self.tagscache = {}
297 self._tagstypecache = {}
297 self._tagstypecache = {}
298 for k, nh in globaltags.iteritems():
298 for k, nh in globaltags.iteritems():
299 n = nh[0]
299 n = nh[0]
300 if n != nullid:
300 if n != nullid:
301 self.tagscache[k] = n
301 self.tagscache[k] = n
302 self._tagstypecache[k] = tagtypes[k]
302 self._tagstypecache[k] = tagtypes[k]
303 self.tagscache['tip'] = self.changelog.tip()
303 self.tagscache['tip'] = self.changelog.tip()
304 return self.tagscache
304 return self.tagscache
305
305
306 def tagtype(self, tagname):
306 def tagtype(self, tagname):
307 '''
307 '''
308 return the type of the given tag. result can be:
308 return the type of the given tag. result can be:
309
309
310 'local' : a local tag
310 'local' : a local tag
311 'global' : a global tag
311 'global' : a global tag
312 None : tag does not exist
312 None : tag does not exist
313 '''
313 '''
314
314
315 self.tags()
315 self.tags()
316
316
317 return self._tagstypecache.get(tagname)
317 return self._tagstypecache.get(tagname)
318
318
319 def _hgtagsnodes(self):
319 def _hgtagsnodes(self):
320 last = {}
320 last = {}
321 ret = []
321 ret = []
322 for node in reversed(self.heads()):
322 for node in reversed(self.heads()):
323 c = self[node]
323 c = self[node]
324 rev = c.rev()
324 rev = c.rev()
325 try:
325 try:
326 fnode = c.filenode('.hgtags')
326 fnode = c.filenode('.hgtags')
327 except error.LookupError:
327 except error.LookupError:
328 continue
328 continue
329 ret.append((rev, node, fnode))
329 ret.append((rev, node, fnode))
330 if fnode in last:
330 if fnode in last:
331 ret[last[fnode]] = None
331 ret[last[fnode]] = None
332 last[fnode] = len(ret) - 1
332 last[fnode] = len(ret) - 1
333 return [item for item in ret if item]
333 return [item for item in ret if item]
334
334
335 def tagslist(self):
335 def tagslist(self):
336 '''return a list of tags ordered by revision'''
336 '''return a list of tags ordered by revision'''
337 l = []
337 l = []
338 for t, n in self.tags().iteritems():
338 for t, n in self.tags().iteritems():
339 try:
339 try:
340 r = self.changelog.rev(n)
340 r = self.changelog.rev(n)
341 except:
341 except:
342 r = -2 # sort to the beginning of the list if unknown
342 r = -2 # sort to the beginning of the list if unknown
343 l.append((r, t, n))
343 l.append((r, t, n))
344 return [(t, n) for r, t, n in sorted(l)]
344 return [(t, n) for r, t, n in sorted(l)]
345
345
346 def nodetags(self, node):
346 def nodetags(self, node):
347 '''return the tags associated with a node'''
347 '''return the tags associated with a node'''
348 if not self.nodetagscache:
348 if not self.nodetagscache:
349 self.nodetagscache = {}
349 self.nodetagscache = {}
350 for t, n in self.tags().iteritems():
350 for t, n in self.tags().iteritems():
351 self.nodetagscache.setdefault(n, []).append(t)
351 self.nodetagscache.setdefault(n, []).append(t)
352 return self.nodetagscache.get(node, [])
352 return self.nodetagscache.get(node, [])
353
353
354 def _branchtags(self, partial, lrev):
354 def _branchtags(self, partial, lrev):
355 # TODO: rename this function?
355 # TODO: rename this function?
356 tiprev = len(self) - 1
356 tiprev = len(self) - 1
357 if lrev != tiprev:
357 if lrev != tiprev:
358 self._updatebranchcache(partial, lrev+1, tiprev+1)
358 self._updatebranchcache(partial, lrev+1, tiprev+1)
359 self._writebranchcache(partial, self.changelog.tip(), tiprev)
359 self._writebranchcache(partial, self.changelog.tip(), tiprev)
360
360
361 return partial
361 return partial
362
362
363 def _branchheads(self):
363 def _branchheads(self):
364 tip = self.changelog.tip()
364 tip = self.changelog.tip()
365 if self.branchcache is not None and self._branchcachetip == tip:
365 if self.branchcache is not None and self._branchcachetip == tip:
366 return self.branchcache
366 return self.branchcache
367
367
368 oldtip = self._branchcachetip
368 oldtip = self._branchcachetip
369 self._branchcachetip = tip
369 self._branchcachetip = tip
370 if self.branchcache is None:
370 if self.branchcache is None:
371 self.branchcache = {} # avoid recursion in changectx
371 self.branchcache = {} # avoid recursion in changectx
372 else:
372 else:
373 self.branchcache.clear() # keep using the same dict
373 self.branchcache.clear() # keep using the same dict
374 if oldtip is None or oldtip not in self.changelog.nodemap:
374 if oldtip is None or oldtip not in self.changelog.nodemap:
375 partial, last, lrev = self._readbranchcache()
375 partial, last, lrev = self._readbranchcache()
376 else:
376 else:
377 lrev = self.changelog.rev(oldtip)
377 lrev = self.changelog.rev(oldtip)
378 partial = self._ubranchcache
378 partial = self._ubranchcache
379
379
380 self._branchtags(partial, lrev)
380 self._branchtags(partial, lrev)
381 # this private cache holds all heads (not just tips)
381 # this private cache holds all heads (not just tips)
382 self._ubranchcache = partial
382 self._ubranchcache = partial
383
383
384 # the branch cache is stored on disk as UTF-8, but in the local
384 # the branch cache is stored on disk as UTF-8, but in the local
385 # charset internally
385 # charset internally
386 for k, v in partial.iteritems():
386 for k, v in partial.iteritems():
387 self.branchcache[encoding.tolocal(k)] = v
387 self.branchcache[encoding.tolocal(k)] = v
388 return self.branchcache
388 return self.branchcache
389
389
390
390
391 def branchtags(self):
391 def branchtags(self):
392 '''return a dict where branch names map to the tipmost head of
392 '''return a dict where branch names map to the tipmost head of
393 the branch, open heads come before closed'''
393 the branch, open heads come before closed'''
394 bt = {}
394 bt = {}
395 for bn, heads in self._branchheads().iteritems():
395 for bn, heads in self._branchheads().iteritems():
396 head = None
396 head = None
397 for i in range(len(heads)-1, -1, -1):
397 for i in range(len(heads)-1, -1, -1):
398 h = heads[i]
398 h = heads[i]
399 if 'close' not in self.changelog.read(h)[5]:
399 if 'close' not in self.changelog.read(h)[5]:
400 head = h
400 head = h
401 break
401 break
402 # no open heads were found
402 # no open heads were found
403 if head is None:
403 if head is None:
404 head = heads[-1]
404 head = heads[-1]
405 bt[bn] = head
405 bt[bn] = head
406 return bt
406 return bt
407
407
408
408
409 def _readbranchcache(self):
409 def _readbranchcache(self):
410 partial = {}
410 partial = {}
411 try:
411 try:
412 f = self.opener("branchheads.cache")
412 f = self.opener("branchheads.cache")
413 lines = f.read().split('\n')
413 lines = f.read().split('\n')
414 f.close()
414 f.close()
415 except (IOError, OSError):
415 except (IOError, OSError):
416 return {}, nullid, nullrev
416 return {}, nullid, nullrev
417
417
418 try:
418 try:
419 last, lrev = lines.pop(0).split(" ", 1)
419 last, lrev = lines.pop(0).split(" ", 1)
420 last, lrev = bin(last), int(lrev)
420 last, lrev = bin(last), int(lrev)
421 if lrev >= len(self) or self[lrev].node() != last:
421 if lrev >= len(self) or self[lrev].node() != last:
422 # invalidate the cache
422 # invalidate the cache
423 raise ValueError('invalidating branch cache (tip differs)')
423 raise ValueError('invalidating branch cache (tip differs)')
424 for l in lines:
424 for l in lines:
425 if not l: continue
425 if not l: continue
426 node, label = l.split(" ", 1)
426 node, label = l.split(" ", 1)
427 partial.setdefault(label.strip(), []).append(bin(node))
427 partial.setdefault(label.strip(), []).append(bin(node))
428 except KeyboardInterrupt:
428 except KeyboardInterrupt:
429 raise
429 raise
430 except Exception, inst:
430 except Exception, inst:
431 if self.ui.debugflag:
431 if self.ui.debugflag:
432 self.ui.warn(str(inst), '\n')
432 self.ui.warn(str(inst), '\n')
433 partial, last, lrev = {}, nullid, nullrev
433 partial, last, lrev = {}, nullid, nullrev
434 return partial, last, lrev
434 return partial, last, lrev
435
435
436 def _writebranchcache(self, branches, tip, tiprev):
436 def _writebranchcache(self, branches, tip, tiprev):
437 try:
437 try:
438 f = self.opener("branchheads.cache", "w", atomictemp=True)
438 f = self.opener("branchheads.cache", "w", atomictemp=True)
439 f.write("%s %s\n" % (hex(tip), tiprev))
439 f.write("%s %s\n" % (hex(tip), tiprev))
440 for label, nodes in branches.iteritems():
440 for label, nodes in branches.iteritems():
441 for node in nodes:
441 for node in nodes:
442 f.write("%s %s\n" % (hex(node), label))
442 f.write("%s %s\n" % (hex(node), label))
443 f.rename()
443 f.rename()
444 except (IOError, OSError):
444 except (IOError, OSError):
445 pass
445 pass
446
446
447 def _updatebranchcache(self, partial, start, end):
447 def _updatebranchcache(self, partial, start, end):
448 for r in xrange(start, end):
448 for r in xrange(start, end):
449 c = self[r]
449 c = self[r]
450 b = c.branch()
450 b = c.branch()
451 bheads = partial.setdefault(b, [])
451 bheads = partial.setdefault(b, [])
452 bheads.append(c.node())
452 bheads.append(c.node())
453 for p in c.parents():
453 for p in c.parents():
454 pn = p.node()
454 pn = p.node()
455 if pn in bheads:
455 if pn in bheads:
456 bheads.remove(pn)
456 bheads.remove(pn)
457
457
458 def lookup(self, key):
458 def lookup(self, key):
459 if isinstance(key, int):
459 if isinstance(key, int):
460 return self.changelog.node(key)
460 return self.changelog.node(key)
461 elif key == '.':
461 elif key == '.':
462 return self.dirstate.parents()[0]
462 return self.dirstate.parents()[0]
463 elif key == 'null':
463 elif key == 'null':
464 return nullid
464 return nullid
465 elif key == 'tip':
465 elif key == 'tip':
466 return self.changelog.tip()
466 return self.changelog.tip()
467 n = self.changelog._match(key)
467 n = self.changelog._match(key)
468 if n:
468 if n:
469 return n
469 return n
470 if key in self.tags():
470 if key in self.tags():
471 return self.tags()[key]
471 return self.tags()[key]
472 if key in self.branchtags():
472 if key in self.branchtags():
473 return self.branchtags()[key]
473 return self.branchtags()[key]
474 n = self.changelog._partialmatch(key)
474 n = self.changelog._partialmatch(key)
475 if n:
475 if n:
476 return n
476 return n
477 try:
477 try:
478 if len(key) == 20:
478 if len(key) == 20:
479 key = hex(key)
479 key = hex(key)
480 except:
480 except:
481 pass
481 pass
482 raise error.RepoError(_("unknown revision '%s'") % key)
482 raise error.RepoError(_("unknown revision '%s'") % key)
483
483
484 def local(self):
484 def local(self):
485 return True
485 return True
486
486
487 def join(self, f):
487 def join(self, f):
488 return os.path.join(self.path, f)
488 return os.path.join(self.path, f)
489
489
490 def wjoin(self, f):
490 def wjoin(self, f):
491 return os.path.join(self.root, f)
491 return os.path.join(self.root, f)
492
492
493 def rjoin(self, f):
493 def rjoin(self, f):
494 return os.path.join(self.root, util.pconvert(f))
494 return os.path.join(self.root, util.pconvert(f))
495
495
496 def file(self, f):
496 def file(self, f):
497 if f[0] == '/':
497 if f[0] == '/':
498 f = f[1:]
498 f = f[1:]
499 return filelog.filelog(self.sopener, f)
499 return filelog.filelog(self.sopener, f)
500
500
501 def changectx(self, changeid):
501 def changectx(self, changeid):
502 return self[changeid]
502 return self[changeid]
503
503
504 def parents(self, changeid=None):
504 def parents(self, changeid=None):
505 '''get list of changectxs for parents of changeid'''
505 '''get list of changectxs for parents of changeid'''
506 return self[changeid].parents()
506 return self[changeid].parents()
507
507
508 def filectx(self, path, changeid=None, fileid=None):
508 def filectx(self, path, changeid=None, fileid=None):
509 """changeid can be a changeset revision, node, or tag.
509 """changeid can be a changeset revision, node, or tag.
510 fileid can be a file revision or node."""
510 fileid can be a file revision or node."""
511 return context.filectx(self, path, changeid, fileid)
511 return context.filectx(self, path, changeid, fileid)
512
512
513 def getcwd(self):
513 def getcwd(self):
514 return self.dirstate.getcwd()
514 return self.dirstate.getcwd()
515
515
516 def pathto(self, f, cwd=None):
516 def pathto(self, f, cwd=None):
517 return self.dirstate.pathto(f, cwd)
517 return self.dirstate.pathto(f, cwd)
518
518
519 def wfile(self, f, mode='r'):
519 def wfile(self, f, mode='r'):
520 return self.wopener(f, mode)
520 return self.wopener(f, mode)
521
521
522 def _link(self, f):
522 def _link(self, f):
523 return os.path.islink(self.wjoin(f))
523 return os.path.islink(self.wjoin(f))
524
524
525 def _filter(self, filter, filename, data):
525 def _filter(self, filter, filename, data):
526 if filter not in self.filterpats:
526 if filter not in self.filterpats:
527 l = []
527 l = []
528 for pat, cmd in self.ui.configitems(filter):
528 for pat, cmd in self.ui.configitems(filter):
529 if cmd == '!':
529 if cmd == '!':
530 continue
530 continue
531 mf = util.matcher(self.root, "", [pat], [], [])[1]
531 mf = util.matcher(self.root, "", [pat], [], [])[1]
532 fn = None
532 fn = None
533 params = cmd
533 params = cmd
534 for name, filterfn in self._datafilters.iteritems():
534 for name, filterfn in self._datafilters.iteritems():
535 if cmd.startswith(name):
535 if cmd.startswith(name):
536 fn = filterfn
536 fn = filterfn
537 params = cmd[len(name):].lstrip()
537 params = cmd[len(name):].lstrip()
538 break
538 break
539 if not fn:
539 if not fn:
540 fn = lambda s, c, **kwargs: util.filter(s, c)
540 fn = lambda s, c, **kwargs: util.filter(s, c)
541 # Wrap old filters not supporting keyword arguments
541 # Wrap old filters not supporting keyword arguments
542 if not inspect.getargspec(fn)[2]:
542 if not inspect.getargspec(fn)[2]:
543 oldfn = fn
543 oldfn = fn
544 fn = lambda s, c, **kwargs: oldfn(s, c)
544 fn = lambda s, c, **kwargs: oldfn(s, c)
545 l.append((mf, fn, params))
545 l.append((mf, fn, params))
546 self.filterpats[filter] = l
546 self.filterpats[filter] = l
547
547
548 for mf, fn, cmd in self.filterpats[filter]:
548 for mf, fn, cmd in self.filterpats[filter]:
549 if mf(filename):
549 if mf(filename):
550 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
550 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
551 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
551 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
552 break
552 break
553
553
554 return data
554 return data
555
555
556 def adddatafilter(self, name, filter):
556 def adddatafilter(self, name, filter):
557 self._datafilters[name] = filter
557 self._datafilters[name] = filter
558
558
559 def wread(self, filename):
559 def wread(self, filename):
560 if self._link(filename):
560 if self._link(filename):
561 data = os.readlink(self.wjoin(filename))
561 data = os.readlink(self.wjoin(filename))
562 else:
562 else:
563 data = self.wopener(filename, 'r').read()
563 data = self.wopener(filename, 'r').read()
564 return self._filter("encode", filename, data)
564 return self._filter("encode", filename, data)
565
565
566 def wwrite(self, filename, data, flags):
566 def wwrite(self, filename, data, flags):
567 data = self._filter("decode", filename, data)
567 data = self._filter("decode", filename, data)
568 try:
568 try:
569 os.unlink(self.wjoin(filename))
569 os.unlink(self.wjoin(filename))
570 except OSError:
570 except OSError:
571 pass
571 pass
572 if 'l' in flags:
572 if 'l' in flags:
573 self.wopener.symlink(data, filename)
573 self.wopener.symlink(data, filename)
574 else:
574 else:
575 self.wopener(filename, 'w').write(data)
575 self.wopener(filename, 'w').write(data)
576 if 'x' in flags:
576 if 'x' in flags:
577 util.set_flags(self.wjoin(filename), False, True)
577 util.set_flags(self.wjoin(filename), False, True)
578
578
579 def wwritedata(self, filename, data):
579 def wwritedata(self, filename, data):
580 return self._filter("decode", filename, data)
580 return self._filter("decode", filename, data)
581
581
582 def transaction(self):
582 def transaction(self):
583 tr = self._transref and self._transref() or None
583 tr = self._transref and self._transref() or None
584 if tr and tr.running():
584 if tr and tr.running():
585 return tr.nest()
585 return tr.nest()
586
586
587 # abort here if the journal already exists
587 # abort here if the journal already exists
588 if os.path.exists(self.sjoin("journal")):
588 if os.path.exists(self.sjoin("journal")):
589 raise error.RepoError(_("journal already exists - run hg recover"))
589 raise error.RepoError(_("journal already exists - run hg recover"))
590
590
591 # save dirstate for rollback
591 # save dirstate for rollback
592 try:
592 try:
593 ds = self.opener("dirstate").read()
593 ds = self.opener("dirstate").read()
594 except IOError:
594 except IOError:
595 ds = ""
595 ds = ""
596 self.opener("journal.dirstate", "w").write(ds)
596 self.opener("journal.dirstate", "w").write(ds)
597 self.opener("journal.branch", "w").write(self.dirstate.branch())
597 self.opener("journal.branch", "w").write(self.dirstate.branch())
598
598
599 renames = [(self.sjoin("journal"), self.sjoin("undo")),
599 renames = [(self.sjoin("journal"), self.sjoin("undo")),
600 (self.join("journal.dirstate"), self.join("undo.dirstate")),
600 (self.join("journal.dirstate"), self.join("undo.dirstate")),
601 (self.join("journal.branch"), self.join("undo.branch"))]
601 (self.join("journal.branch"), self.join("undo.branch"))]
602 tr = transaction.transaction(self.ui.warn, self.sopener,
602 tr = transaction.transaction(self.ui.warn, self.sopener,
603 self.sjoin("journal"),
603 self.sjoin("journal"),
604 aftertrans(renames),
604 aftertrans(renames),
605 self.store.createmode)
605 self.store.createmode)
606 self._transref = weakref.ref(tr)
606 self._transref = weakref.ref(tr)
607 return tr
607 return tr
608
608
609 def recover(self):
609 def recover(self):
610 lock = self.lock()
610 lock = self.lock()
611 try:
611 try:
612 if os.path.exists(self.sjoin("journal")):
612 if os.path.exists(self.sjoin("journal")):
613 self.ui.status(_("rolling back interrupted transaction\n"))
613 self.ui.status(_("rolling back interrupted transaction\n"))
614 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
614 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
615 self.invalidate()
615 self.invalidate()
616 return True
616 return True
617 else:
617 else:
618 self.ui.warn(_("no interrupted transaction available\n"))
618 self.ui.warn(_("no interrupted transaction available\n"))
619 return False
619 return False
620 finally:
620 finally:
621 lock.release()
621 lock.release()
622
622
623 def rollback(self):
623 def rollback(self):
624 wlock = lock = None
624 wlock = lock = None
625 try:
625 try:
626 wlock = self.wlock()
626 wlock = self.wlock()
627 lock = self.lock()
627 lock = self.lock()
628 if os.path.exists(self.sjoin("undo")):
628 if os.path.exists(self.sjoin("undo")):
629 self.ui.status(_("rolling back last transaction\n"))
629 self.ui.status(_("rolling back last transaction\n"))
630 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
630 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
631 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
631 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
632 try:
632 try:
633 branch = self.opener("undo.branch").read()
633 branch = self.opener("undo.branch").read()
634 self.dirstate.setbranch(branch)
634 self.dirstate.setbranch(branch)
635 except IOError:
635 except IOError:
636 self.ui.warn(_("Named branch could not be reset, "
636 self.ui.warn(_("Named branch could not be reset, "
637 "current branch still is: %s\n")
637 "current branch still is: %s\n")
638 % encoding.tolocal(self.dirstate.branch()))
638 % encoding.tolocal(self.dirstate.branch()))
639 self.invalidate()
639 self.invalidate()
640 self.dirstate.invalidate()
640 self.dirstate.invalidate()
641 else:
641 else:
642 self.ui.warn(_("no rollback information available\n"))
642 self.ui.warn(_("no rollback information available\n"))
643 finally:
643 finally:
644 release(lock, wlock)
644 release(lock, wlock)
645
645
646 def invalidate(self):
646 def invalidate(self):
647 for a in "changelog manifest".split():
647 for a in "changelog manifest".split():
648 if a in self.__dict__:
648 if a in self.__dict__:
649 delattr(self, a)
649 delattr(self, a)
650 self.tagscache = None
650 self.tagscache = None
651 self._tagstypecache = None
651 self._tagstypecache = None
652 self.nodetagscache = None
652 self.nodetagscache = None
653 self.branchcache = None
653 self.branchcache = None
654 self._ubranchcache = None
654 self._ubranchcache = None
655 self._branchcachetip = None
655 self._branchcachetip = None
656
656
657 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
657 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
658 try:
658 try:
659 l = lock.lock(lockname, 0, releasefn, desc=desc)
659 l = lock.lock(lockname, 0, releasefn, desc=desc)
660 except error.LockHeld, inst:
660 except error.LockHeld, inst:
661 if not wait:
661 if not wait:
662 raise
662 raise
663 self.ui.warn(_("waiting for lock on %s held by %r\n") %
663 self.ui.warn(_("waiting for lock on %s held by %r\n") %
664 (desc, inst.locker))
664 (desc, inst.locker))
665 # default to 600 seconds timeout
665 # default to 600 seconds timeout
666 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
666 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
667 releasefn, desc=desc)
667 releasefn, desc=desc)
668 if acquirefn:
668 if acquirefn:
669 acquirefn()
669 acquirefn()
670 return l
670 return l
671
671
672 def lock(self, wait=True):
672 def lock(self, wait=True):
673 l = self._lockref and self._lockref()
673 l = self._lockref and self._lockref()
674 if l is not None and l.held:
674 if l is not None and l.held:
675 l.lock()
675 l.lock()
676 return l
676 return l
677
677
678 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
678 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
679 _('repository %s') % self.origroot)
679 _('repository %s') % self.origroot)
680 self._lockref = weakref.ref(l)
680 self._lockref = weakref.ref(l)
681 return l
681 return l
682
682
683 def wlock(self, wait=True):
683 def wlock(self, wait=True):
684 l = self._wlockref and self._wlockref()
684 l = self._wlockref and self._wlockref()
685 if l is not None and l.held:
685 if l is not None and l.held:
686 l.lock()
686 l.lock()
687 return l
687 return l
688
688
689 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
689 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
690 self.dirstate.invalidate, _('working directory of %s') %
690 self.dirstate.invalidate, _('working directory of %s') %
691 self.origroot)
691 self.origroot)
692 self._wlockref = weakref.ref(l)
692 self._wlockref = weakref.ref(l)
693 return l
693 return l
694
694
695 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
695 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
696 """
696 """
697 commit an individual file as part of a larger transaction
697 commit an individual file as part of a larger transaction
698 """
698 """
699
699
700 fname = fctx.path()
700 fname = fctx.path()
701 text = fctx.data()
701 text = fctx.data()
702 flog = self.file(fname)
702 flog = self.file(fname)
703 fparent1 = manifest1.get(fname, nullid)
703 fparent1 = manifest1.get(fname, nullid)
704 fparent2 = fparent2o = manifest2.get(fname, nullid)
704 fparent2 = fparent2o = manifest2.get(fname, nullid)
705
705
706 meta = {}
706 meta = {}
707 copy = fctx.renamed()
707 copy = fctx.renamed()
708 if copy and copy[0] != fname:
708 if copy and copy[0] != fname:
709 # Mark the new revision of this file as a copy of another
709 # Mark the new revision of this file as a copy of another
710 # file. This copy data will effectively act as a parent
710 # file. This copy data will effectively act as a parent
711 # of this new revision. If this is a merge, the first
711 # of this new revision. If this is a merge, the first
712 # parent will be the nullid (meaning "look up the copy data")
712 # parent will be the nullid (meaning "look up the copy data")
713 # and the second one will be the other parent. For example:
713 # and the second one will be the other parent. For example:
714 #
714 #
715 # 0 --- 1 --- 3 rev1 changes file foo
715 # 0 --- 1 --- 3 rev1 changes file foo
716 # \ / rev2 renames foo to bar and changes it
716 # \ / rev2 renames foo to bar and changes it
717 # \- 2 -/ rev3 should have bar with all changes and
717 # \- 2 -/ rev3 should have bar with all changes and
718 # should record that bar descends from
718 # should record that bar descends from
719 # bar in rev2 and foo in rev1
719 # bar in rev2 and foo in rev1
720 #
720 #
721 # this allows this merge to succeed:
721 # this allows this merge to succeed:
722 #
722 #
723 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
723 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
724 # \ / merging rev3 and rev4 should use bar@rev2
724 # \ / merging rev3 and rev4 should use bar@rev2
725 # \- 2 --- 4 as the merge base
725 # \- 2 --- 4 as the merge base
726 #
726 #
727
727
728 cfname = copy[0]
728 cfname = copy[0]
729 crev = manifest1.get(cfname)
729 crev = manifest1.get(cfname)
730 newfparent = fparent2
730 newfparent = fparent2
731
731
732 if manifest2: # branch merge
732 if manifest2: # branch merge
733 if fparent2 == nullid or crev is None: # copied on remote side
733 if fparent2 == nullid or crev is None: # copied on remote side
734 if cfname in manifest2:
734 if cfname in manifest2:
735 crev = manifest2[cfname]
735 crev = manifest2[cfname]
736 newfparent = fparent1
736 newfparent = fparent1
737
737
738 # find source in nearest ancestor if we've lost track
738 # find source in nearest ancestor if we've lost track
739 if not crev:
739 if not crev:
740 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
740 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
741 (fname, cfname))
741 (fname, cfname))
742 for ancestor in self['.'].ancestors():
742 for ancestor in self['.'].ancestors():
743 if cfname in ancestor:
743 if cfname in ancestor:
744 crev = ancestor[cfname].filenode()
744 crev = ancestor[cfname].filenode()
745 break
745 break
746
746
747 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
747 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
748 meta["copy"] = cfname
748 meta["copy"] = cfname
749 meta["copyrev"] = hex(crev)
749 meta["copyrev"] = hex(crev)
750 fparent1, fparent2 = nullid, newfparent
750 fparent1, fparent2 = nullid, newfparent
751 elif fparent2 != nullid:
751 elif fparent2 != nullid:
752 # is one parent an ancestor of the other?
752 # is one parent an ancestor of the other?
753 fparentancestor = flog.ancestor(fparent1, fparent2)
753 fparentancestor = flog.ancestor(fparent1, fparent2)
754 if fparentancestor == fparent1:
754 if fparentancestor == fparent1:
755 fparent1, fparent2 = fparent2, nullid
755 fparent1, fparent2 = fparent2, nullid
756 elif fparentancestor == fparent2:
756 elif fparentancestor == fparent2:
757 fparent2 = nullid
757 fparent2 = nullid
758
758
759 # is the file changed?
759 # is the file changed?
760 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
760 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
761 changelist.append(fname)
761 changelist.append(fname)
762 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
762 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
763
763
764 # are just the flags changed during merge?
764 # are just the flags changed during merge?
765 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
765 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
766 changelist.append(fname)
766 changelist.append(fname)
767
767
768 return fparent1
768 return fparent1
769
769
770 def commit(self, files=None, text="", user=None, date=None, match=None,
770 def commit(self, files=None, text="", user=None, date=None, match=None,
771 force=False, editor=False, extra={}):
771 force=False, editor=False, extra={}):
772 wlock = lock = None
772 wlock = lock = None
773 if extra.get("close"):
773 if extra.get("close"):
774 force = True
774 force = True
775 if files:
775 if files:
776 files = list(set(files))
776 files = list(set(files))
777
777
778 wlock = self.wlock()
778 wlock = self.wlock()
779 try:
779 try:
780 p1, p2 = self.dirstate.parents()
780 p1, p2 = self.dirstate.parents()
781
781
782 if (not force and p2 != nullid and
782 if (not force and p2 != nullid and
783 (match and (match.files() or match.anypats()))):
783 (match and (match.files() or match.anypats()))):
784 raise util.Abort(_('cannot partially commit a merge '
784 raise util.Abort(_('cannot partially commit a merge '
785 '(do not specify files or patterns)'))
785 '(do not specify files or patterns)'))
786
786
787 if files:
787 if files:
788 modified, removed = [], []
788 modified, removed = [], []
789 for f in files:
789 for f in files:
790 s = self.dirstate[f]
790 s = self.dirstate[f]
791 if s in 'nma':
791 if s in 'nma':
792 modified.append(f)
792 modified.append(f)
793 elif s == 'r':
793 elif s == 'r':
794 removed.append(f)
794 removed.append(f)
795 else:
795 else:
796 self.ui.warn(_("%s not tracked!\n") % f)
796 self.ui.warn(_("%s not tracked!\n") % f)
797 changes = [modified, [], removed, [], []]
797 changes = [modified, [], removed, [], []]
798 else:
798 else:
799 changes = self.status(match=match)
799 changes = self.status(match=match)
800
800
801 if (not (changes[0] or changes[1] or changes[2])
801 if (not (changes[0] or changes[1] or changes[2])
802 and not force and p2 == nullid and
802 and not force and p2 == nullid and
803 self[None].branch() == self['.'].branch()):
803 self[None].branch() == self['.'].branch()):
804 self.ui.status(_("nothing changed\n"))
804 self.ui.status(_("nothing changed\n"))
805 return None
805 return None
806
806
807 ms = merge_.mergestate(self)
807 ms = merge_.mergestate(self)
808 for f in changes[0]:
808 for f in changes[0]:
809 if f in ms and ms[f] == 'u':
809 if f in ms and ms[f] == 'u':
810 raise util.Abort(_("unresolved merge conflicts "
810 raise util.Abort(_("unresolved merge conflicts "
811 "(see hg resolve)"))
811 "(see hg resolve)"))
812 wctx = context.workingctx(self, (p1, p2), text, user, date,
812 wctx = context.workingctx(self, (p1, p2), text, user, date,
813 extra, changes)
813 extra, changes)
814 r = self.commitctx(wctx, editor, True)
814 r = self.commitctx(wctx, editor, True)
815 ms.reset()
815 ms.reset()
816 return r
816 return r
817
817
818 finally:
818 finally:
819 wlock.release()
819 wlock.release()
820
820
821 def commitctx(self, ctx, editor=None, working=False):
821 def commitctx(self, ctx, editor=None, working=False):
822 """Add a new revision to current repository.
822 """Add a new revision to current repository.
823
823
824 Revision information is passed via the context argument.
824 Revision information is passed via the context argument.
825 If editor is supplied, it is called to get a commit message.
825 If editor is supplied, it is called to get a commit message.
826 If working is set, the working directory is affected.
826 If working is set, the working directory is affected.
827 """
827 """
828
828
829 lock = self.lock()
830 tr = None
829 tr = None
831 valid = 0 # don't save the dirstate if this isn't set
830 valid = 0 # don't save the dirstate if this isn't set
832 try:
831 lock = None
833 commit = sorted(ctx.modified() + ctx.added())
832 commit = sorted(ctx.modified() + ctx.added())
834 remove = ctx.removed()
833 remove = ctx.removed()
835 extra = ctx.extra().copy()
834 extra = ctx.extra().copy()
836 branchname = extra['branch']
835 branchname = extra['branch']
837 user = ctx.user()
836 user = ctx.user()
838 text = ctx.description()
837 text = ctx.description()
839
838
840 p1, p2 = [p.node() for p in ctx.parents()]
839 p1, p2 = [p.node() for p in ctx.parents()]
841 c1 = self.changelog.read(p1)
840 c1 = self.changelog.read(p1)
842 c2 = self.changelog.read(p2)
841 c2 = self.changelog.read(p2)
843 m1 = self.manifest.read(c1[0]).copy()
842 m1 = self.manifest.read(c1[0]).copy()
844 m2 = self.manifest.read(c2[0])
843 m2 = self.manifest.read(c2[0])
845
844
846 xp1 = hex(p1)
845 xp1 = hex(p1)
847 if p2 == nullid: xp2 = ''
846 if p2 == nullid: xp2 = ''
848 else: xp2 = hex(p2)
847 else: xp2 = hex(p2)
849
850 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
848 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
851
849
850 lock = self.lock()
851 try:
852 tr = self.transaction()
852 tr = self.transaction()
853 trp = weakref.proxy(tr)
853 trp = weakref.proxy(tr)
854
854
855 # check in files
855 # check in files
856 new = {}
856 new = {}
857 changed = []
857 changed = []
858 linkrev = len(self)
858 linkrev = len(self)
859 for f in commit:
859 for f in commit:
860 self.ui.note(f + "\n")
860 self.ui.note(f + "\n")
861 try:
861 try:
862 fctx = ctx[f]
862 fctx = ctx[f]
863 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
863 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
864 changed)
864 changed)
865 m1.set(f, fctx.flags())
865 m1.set(f, fctx.flags())
866 if working:
866 if working:
867 self.dirstate.normal(f)
867 self.dirstate.normal(f)
868
868
869 except (OSError, IOError):
869 except (OSError, IOError):
870 if working:
870 if working:
871 self.ui.warn(_("trouble committing %s!\n") % f)
871 self.ui.warn(_("trouble committing %s!\n") % f)
872 raise
872 raise
873 else:
873 else:
874 remove.append(f)
874 remove.append(f)
875
875
876 updated, added = [], []
876 updated, added = [], []
877 for f in sorted(changed):
877 for f in sorted(changed):
878 if f in m1 or f in m2:
878 if f in m1 or f in m2:
879 updated.append(f)
879 updated.append(f)
880 else:
880 else:
881 added.append(f)
881 added.append(f)
882
882
883 # update manifest
883 # update manifest
884 m1.update(new)
884 m1.update(new)
885 removed = [f for f in sorted(remove) if f in m1 or f in m2]
885 removed = [f for f in sorted(remove) if f in m1 or f in m2]
886 removed1 = []
886 removed1 = []
887
887
888 for f in removed:
888 for f in removed:
889 if f in m1:
889 if f in m1:
890 del m1[f]
890 del m1[f]
891 removed1.append(f)
891 removed1.append(f)
892 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
892 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
893 (new, removed1))
893 (new, removed1))
894
894
895 if editor:
895 if editor:
896 text = editor(self, ctx, added, updated, removed)
896 text = editor(self, ctx, added, updated, removed)
897
897
898 lines = [line.rstrip() for line in text.rstrip().splitlines()]
898 lines = [line.rstrip() for line in text.rstrip().splitlines()]
899 while lines and not lines[0]:
899 while lines and not lines[0]:
900 del lines[0]
900 del lines[0]
901 text = '\n'.join(lines)
901 text = '\n'.join(lines)
902
902
903 self.changelog.delayupdate()
903 self.changelog.delayupdate()
904 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
904 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
905 user, ctx.date(), extra)
905 user, ctx.date(), extra)
906 p = lambda: self.changelog.writepending() and self.root or ""
906 p = lambda: self.changelog.writepending() and self.root or ""
907 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
907 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
908 parent2=xp2, pending=p)
908 parent2=xp2, pending=p)
909 self.changelog.finalize(trp)
909 self.changelog.finalize(trp)
910 tr.close()
910 tr.close()
911
911
912 if self.branchcache:
912 if self.branchcache:
913 self.branchtags()
913 self.branchtags()
914
914
915 if working:
915 if working:
916 self.dirstate.setparents(n)
916 self.dirstate.setparents(n)
917 for f in removed:
917 for f in removed:
918 self.dirstate.forget(f)
918 self.dirstate.forget(f)
919 valid = 1 # our dirstate updates are complete
919 valid = 1 # our dirstate updates are complete
920
920
921 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
921 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
922 return n
922 return n
923 finally:
923 finally:
924 if not valid: # don't save our updated dirstate
924 if not valid: # don't save our updated dirstate
925 self.dirstate.invalidate()
925 self.dirstate.invalidate()
926 del tr
926 del tr
927 lock.release()
927 lock.release()
928
928
929 def walk(self, match, node=None):
929 def walk(self, match, node=None):
930 '''
930 '''
931 walk recursively through the directory tree or a given
931 walk recursively through the directory tree or a given
932 changeset, finding all files matched by the match
932 changeset, finding all files matched by the match
933 function
933 function
934 '''
934 '''
935 return self[node].walk(match)
935 return self[node].walk(match)
936
936
937 def status(self, node1='.', node2=None, match=None,
937 def status(self, node1='.', node2=None, match=None,
938 ignored=False, clean=False, unknown=False):
938 ignored=False, clean=False, unknown=False):
939 """return status of files between two nodes or node and working directory
939 """return status of files between two nodes or node and working directory
940
940
941 If node1 is None, use the first dirstate parent instead.
941 If node1 is None, use the first dirstate parent instead.
942 If node2 is None, compare node1 with working directory.
942 If node2 is None, compare node1 with working directory.
943 """
943 """
944
944
945 def mfmatches(ctx):
945 def mfmatches(ctx):
946 mf = ctx.manifest().copy()
946 mf = ctx.manifest().copy()
947 for fn in mf.keys():
947 for fn in mf.keys():
948 if not match(fn):
948 if not match(fn):
949 del mf[fn]
949 del mf[fn]
950 return mf
950 return mf
951
951
952 if isinstance(node1, context.changectx):
952 if isinstance(node1, context.changectx):
953 ctx1 = node1
953 ctx1 = node1
954 else:
954 else:
955 ctx1 = self[node1]
955 ctx1 = self[node1]
956 if isinstance(node2, context.changectx):
956 if isinstance(node2, context.changectx):
957 ctx2 = node2
957 ctx2 = node2
958 else:
958 else:
959 ctx2 = self[node2]
959 ctx2 = self[node2]
960
960
961 working = ctx2.rev() is None
961 working = ctx2.rev() is None
962 parentworking = working and ctx1 == self['.']
962 parentworking = working and ctx1 == self['.']
963 match = match or match_.always(self.root, self.getcwd())
963 match = match or match_.always(self.root, self.getcwd())
964 listignored, listclean, listunknown = ignored, clean, unknown
964 listignored, listclean, listunknown = ignored, clean, unknown
965
965
966 # load earliest manifest first for caching reasons
966 # load earliest manifest first for caching reasons
967 if not working and ctx2.rev() < ctx1.rev():
967 if not working and ctx2.rev() < ctx1.rev():
968 ctx2.manifest()
968 ctx2.manifest()
969
969
970 if not parentworking:
970 if not parentworking:
971 def bad(f, msg):
971 def bad(f, msg):
972 if f not in ctx1:
972 if f not in ctx1:
973 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
973 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
974 return False
974 return False
975 match.bad = bad
975 match.bad = bad
976
976
977 if working: # we need to scan the working dir
977 if working: # we need to scan the working dir
978 s = self.dirstate.status(match, listignored, listclean, listunknown)
978 s = self.dirstate.status(match, listignored, listclean, listunknown)
979 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
979 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
980
980
981 # check for any possibly clean files
981 # check for any possibly clean files
982 if parentworking and cmp:
982 if parentworking and cmp:
983 fixup = []
983 fixup = []
984 # do a full compare of any files that might have changed
984 # do a full compare of any files that might have changed
985 for f in sorted(cmp):
985 for f in sorted(cmp):
986 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
986 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
987 or ctx1[f].cmp(ctx2[f].data())):
987 or ctx1[f].cmp(ctx2[f].data())):
988 modified.append(f)
988 modified.append(f)
989 else:
989 else:
990 fixup.append(f)
990 fixup.append(f)
991
991
992 if listclean:
992 if listclean:
993 clean += fixup
993 clean += fixup
994
994
995 # update dirstate for files that are actually clean
995 # update dirstate for files that are actually clean
996 if fixup:
996 if fixup:
997 wlock = None
997 wlock = None
998 try:
998 try:
999 try:
999 try:
1000 # updating the dirstate is optional
1000 # updating the dirstate is optional
1001 # so we don't wait on the lock
1001 # so we don't wait on the lock
1002 wlock = self.wlock(False)
1002 wlock = self.wlock(False)
1003 for f in fixup:
1003 for f in fixup:
1004 self.dirstate.normal(f)
1004 self.dirstate.normal(f)
1005 except error.LockError:
1005 except error.LockError:
1006 pass
1006 pass
1007 finally:
1007 finally:
1008 release(wlock)
1008 release(wlock)
1009
1009
1010 if not parentworking:
1010 if not parentworking:
1011 mf1 = mfmatches(ctx1)
1011 mf1 = mfmatches(ctx1)
1012 if working:
1012 if working:
1013 # we are comparing working dir against non-parent
1013 # we are comparing working dir against non-parent
1014 # generate a pseudo-manifest for the working dir
1014 # generate a pseudo-manifest for the working dir
1015 mf2 = mfmatches(self['.'])
1015 mf2 = mfmatches(self['.'])
1016 for f in cmp + modified + added:
1016 for f in cmp + modified + added:
1017 mf2[f] = None
1017 mf2[f] = None
1018 mf2.set(f, ctx2.flags(f))
1018 mf2.set(f, ctx2.flags(f))
1019 for f in removed:
1019 for f in removed:
1020 if f in mf2:
1020 if f in mf2:
1021 del mf2[f]
1021 del mf2[f]
1022 else:
1022 else:
1023 # we are comparing two revisions
1023 # we are comparing two revisions
1024 deleted, unknown, ignored = [], [], []
1024 deleted, unknown, ignored = [], [], []
1025 mf2 = mfmatches(ctx2)
1025 mf2 = mfmatches(ctx2)
1026
1026
1027 modified, added, clean = [], [], []
1027 modified, added, clean = [], [], []
1028 for fn in mf2:
1028 for fn in mf2:
1029 if fn in mf1:
1029 if fn in mf1:
1030 if (mf1.flags(fn) != mf2.flags(fn) or
1030 if (mf1.flags(fn) != mf2.flags(fn) or
1031 (mf1[fn] != mf2[fn] and
1031 (mf1[fn] != mf2[fn] and
1032 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1032 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1033 modified.append(fn)
1033 modified.append(fn)
1034 elif listclean:
1034 elif listclean:
1035 clean.append(fn)
1035 clean.append(fn)
1036 del mf1[fn]
1036 del mf1[fn]
1037 else:
1037 else:
1038 added.append(fn)
1038 added.append(fn)
1039 removed = mf1.keys()
1039 removed = mf1.keys()
1040
1040
1041 r = modified, added, removed, deleted, unknown, ignored, clean
1041 r = modified, added, removed, deleted, unknown, ignored, clean
1042 [l.sort() for l in r]
1042 [l.sort() for l in r]
1043 return r
1043 return r
1044
1044
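A minimal sketch of how a caller might consume the seven sorted lists returned by status() above; repo is assumed to be an already-open localrepository, and the one-letter codes are simply chosen to match hg's usual status letters rather than anything defined in this file:

# Hypothetical caller: compare the working directory against its first parent.
modified, added, removed, deleted, unknown, ignored, clean = repo.status(
    node1='.', node2=None, ignored=True, clean=True, unknown=True)
for letter, names in zip('MAR!?IC', (modified, added, removed, deleted,
                                     unknown, ignored, clean)):
    for name in names:
        print '%s %s' % (letter, name)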
1045 def add(self, list):
1045 def add(self, list):
1046 wlock = self.wlock()
1046 wlock = self.wlock()
1047 try:
1047 try:
1048 rejected = []
1048 rejected = []
1049 for f in list:
1049 for f in list:
1050 p = self.wjoin(f)
1050 p = self.wjoin(f)
1051 try:
1051 try:
1052 st = os.lstat(p)
1052 st = os.lstat(p)
1053 except:
1053 except:
1054 self.ui.warn(_("%s does not exist!\n") % f)
1054 self.ui.warn(_("%s does not exist!\n") % f)
1055 rejected.append(f)
1055 rejected.append(f)
1056 continue
1056 continue
1057 if st.st_size > 10000000:
1057 if st.st_size > 10000000:
1058 self.ui.warn(_("%s: files over 10MB may cause memory and"
1058 self.ui.warn(_("%s: files over 10MB may cause memory and"
1059 " performance problems\n"
1059 " performance problems\n"
1060 "(use 'hg revert %s' to unadd the file)\n")
1060 "(use 'hg revert %s' to unadd the file)\n")
1061 % (f, f))
1061 % (f, f))
1062 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1062 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1063 self.ui.warn(_("%s not added: only files and symlinks "
1063 self.ui.warn(_("%s not added: only files and symlinks "
1064 "supported currently\n") % f)
1064 "supported currently\n") % f)
1065 rejected.append(p)
1065 rejected.append(p)
1066 elif self.dirstate[f] in 'amn':
1066 elif self.dirstate[f] in 'amn':
1067 self.ui.warn(_("%s already tracked!\n") % f)
1067 self.ui.warn(_("%s already tracked!\n") % f)
1068 elif self.dirstate[f] == 'r':
1068 elif self.dirstate[f] == 'r':
1069 self.dirstate.normallookup(f)
1069 self.dirstate.normallookup(f)
1070 else:
1070 else:
1071 self.dirstate.add(f)
1071 self.dirstate.add(f)
1072 return rejected
1072 return rejected
1073 finally:
1073 finally:
1074 wlock.release()
1074 wlock.release()
1075
1075
1076 def forget(self, list):
1076 def forget(self, list):
1077 wlock = self.wlock()
1077 wlock = self.wlock()
1078 try:
1078 try:
1079 for f in list:
1079 for f in list:
1080 if self.dirstate[f] != 'a':
1080 if self.dirstate[f] != 'a':
1081 self.ui.warn(_("%s not added!\n") % f)
1081 self.ui.warn(_("%s not added!\n") % f)
1082 else:
1082 else:
1083 self.dirstate.forget(f)
1083 self.dirstate.forget(f)
1084 finally:
1084 finally:
1085 wlock.release()
1085 wlock.release()
1086
1086
1087 def remove(self, list, unlink=False):
1087 def remove(self, list, unlink=False):
1088 wlock = None
1088 wlock = None
1089 try:
1089 try:
1090 if unlink:
1090 if unlink:
1091 for f in list:
1091 for f in list:
1092 try:
1092 try:
1093 util.unlink(self.wjoin(f))
1093 util.unlink(self.wjoin(f))
1094 except OSError, inst:
1094 except OSError, inst:
1095 if inst.errno != errno.ENOENT:
1095 if inst.errno != errno.ENOENT:
1096 raise
1096 raise
1097 wlock = self.wlock()
1097 wlock = self.wlock()
1098 for f in list:
1098 for f in list:
1099 if unlink and os.path.exists(self.wjoin(f)):
1099 if unlink and os.path.exists(self.wjoin(f)):
1100 self.ui.warn(_("%s still exists!\n") % f)
1100 self.ui.warn(_("%s still exists!\n") % f)
1101 elif self.dirstate[f] == 'a':
1101 elif self.dirstate[f] == 'a':
1102 self.dirstate.forget(f)
1102 self.dirstate.forget(f)
1103 elif f not in self.dirstate:
1103 elif f not in self.dirstate:
1104 self.ui.warn(_("%s not tracked!\n") % f)
1104 self.ui.warn(_("%s not tracked!\n") % f)
1105 else:
1105 else:
1106 self.dirstate.remove(f)
1106 self.dirstate.remove(f)
1107 finally:
1107 finally:
1108 release(wlock)
1108 release(wlock)
1109
1109
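A small usage sketch for remove() above, assuming repo is an open localrepository and the file name is invented for illustration; with unlink=True the file is deleted from the working directory before being marked removed:

# Hypothetical caller: delete a tracked file and record its removal.
repo.remove(['stale.txt'], unlink=True)
# With unlink=False the working copy is left alone and only the
# dirstate is updated to record the removal.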
1110 def undelete(self, list):
1110 def undelete(self, list):
1111 manifests = [self.manifest.read(self.changelog.read(p)[0])
1111 manifests = [self.manifest.read(self.changelog.read(p)[0])
1112 for p in self.dirstate.parents() if p != nullid]
1112 for p in self.dirstate.parents() if p != nullid]
1113 wlock = self.wlock()
1113 wlock = self.wlock()
1114 try:
1114 try:
1115 for f in list:
1115 for f in list:
1116 if self.dirstate[f] != 'r':
1116 if self.dirstate[f] != 'r':
1117 self.ui.warn(_("%s not removed!\n") % f)
1117 self.ui.warn(_("%s not removed!\n") % f)
1118 else:
1118 else:
1119 m = f in manifests[0] and manifests[0] or manifests[1]
1119 m = f in manifests[0] and manifests[0] or manifests[1]
1120 t = self.file(f).read(m[f])
1120 t = self.file(f).read(m[f])
1121 self.wwrite(f, t, m.flags(f))
1121 self.wwrite(f, t, m.flags(f))
1122 self.dirstate.normal(f)
1122 self.dirstate.normal(f)
1123 finally:
1123 finally:
1124 wlock.release()
1124 wlock.release()
1125
1125
1126 def copy(self, source, dest):
1126 def copy(self, source, dest):
1127 p = self.wjoin(dest)
1127 p = self.wjoin(dest)
1128 if not (os.path.exists(p) or os.path.islink(p)):
1128 if not (os.path.exists(p) or os.path.islink(p)):
1129 self.ui.warn(_("%s does not exist!\n") % dest)
1129 self.ui.warn(_("%s does not exist!\n") % dest)
1130 elif not (os.path.isfile(p) or os.path.islink(p)):
1130 elif not (os.path.isfile(p) or os.path.islink(p)):
1131 self.ui.warn(_("copy failed: %s is not a file or a "
1131 self.ui.warn(_("copy failed: %s is not a file or a "
1132 "symbolic link\n") % dest)
1132 "symbolic link\n") % dest)
1133 else:
1133 else:
1134 wlock = self.wlock()
1134 wlock = self.wlock()
1135 try:
1135 try:
1136 if self.dirstate[dest] in '?r':
1136 if self.dirstate[dest] in '?r':
1137 self.dirstate.add(dest)
1137 self.dirstate.add(dest)
1138 self.dirstate.copy(source, dest)
1138 self.dirstate.copy(source, dest)
1139 finally:
1139 finally:
1140 wlock.release()
1140 wlock.release()
1141
1141
1142 def heads(self, start=None, closed=True):
1142 def heads(self, start=None, closed=True):
1143 heads = self.changelog.heads(start)
1143 heads = self.changelog.heads(start)
1144 def display(head):
1144 def display(head):
1145 if closed:
1145 if closed:
1146 return True
1146 return True
1147 extras = self.changelog.read(head)[5]
1147 extras = self.changelog.read(head)[5]
1148 return ('close' not in extras)
1148 return ('close' not in extras)
1149 # sort the output in rev descending order
1149 # sort the output in rev descending order
1150 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1150 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1151 return [n for (r, n) in sorted(heads)]
1151 return [n for (r, n) in sorted(heads)]
1152
1152
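The rev-descending ordering above comes from decorating each head with its negated revision number before sorting. A standalone sketch of the same idiom, with a plain dict standing in for changelog.rev() and made-up node names and revisions:

# Map node -> revision number (hypothetical values).
revs = {'a': 5, 'b': 12, 'c': 9}
heads = ['a', 'b', 'c']
# Decorate with the negated rev so an ascending sort yields newest-first.
decorated = [(-revs[h], h) for h in heads]
print [h for (r, h) in sorted(decorated)]    # -> ['b', 'c', 'a']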
1153 def branchheads(self, branch=None, start=None, closed=True):
1153 def branchheads(self, branch=None, start=None, closed=True):
1154 if branch is None:
1154 if branch is None:
1155 branch = self[None].branch()
1155 branch = self[None].branch()
1156 branches = self._branchheads()
1156 branches = self._branchheads()
1157 if branch not in branches:
1157 if branch not in branches:
1158 return []
1158 return []
1159 bheads = branches[branch]
1159 bheads = branches[branch]
1160 # the cache returns heads ordered lowest to highest
1160 # the cache returns heads ordered lowest to highest
1161 bheads.reverse()
1161 bheads.reverse()
1162 if start is not None:
1162 if start is not None:
1163 # filter out the heads that cannot be reached from startrev
1163 # filter out the heads that cannot be reached from startrev
1164 bheads = self.changelog.nodesbetween([start], bheads)[2]
1164 bheads = self.changelog.nodesbetween([start], bheads)[2]
1165 if not closed:
1165 if not closed:
1166 bheads = [h for h in bheads if
1166 bheads = [h for h in bheads if
1167 ('close' not in self.changelog.read(h)[5])]
1167 ('close' not in self.changelog.read(h)[5])]
1168 return bheads
1168 return bheads
1169
1169
1170 def branches(self, nodes):
1170 def branches(self, nodes):
1171 if not nodes:
1171 if not nodes:
1172 nodes = [self.changelog.tip()]
1172 nodes = [self.changelog.tip()]
1173 b = []
1173 b = []
1174 for n in nodes:
1174 for n in nodes:
1175 t = n
1175 t = n
1176 while 1:
1176 while 1:
1177 p = self.changelog.parents(n)
1177 p = self.changelog.parents(n)
1178 if p[1] != nullid or p[0] == nullid:
1178 if p[1] != nullid or p[0] == nullid:
1179 b.append((t, n, p[0], p[1]))
1179 b.append((t, n, p[0], p[1]))
1180 break
1180 break
1181 n = p[0]
1181 n = p[0]
1182 return b
1182 return b
1183
1183
1184 def between(self, pairs):
1184 def between(self, pairs):
1185 r = []
1185 r = []
1186
1186
1187 for top, bottom in pairs:
1187 for top, bottom in pairs:
1188 n, l, i = top, [], 0
1188 n, l, i = top, [], 0
1189 f = 1
1189 f = 1
1190
1190
1191 while n != bottom and n != nullid:
1191 while n != bottom and n != nullid:
1192 p = self.changelog.parents(n)[0]
1192 p = self.changelog.parents(n)[0]
1193 if i == f:
1193 if i == f:
1194 l.append(n)
1194 l.append(n)
1195 f = f * 2
1195 f = f * 2
1196 n = p
1196 n = p
1197 i += 1
1197 i += 1
1198
1198
1199 r.append(l)
1199 r.append(l)
1200
1200
1201 return r
1201 return r
1202
1202
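The doubling counter in between() samples ancestors at exponentially growing distances (1, 2, 4, 8, ...) below each top node, which is what lets the binary search further down narrow a long unknown range in few round trips. A standalone sketch of the same walk over a plain list standing in for a linear run of history (list elements play the role of nodes; this is an illustration, not the repository API):

def sample(chain):
    # chain[0] is the top, chain[-1] the bottom; record the nodes sitting
    # 1, 2, 4, 8, ... steps below the top, as the loop above does. The
    # bottom itself is excluded, mirroring the while-loop condition.
    picked, i, f = [], 0, 1
    for n in chain[1:-1]:
        i += 1
        if i == f:
            picked.append(n)
            f *= 2
    return picked

print sample(range(20))    # -> [1, 2, 4, 8, 16]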
1203 def findincoming(self, remote, base=None, heads=None, force=False):
1203 def findincoming(self, remote, base=None, heads=None, force=False):
1204 """Return list of roots of the subsets of missing nodes from remote
1204 """Return list of roots of the subsets of missing nodes from remote
1205
1205
1206 If base dict is specified, assume that these nodes and their parents
1206 If base dict is specified, assume that these nodes and their parents
1207 exist on the remote side and that no child of a node of base exists
1207 exist on the remote side and that no child of a node of base exists
1208 in both remote and self.
1208 in both remote and self.
1209 Furthermore, base will be updated to include the nodes that exist
1209 Furthermore, base will be updated to include the nodes that exist
1210 in both self and remote but have no children in either.
1210 in both self and remote but have no children in either.
1211 If a list of heads is specified, return only nodes which are heads
1211 If a list of heads is specified, return only nodes which are heads
1212 or ancestors of these heads.
1212 or ancestors of these heads.
1213
1213
1214 All the ancestors of base are in self and in remote.
1214 All the ancestors of base are in self and in remote.
1215 All the descendants of the list returned are missing in self.
1215 All the descendants of the list returned are missing in self.
1216 (and so we know that the rest of the nodes are missing in remote, see
1216 (and so we know that the rest of the nodes are missing in remote, see
1217 outgoing)
1217 outgoing)
1218 """
1218 """
1219 return self.findcommonincoming(remote, base, heads, force)[1]
1219 return self.findcommonincoming(remote, base, heads, force)[1]
1220
1220
1221 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1221 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1222 """Return a tuple (common, missing roots, heads) used to identify
1222 """Return a tuple (common, missing roots, heads) used to identify
1223 missing nodes from remote.
1223 missing nodes from remote.
1224
1224
1225 If base dict is specified, assume that these nodes and their parents
1225 If base dict is specified, assume that these nodes and their parents
1226 exist on the remote side and that no child of a node of base exists
1226 exist on the remote side and that no child of a node of base exists
1227 in both remote and self.
1227 in both remote and self.
1228 Furthermore, base will be updated to include the nodes that exist
1228 Furthermore, base will be updated to include the nodes that exist
1229 in both self and remote but have no children in either.
1229 in both self and remote but have no children in either.
1230 If a list of heads is specified, return only nodes which are heads
1230 If a list of heads is specified, return only nodes which are heads
1231 or ancestors of these heads.
1231 or ancestors of these heads.
1232
1232
1233 All the ancestors of base are in self and in remote.
1233 All the ancestors of base are in self and in remote.
1234 """
1234 """
1235 m = self.changelog.nodemap
1235 m = self.changelog.nodemap
1236 search = []
1236 search = []
1237 fetch = set()
1237 fetch = set()
1238 seen = set()
1238 seen = set()
1239 seenbranch = set()
1239 seenbranch = set()
1240 if base == None:
1240 if base == None:
1241 base = {}
1241 base = {}
1242
1242
1243 if not heads:
1243 if not heads:
1244 heads = remote.heads()
1244 heads = remote.heads()
1245
1245
1246 if self.changelog.tip() == nullid:
1246 if self.changelog.tip() == nullid:
1247 base[nullid] = 1
1247 base[nullid] = 1
1248 if heads != [nullid]:
1248 if heads != [nullid]:
1249 return [nullid], [nullid], list(heads)
1249 return [nullid], [nullid], list(heads)
1250 return [nullid], [], []
1250 return [nullid], [], []
1251
1251
1252 # assume we're closer to the tip than the root
1252 # assume we're closer to the tip than the root
1253 # and start by examining the heads
1253 # and start by examining the heads
1254 self.ui.status(_("searching for changes\n"))
1254 self.ui.status(_("searching for changes\n"))
1255
1255
1256 unknown = []
1256 unknown = []
1257 for h in heads:
1257 for h in heads:
1258 if h not in m:
1258 if h not in m:
1259 unknown.append(h)
1259 unknown.append(h)
1260 else:
1260 else:
1261 base[h] = 1
1261 base[h] = 1
1262
1262
1263 heads = unknown
1263 heads = unknown
1264 if not unknown:
1264 if not unknown:
1265 return base.keys(), [], []
1265 return base.keys(), [], []
1266
1266
1267 req = set(unknown)
1267 req = set(unknown)
1268 reqcnt = 0
1268 reqcnt = 0
1269
1269
1270 # search through remote branches
1270 # search through remote branches
1271 # a 'branch' here is a linear segment of history, with four parts:
1271 # a 'branch' here is a linear segment of history, with four parts:
1272 # head, root, first parent, second parent
1272 # head, root, first parent, second parent
1273 # (a branch always has two parents (or none) by definition)
1273 # (a branch always has two parents (or none) by definition)
1274 unknown = remote.branches(unknown)
1274 unknown = remote.branches(unknown)
1275 while unknown:
1275 while unknown:
1276 r = []
1276 r = []
1277 while unknown:
1277 while unknown:
1278 n = unknown.pop(0)
1278 n = unknown.pop(0)
1279 if n[0] in seen:
1279 if n[0] in seen:
1280 continue
1280 continue
1281
1281
1282 self.ui.debug(_("examining %s:%s\n")
1282 self.ui.debug(_("examining %s:%s\n")
1283 % (short(n[0]), short(n[1])))
1283 % (short(n[0]), short(n[1])))
1284 if n[0] == nullid: # found the end of the branch
1284 if n[0] == nullid: # found the end of the branch
1285 pass
1285 pass
1286 elif n in seenbranch:
1286 elif n in seenbranch:
1287 self.ui.debug(_("branch already found\n"))
1287 self.ui.debug(_("branch already found\n"))
1288 continue
1288 continue
1289 elif n[1] and n[1] in m: # do we know the base?
1289 elif n[1] and n[1] in m: # do we know the base?
1290 self.ui.debug(_("found incomplete branch %s:%s\n")
1290 self.ui.debug(_("found incomplete branch %s:%s\n")
1291 % (short(n[0]), short(n[1])))
1291 % (short(n[0]), short(n[1])))
1292 search.append(n[0:2]) # schedule branch range for scanning
1292 search.append(n[0:2]) # schedule branch range for scanning
1293 seenbranch.add(n)
1293 seenbranch.add(n)
1294 else:
1294 else:
1295 if n[1] not in seen and n[1] not in fetch:
1295 if n[1] not in seen and n[1] not in fetch:
1296 if n[2] in m and n[3] in m:
1296 if n[2] in m and n[3] in m:
1297 self.ui.debug(_("found new changeset %s\n") %
1297 self.ui.debug(_("found new changeset %s\n") %
1298 short(n[1]))
1298 short(n[1]))
1299 fetch.add(n[1]) # earliest unknown
1299 fetch.add(n[1]) # earliest unknown
1300 for p in n[2:4]:
1300 for p in n[2:4]:
1301 if p in m:
1301 if p in m:
1302 base[p] = 1 # latest known
1302 base[p] = 1 # latest known
1303
1303
1304 for p in n[2:4]:
1304 for p in n[2:4]:
1305 if p not in req and p not in m:
1305 if p not in req and p not in m:
1306 r.append(p)
1306 r.append(p)
1307 req.add(p)
1307 req.add(p)
1308 seen.add(n[0])
1308 seen.add(n[0])
1309
1309
1310 if r:
1310 if r:
1311 reqcnt += 1
1311 reqcnt += 1
1312 self.ui.debug(_("request %d: %s\n") %
1312 self.ui.debug(_("request %d: %s\n") %
1313 (reqcnt, " ".join(map(short, r))))
1313 (reqcnt, " ".join(map(short, r))))
1314 for p in xrange(0, len(r), 10):
1314 for p in xrange(0, len(r), 10):
1315 for b in remote.branches(r[p:p+10]):
1315 for b in remote.branches(r[p:p+10]):
1316 self.ui.debug(_("received %s:%s\n") %
1316 self.ui.debug(_("received %s:%s\n") %
1317 (short(b[0]), short(b[1])))
1317 (short(b[0]), short(b[1])))
1318 unknown.append(b)
1318 unknown.append(b)
1319
1319
1320 # do binary search on the branches we found
1320 # do binary search on the branches we found
1321 while search:
1321 while search:
1322 newsearch = []
1322 newsearch = []
1323 reqcnt += 1
1323 reqcnt += 1
1324 for n, l in zip(search, remote.between(search)):
1324 for n, l in zip(search, remote.between(search)):
1325 l.append(n[1])
1325 l.append(n[1])
1326 p = n[0]
1326 p = n[0]
1327 f = 1
1327 f = 1
1328 for i in l:
1328 for i in l:
1329 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1329 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1330 if i in m:
1330 if i in m:
1331 if f <= 2:
1331 if f <= 2:
1332 self.ui.debug(_("found new branch changeset %s\n") %
1332 self.ui.debug(_("found new branch changeset %s\n") %
1333 short(p))
1333 short(p))
1334 fetch.add(p)
1334 fetch.add(p)
1335 base[i] = 1
1335 base[i] = 1
1336 else:
1336 else:
1337 self.ui.debug(_("narrowed branch search to %s:%s\n")
1337 self.ui.debug(_("narrowed branch search to %s:%s\n")
1338 % (short(p), short(i)))
1338 % (short(p), short(i)))
1339 newsearch.append((p, i))
1339 newsearch.append((p, i))
1340 break
1340 break
1341 p, f = i, f * 2
1341 p, f = i, f * 2
1342 search = newsearch
1342 search = newsearch
1343
1343
1344 # sanity check our fetch list
1344 # sanity check our fetch list
1345 for f in fetch:
1345 for f in fetch:
1346 if f in m:
1346 if f in m:
1347 raise error.RepoError(_("already have changeset ")
1347 raise error.RepoError(_("already have changeset ")
1348 + short(f[:4]))
1348 + short(f[:4]))
1349
1349
1350 if base.keys() == [nullid]:
1350 if base.keys() == [nullid]:
1351 if force:
1351 if force:
1352 self.ui.warn(_("warning: repository is unrelated\n"))
1352 self.ui.warn(_("warning: repository is unrelated\n"))
1353 else:
1353 else:
1354 raise util.Abort(_("repository is unrelated"))
1354 raise util.Abort(_("repository is unrelated"))
1355
1355
1356 self.ui.debug(_("found new changesets starting at ") +
1356 self.ui.debug(_("found new changesets starting at ") +
1357 " ".join([short(f) for f in fetch]) + "\n")
1357 " ".join([short(f) for f in fetch]) + "\n")
1358
1358
1359 self.ui.debug(_("%d total queries\n") % reqcnt)
1359 self.ui.debug(_("%d total queries\n") % reqcnt)
1360
1360
1361 return base.keys(), list(fetch), heads
1361 return base.keys(), list(fetch), heads
1362
1362
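As pull() further down shows, the three-element result is meant to be unpacked directly. A minimal sketch of such a caller, assuming repo is a local repository and remote is a peer repository object obtained elsewhere:

from mercurial.node import nullid

# Hypothetical caller: ask what we are missing relative to remote.
common, fetch, rheads = repo.findcommonincoming(remote, force=False)
if fetch == [nullid]:
    repo.ui.status("requesting all changes\n")   # the local repo is empty
elif not fetch:
    repo.ui.status("no changes found\n")         # nothing is missing locally
else:
    repo.ui.status("%d missing roots\n" % len(fetch))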
1363 def findoutgoing(self, remote, base=None, heads=None, force=False):
1363 def findoutgoing(self, remote, base=None, heads=None, force=False):
1364 """Return list of nodes that are roots of subsets not in remote
1364 """Return list of nodes that are roots of subsets not in remote
1365
1365
1366 If base dict is specified, assume that these nodes and their parents
1366 If base dict is specified, assume that these nodes and their parents
1367 exist on the remote side.
1367 exist on the remote side.
1368 If a list of heads is specified, return only nodes which are heads
1368 If a list of heads is specified, return only nodes which are heads
1369 or ancestors of these heads, and return a second element which
1369 or ancestors of these heads, and return a second element which
1370 contains all remote heads which get new children.
1370 contains all remote heads which get new children.
1371 """
1371 """
1372 if base == None:
1372 if base == None:
1373 base = {}
1373 base = {}
1374 self.findincoming(remote, base, heads, force=force)
1374 self.findincoming(remote, base, heads, force=force)
1375
1375
1376 self.ui.debug(_("common changesets up to ")
1376 self.ui.debug(_("common changesets up to ")
1377 + " ".join(map(short, base.keys())) + "\n")
1377 + " ".join(map(short, base.keys())) + "\n")
1378
1378
1379 remain = set(self.changelog.nodemap)
1379 remain = set(self.changelog.nodemap)
1380
1380
1381 # prune everything remote has from the tree
1381 # prune everything remote has from the tree
1382 remain.remove(nullid)
1382 remain.remove(nullid)
1383 remove = base.keys()
1383 remove = base.keys()
1384 while remove:
1384 while remove:
1385 n = remove.pop(0)
1385 n = remove.pop(0)
1386 if n in remain:
1386 if n in remain:
1387 remain.remove(n)
1387 remain.remove(n)
1388 for p in self.changelog.parents(n):
1388 for p in self.changelog.parents(n):
1389 remove.append(p)
1389 remove.append(p)
1390
1390
1391 # find every node whose parents have been pruned
1391 # find every node whose parents have been pruned
1392 subset = []
1392 subset = []
1393 # find every remote head that will get new children
1393 # find every remote head that will get new children
1394 updated_heads = {}
1394 updated_heads = {}
1395 for n in remain:
1395 for n in remain:
1396 p1, p2 = self.changelog.parents(n)
1396 p1, p2 = self.changelog.parents(n)
1397 if p1 not in remain and p2 not in remain:
1397 if p1 not in remain and p2 not in remain:
1398 subset.append(n)
1398 subset.append(n)
1399 if heads:
1399 if heads:
1400 if p1 in heads:
1400 if p1 in heads:
1401 updated_heads[p1] = True
1401 updated_heads[p1] = True
1402 if p2 in heads:
1402 if p2 in heads:
1403 updated_heads[p2] = True
1403 updated_heads[p2] = True
1404
1404
1405 # this is the set of all roots we have to push
1405 # this is the set of all roots we have to push
1406 if heads:
1406 if heads:
1407 return subset, updated_heads.keys()
1407 return subset, updated_heads.keys()
1408 else:
1408 else:
1409 return subset
1409 return subset
1410
1410
1411 def pull(self, remote, heads=None, force=False):
1411 def pull(self, remote, heads=None, force=False):
1412 lock = self.lock()
1412 lock = self.lock()
1413 try:
1413 try:
1414 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1414 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1415 force=force)
1415 force=force)
1416 if fetch == [nullid]:
1416 if fetch == [nullid]:
1417 self.ui.status(_("requesting all changes\n"))
1417 self.ui.status(_("requesting all changes\n"))
1418
1418
1419 if not fetch:
1419 if not fetch:
1420 self.ui.status(_("no changes found\n"))
1420 self.ui.status(_("no changes found\n"))
1421 return 0
1421 return 0
1422
1422
1423 if heads is None and remote.capable('changegroupsubset'):
1423 if heads is None and remote.capable('changegroupsubset'):
1424 heads = rheads
1424 heads = rheads
1425
1425
1426 if heads is None:
1426 if heads is None:
1427 cg = remote.changegroup(fetch, 'pull')
1427 cg = remote.changegroup(fetch, 'pull')
1428 else:
1428 else:
1429 if not remote.capable('changegroupsubset'):
1429 if not remote.capable('changegroupsubset'):
1430 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1430 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1431 cg = remote.changegroupsubset(fetch, heads, 'pull')
1431 cg = remote.changegroupsubset(fetch, heads, 'pull')
1432 return self.addchangegroup(cg, 'pull', remote.url())
1432 return self.addchangegroup(cg, 'pull', remote.url())
1433 finally:
1433 finally:
1434 lock.release()
1434 lock.release()
1435
1435
1436 def push(self, remote, force=False, revs=None):
1436 def push(self, remote, force=False, revs=None):
1437 # there are two ways to push to remote repo:
1437 # there are two ways to push to remote repo:
1438 #
1438 #
1439 # addchangegroup assumes local user can lock remote
1439 # addchangegroup assumes local user can lock remote
1440 # repo (local filesystem, old ssh servers).
1440 # repo (local filesystem, old ssh servers).
1441 #
1441 #
1442 # unbundle assumes local user cannot lock remote repo (new ssh
1442 # unbundle assumes local user cannot lock remote repo (new ssh
1443 # servers, http servers).
1443 # servers, http servers).
1444
1444
1445 if remote.capable('unbundle'):
1445 if remote.capable('unbundle'):
1446 return self.push_unbundle(remote, force, revs)
1446 return self.push_unbundle(remote, force, revs)
1447 return self.push_addchangegroup(remote, force, revs)
1447 return self.push_addchangegroup(remote, force, revs)
1448
1448
1449 def prepush(self, remote, force, revs):
1449 def prepush(self, remote, force, revs):
1450 common = {}
1450 common = {}
1451 remote_heads = remote.heads()
1451 remote_heads = remote.heads()
1452 inc = self.findincoming(remote, common, remote_heads, force=force)
1452 inc = self.findincoming(remote, common, remote_heads, force=force)
1453
1453
1454 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1454 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1455 if revs is not None:
1455 if revs is not None:
1456 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1456 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1457 else:
1457 else:
1458 bases, heads = update, self.changelog.heads()
1458 bases, heads = update, self.changelog.heads()
1459
1459
1460 if not bases:
1460 if not bases:
1461 self.ui.status(_("no changes found\n"))
1461 self.ui.status(_("no changes found\n"))
1462 return None, 1
1462 return None, 1
1463 elif not force:
1463 elif not force:
1464 # check if we're creating new remote heads
1464 # check if we're creating new remote heads
1465 # to be a remote head after push, node must be either
1465 # to be a remote head after push, node must be either
1466 # - unknown locally
1466 # - unknown locally
1467 # - a local outgoing head descended from update
1467 # - a local outgoing head descended from update
1468 # - a remote head that's known locally and not
1468 # - a remote head that's known locally and not
1469 # ancestral to an outgoing head
1469 # ancestral to an outgoing head
1470
1470
1471 warn = 0
1471 warn = 0
1472
1472
1473 if remote_heads == [nullid]:
1473 if remote_heads == [nullid]:
1474 warn = 0
1474 warn = 0
1475 elif not revs and len(heads) > len(remote_heads):
1475 elif not revs and len(heads) > len(remote_heads):
1476 warn = 1
1476 warn = 1
1477 else:
1477 else:
1478 newheads = list(heads)
1478 newheads = list(heads)
1479 for r in remote_heads:
1479 for r in remote_heads:
1480 if r in self.changelog.nodemap:
1480 if r in self.changelog.nodemap:
1481 desc = self.changelog.heads(r, heads)
1481 desc = self.changelog.heads(r, heads)
1482 l = [h for h in heads if h in desc]
1482 l = [h for h in heads if h in desc]
1483 if not l:
1483 if not l:
1484 newheads.append(r)
1484 newheads.append(r)
1485 else:
1485 else:
1486 newheads.append(r)
1486 newheads.append(r)
1487 if len(newheads) > len(remote_heads):
1487 if len(newheads) > len(remote_heads):
1488 warn = 1
1488 warn = 1
1489
1489
1490 if warn:
1490 if warn:
1491 self.ui.warn(_("abort: push creates new remote heads!\n"))
1491 self.ui.warn(_("abort: push creates new remote heads!\n"))
1492 self.ui.status(_("(did you forget to merge?"
1492 self.ui.status(_("(did you forget to merge?"
1493 " use push -f to force)\n"))
1493 " use push -f to force)\n"))
1494 return None, 0
1494 return None, 0
1495 elif inc:
1495 elif inc:
1496 self.ui.warn(_("note: unsynced remote changes!\n"))
1496 self.ui.warn(_("note: unsynced remote changes!\n"))
1497
1497
1498
1498
1499 if revs is None:
1499 if revs is None:
1500 # use the fast path, no race possible on push
1500 # use the fast path, no race possible on push
1501 cg = self._changegroup(common.keys(), 'push')
1501 cg = self._changegroup(common.keys(), 'push')
1502 else:
1502 else:
1503 cg = self.changegroupsubset(update, revs, 'push')
1503 cg = self.changegroupsubset(update, revs, 'push')
1504 return cg, remote_heads
1504 return cg, remote_heads
1505
1505
1506 def push_addchangegroup(self, remote, force, revs):
1506 def push_addchangegroup(self, remote, force, revs):
1507 lock = remote.lock()
1507 lock = remote.lock()
1508 try:
1508 try:
1509 ret = self.prepush(remote, force, revs)
1509 ret = self.prepush(remote, force, revs)
1510 if ret[0] is not None:
1510 if ret[0] is not None:
1511 cg, remote_heads = ret
1511 cg, remote_heads = ret
1512 return remote.addchangegroup(cg, 'push', self.url())
1512 return remote.addchangegroup(cg, 'push', self.url())
1513 return ret[1]
1513 return ret[1]
1514 finally:
1514 finally:
1515 lock.release()
1515 lock.release()
1516
1516
1517 def push_unbundle(self, remote, force, revs):
1517 def push_unbundle(self, remote, force, revs):
1518 # local repo finds heads on server, finds out what revs it
1518 # local repo finds heads on server, finds out what revs it
1519 # must push. once revs transferred, if server finds it has
1519 # must push. once revs transferred, if server finds it has
1520 # different heads (someone else won commit/push race), server
1520 # different heads (someone else won commit/push race), server
1521 # aborts.
1521 # aborts.
1522
1522
1523 ret = self.prepush(remote, force, revs)
1523 ret = self.prepush(remote, force, revs)
1524 if ret[0] is not None:
1524 if ret[0] is not None:
1525 cg, remote_heads = ret
1525 cg, remote_heads = ret
1526 if force: remote_heads = ['force']
1526 if force: remote_heads = ['force']
1527 return remote.unbundle(cg, remote_heads, 'push')
1527 return remote.unbundle(cg, remote_heads, 'push')
1528 return ret[1]
1528 return ret[1]
1529
1529
1530 def changegroupinfo(self, nodes, source):
1530 def changegroupinfo(self, nodes, source):
1531 if self.ui.verbose or source == 'bundle':
1531 if self.ui.verbose or source == 'bundle':
1532 self.ui.status(_("%d changesets found\n") % len(nodes))
1532 self.ui.status(_("%d changesets found\n") % len(nodes))
1533 if self.ui.debugflag:
1533 if self.ui.debugflag:
1534 self.ui.debug(_("list of changesets:\n"))
1534 self.ui.debug(_("list of changesets:\n"))
1535 for node in nodes:
1535 for node in nodes:
1536 self.ui.debug("%s\n" % hex(node))
1536 self.ui.debug("%s\n" % hex(node))
1537
1537
1538 def changegroupsubset(self, bases, heads, source, extranodes=None):
1538 def changegroupsubset(self, bases, heads, source, extranodes=None):
1539 """This function generates a changegroup consisting of all the nodes
1539 """This function generates a changegroup consisting of all the nodes
1540 that are descendants of any of the bases, and ancestors of any of
1540 that are descendants of any of the bases, and ancestors of any of
1541 the heads.
1541 the heads.
1542
1542
1543 It is fairly complex as determining which filenodes and which
1543 It is fairly complex as determining which filenodes and which
1544 manifest nodes need to be included for the changeset to be complete
1544 manifest nodes need to be included for the changeset to be complete
1545 is non-trivial.
1545 is non-trivial.
1546
1546
1547 Another wrinkle is doing the reverse, figuring out which changeset in
1547 Another wrinkle is doing the reverse, figuring out which changeset in
1548 the changegroup a particular filenode or manifestnode belongs to.
1548 the changegroup a particular filenode or manifestnode belongs to.
1549
1549
1550 The caller can specify some nodes that must be included in the
1550 The caller can specify some nodes that must be included in the
1551 changegroup using the extranodes argument. It should be a dict
1551 changegroup using the extranodes argument. It should be a dict
1552 where the keys are the filenames (or 1 for the manifest), and the
1552 where the keys are the filenames (or 1 for the manifest), and the
1553 values are lists of (node, linknode) tuples, where node is a wanted
1553 values are lists of (node, linknode) tuples, where node is a wanted
1554 node and linknode is the changelog node that should be transmitted as
1554 node and linknode is the changelog node that should be transmitted as
1555 the linkrev.
1555 the linkrev.
1556 """
1556 """
1557
1557
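# Illustrative shape for the extranodes mapping described above (an assumed
# example, not taken from any caller in this file):
#
#     extranodes = {
#         'foo/bar.txt': [(filenode1, linknode1), (filenode2, linknode2)],
#         1:             [(manifestnode, linknode)],  # key 1 = the manifest
#     }
#
# where every node and linknode is a binary nodeid, and each linknode names
# the changelog entry to be transmitted as that node's linkrev.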
1558 if extranodes is None:
1558 if extranodes is None:
1559 # can we go through the fast path ?
1559 # can we go through the fast path ?
1560 heads.sort()
1560 heads.sort()
1561 allheads = self.heads()
1561 allheads = self.heads()
1562 allheads.sort()
1562 allheads.sort()
1563 if heads == allheads:
1563 if heads == allheads:
1564 common = []
1564 common = []
1565 # parents of bases are known from both sides
1565 # parents of bases are known from both sides
1566 for n in bases:
1566 for n in bases:
1567 for p in self.changelog.parents(n):
1567 for p in self.changelog.parents(n):
1568 if p != nullid:
1568 if p != nullid:
1569 common.append(p)
1569 common.append(p)
1570 return self._changegroup(common, source)
1570 return self._changegroup(common, source)
1571
1571
1572 self.hook('preoutgoing', throw=True, source=source)
1572 self.hook('preoutgoing', throw=True, source=source)
1573
1573
1574 # Set up some initial variables
1574 # Set up some initial variables
1575 # Make it easy to refer to self.changelog
1575 # Make it easy to refer to self.changelog
1576 cl = self.changelog
1576 cl = self.changelog
1577 # msng is short for missing - compute the list of changesets in this
1577 # msng is short for missing - compute the list of changesets in this
1578 # changegroup.
1578 # changegroup.
1579 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1579 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1580 self.changegroupinfo(msng_cl_lst, source)
1580 self.changegroupinfo(msng_cl_lst, source)
1581 # Some bases may turn out to be superfluous, and some heads may be
1581 # Some bases may turn out to be superfluous, and some heads may be
1582 # too. nodesbetween will return the minimal set of bases and heads
1582 # too. nodesbetween will return the minimal set of bases and heads
1583 # necessary to re-create the changegroup.
1583 # necessary to re-create the changegroup.
1584
1584
1585 # Known heads are the list of heads that it is assumed the recipient
1585 # Known heads are the list of heads that it is assumed the recipient
1586 # of this changegroup will know about.
1586 # of this changegroup will know about.
1587 knownheads = {}
1587 knownheads = {}
1588 # We assume that all parents of bases are known heads.
1588 # We assume that all parents of bases are known heads.
1589 for n in bases:
1589 for n in bases:
1590 for p in cl.parents(n):
1590 for p in cl.parents(n):
1591 if p != nullid:
1591 if p != nullid:
1592 knownheads[p] = 1
1592 knownheads[p] = 1
1593 knownheads = knownheads.keys()
1593 knownheads = knownheads.keys()
1594 if knownheads:
1594 if knownheads:
1595 # Now that we know what heads are known, we can compute which
1595 # Now that we know what heads are known, we can compute which
1596 # changesets are known. The recipient must know about all
1596 # changesets are known. The recipient must know about all
1597 # changesets required to reach the known heads from the null
1597 # changesets required to reach the known heads from the null
1598 # changeset.
1598 # changeset.
1599 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1599 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1600 junk = None
1600 junk = None
1601 # Transform the list into a set.
1601 # Transform the list into a set.
1602 has_cl_set = set(has_cl_set)
1602 has_cl_set = set(has_cl_set)
1603 else:
1603 else:
1604 # If there were no known heads, the recipient cannot be assumed to
1604 # If there were no known heads, the recipient cannot be assumed to
1605 # know about any changesets.
1605 # know about any changesets.
1606 has_cl_set = set()
1606 has_cl_set = set()
1607
1607
1608 # Make it easy to refer to self.manifest
1608 # Make it easy to refer to self.manifest
1609 mnfst = self.manifest
1609 mnfst = self.manifest
1610 # We don't know which manifests are missing yet
1610 # We don't know which manifests are missing yet
1611 msng_mnfst_set = {}
1611 msng_mnfst_set = {}
1612 # Nor do we know which filenodes are missing.
1612 # Nor do we know which filenodes are missing.
1613 msng_filenode_set = {}
1613 msng_filenode_set = {}
1614
1614
1615 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1615 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1616 junk = None
1616 junk = None
1617
1617
1618 # A changeset always belongs to itself, so the changenode lookup
1618 # A changeset always belongs to itself, so the changenode lookup
1619 # function for a changenode is identity.
1619 # function for a changenode is identity.
1620 def identity(x):
1620 def identity(x):
1621 return x
1621 return x
1622
1622
1623 # A function generating function. Sets up an environment for the
1623 # A function generating function. Sets up an environment for the
1624 # inner function.
1624 # inner function.
1625 def cmp_by_rev_func(revlog):
1625 def cmp_by_rev_func(revlog):
1626 # Compare two nodes by their revision number in the environment's
1626 # Compare two nodes by their revision number in the environment's
1627 # revision history. Since the revision number both represents the
1627 # revision history. Since the revision number both represents the
1628 # most efficient order to read the nodes in, and represents a
1628 # most efficient order to read the nodes in, and represents a
1629 # topological sorting of the nodes, this function is often useful.
1629 # topological sorting of the nodes, this function is often useful.
1630 def cmp_by_rev(a, b):
1630 def cmp_by_rev(a, b):
1631 return cmp(revlog.rev(a), revlog.rev(b))
1631 return cmp(revlog.rev(a), revlog.rev(b))
1632 return cmp_by_rev
1632 return cmp_by_rev
1633
1633
1634 # If we determine that a particular file or manifest node must be a
1634 # If we determine that a particular file or manifest node must be a
1635 # node that the recipient of the changegroup will already have, we can
1635 # node that the recipient of the changegroup will already have, we can
1636 # also assume the recipient will have all the parents. This function
1636 # also assume the recipient will have all the parents. This function
1637 # prunes them from the set of missing nodes.
1637 # prunes them from the set of missing nodes.
1638 def prune_parents(revlog, hasset, msngset):
1638 def prune_parents(revlog, hasset, msngset):
1639 haslst = hasset.keys()
1639 haslst = hasset.keys()
1640 haslst.sort(cmp_by_rev_func(revlog))
1640 haslst.sort(cmp_by_rev_func(revlog))
1641 for node in haslst:
1641 for node in haslst:
1642 parentlst = [p for p in revlog.parents(node) if p != nullid]
1642 parentlst = [p for p in revlog.parents(node) if p != nullid]
1643 while parentlst:
1643 while parentlst:
1644 n = parentlst.pop()
1644 n = parentlst.pop()
1645 if n not in hasset:
1645 if n not in hasset:
1646 hasset[n] = 1
1646 hasset[n] = 1
1647 p = [p for p in revlog.parents(n) if p != nullid]
1647 p = [p for p in revlog.parents(n) if p != nullid]
1648 parentlst.extend(p)
1648 parentlst.extend(p)
1649 for n in hasset:
1649 for n in hasset:
1650 msngset.pop(n, None)
1650 msngset.pop(n, None)
1651
1651
1652 # This is a function generating function used to set up an environment
1652 # This is a function generating function used to set up an environment
1653 # for the inner function to execute in.
1653 # for the inner function to execute in.
1654 def manifest_and_file_collector(changedfileset):
1654 def manifest_and_file_collector(changedfileset):
1655 # This is an information gathering function that gathers
1655 # This is an information gathering function that gathers
1656 # information from each changeset node that goes out as part of
1656 # information from each changeset node that goes out as part of
1657 # the changegroup. The information gathered is a list of which
1657 # the changegroup. The information gathered is a list of which
1658 # manifest nodes are potentially required (the recipient may
1658 # manifest nodes are potentially required (the recipient may
1659 # already have them) and total list of all files which were
1659 # already have them) and total list of all files which were
1660 # changed in any changeset in the changegroup.
1660 # changed in any changeset in the changegroup.
1661 #
1661 #
1662 # We also remember the first changenode each manifest was
1662 # We also remember the first changenode each manifest was
1663 # referenced by, so we can later determine which changenode 'owns'
1663 # referenced by, so we can later determine which changenode 'owns'
1664 # the manifest.
1664 # the manifest.
1665 def collect_manifests_and_files(clnode):
1665 def collect_manifests_and_files(clnode):
1666 c = cl.read(clnode)
1666 c = cl.read(clnode)
1667 for f in c[3]:
1667 for f in c[3]:
1668 # This is to make sure we only have one instance of each
1668 # This is to make sure we only have one instance of each
1669 # filename string for each filename.
1669 # filename string for each filename.
1670 changedfileset.setdefault(f, f)
1670 changedfileset.setdefault(f, f)
1671 msng_mnfst_set.setdefault(c[0], clnode)
1671 msng_mnfst_set.setdefault(c[0], clnode)
1672 return collect_manifests_and_files
1672 return collect_manifests_and_files
1673
1673
1674 # Figure out which manifest nodes (of the ones we think might be part
1674 # Figure out which manifest nodes (of the ones we think might be part
1675 # of the changegroup) the recipient must know about and remove them
1675 # of the changegroup) the recipient must know about and remove them
1676 # from the changegroup.
1676 # from the changegroup.
1677 def prune_manifests():
1677 def prune_manifests():
1678 has_mnfst_set = {}
1678 has_mnfst_set = {}
1679 for n in msng_mnfst_set:
1679 for n in msng_mnfst_set:
1680 # If a 'missing' manifest thinks it belongs to a changenode
1680 # If a 'missing' manifest thinks it belongs to a changenode
1681 # the recipient is assumed to have, obviously the recipient
1681 # the recipient is assumed to have, obviously the recipient
1682 # must have that manifest.
1682 # must have that manifest.
1683 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1683 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1684 if linknode in has_cl_set:
1684 if linknode in has_cl_set:
1685 has_mnfst_set[n] = 1
1685 has_mnfst_set[n] = 1
1686 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1686 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1687
1687
1688 # Use the information collected in collect_manifests_and_files to say
1688 # Use the information collected in collect_manifests_and_files to say
1689 # which changenode any manifestnode belongs to.
1689 # which changenode any manifestnode belongs to.
1690 def lookup_manifest_link(mnfstnode):
1690 def lookup_manifest_link(mnfstnode):
1691 return msng_mnfst_set[mnfstnode]
1691 return msng_mnfst_set[mnfstnode]
1692
1692
1693 # A function generating function that sets up the initial environment
1693 # A function generating function that sets up the initial environment
1694 # for the inner function.
1694 # for the inner function.
1695 def filenode_collector(changedfiles):
1695 def filenode_collector(changedfiles):
1696 next_rev = [0]
1696 next_rev = [0]
1697 # This gathers information from each manifestnode included in the
1697 # This gathers information from each manifestnode included in the
1698 # changegroup about which filenodes the manifest node references
1698 # changegroup about which filenodes the manifest node references
1699 # so we can include those in the changegroup too.
1699 # so we can include those in the changegroup too.
1700 #
1700 #
1701 # It also remembers which changenode each filenode belongs to. It
1701 # It also remembers which changenode each filenode belongs to. It
1702 # does this by assuming that a filenode belongs to the changenode
1702 # does this by assuming that a filenode belongs to the changenode
1703 # that the first manifest referencing it belongs to.
1703 # that the first manifest referencing it belongs to.
1704 def collect_msng_filenodes(mnfstnode):
1704 def collect_msng_filenodes(mnfstnode):
1705 r = mnfst.rev(mnfstnode)
1705 r = mnfst.rev(mnfstnode)
1706 if r == next_rev[0]:
1706 if r == next_rev[0]:
1707 # If the last rev we looked at was the one just previous,
1707 # If the last rev we looked at was the one just previous,
1708 # we only need to see a diff.
1708 # we only need to see a diff.
1709 deltamf = mnfst.readdelta(mnfstnode)
1709 deltamf = mnfst.readdelta(mnfstnode)
1710 # For each line in the delta
1710 # For each line in the delta
1711 for f, fnode in deltamf.iteritems():
1711 for f, fnode in deltamf.iteritems():
1712 f = changedfiles.get(f, None)
1712 f = changedfiles.get(f, None)
1713 # And if the file is in the list of files we care
1713 # And if the file is in the list of files we care
1714 # about.
1714 # about.
1715 if f is not None:
1715 if f is not None:
1716 # Get the changenode this manifest belongs to
1716 # Get the changenode this manifest belongs to
1717 clnode = msng_mnfst_set[mnfstnode]
1717 clnode = msng_mnfst_set[mnfstnode]
1718 # Create the set of filenodes for the file if
1718 # Create the set of filenodes for the file if
1719 # there isn't one already.
1719 # there isn't one already.
1720 ndset = msng_filenode_set.setdefault(f, {})
1720 ndset = msng_filenode_set.setdefault(f, {})
1721 # And set the filenode's changelog node to the
1721 # And set the filenode's changelog node to the
1722 # manifest's if it hasn't been set already.
1722 # manifest's if it hasn't been set already.
1723 ndset.setdefault(fnode, clnode)
1723 ndset.setdefault(fnode, clnode)
1724 else:
1724 else:
1725 # Otherwise we need a full manifest.
1725 # Otherwise we need a full manifest.
1726 m = mnfst.read(mnfstnode)
1726 m = mnfst.read(mnfstnode)
1727 # For every file we care about.
1727 # For every file we care about.
1728 for f in changedfiles:
1728 for f in changedfiles:
1729 fnode = m.get(f, None)
1729 fnode = m.get(f, None)
1730 # If it's in the manifest
1730 # If it's in the manifest
1731 if fnode is not None:
1731 if fnode is not None:
1732 # See comments above.
1732 # See comments above.
1733 clnode = msng_mnfst_set[mnfstnode]
1733 clnode = msng_mnfst_set[mnfstnode]
1734 ndset = msng_filenode_set.setdefault(f, {})
1734 ndset = msng_filenode_set.setdefault(f, {})
1735 ndset.setdefault(fnode, clnode)
1735 ndset.setdefault(fnode, clnode)
1736 # Remember the revision we hope to see next.
1736 # Remember the revision we hope to see next.
1737 next_rev[0] = r + 1
1737 next_rev[0] = r + 1
1738 return collect_msng_filenodes
1738 return collect_msng_filenodes
1739
1739
1740 # We have a list of filenodes we think we need for a file, let's remove
1740 # We have a list of filenodes we think we need for a file, let's remove
1741 # all those we know the recipient must have.
1741 # all those we know the recipient must have.
1742 def prune_filenodes(f, filerevlog):
1742 def prune_filenodes(f, filerevlog):
1743 msngset = msng_filenode_set[f]
1743 msngset = msng_filenode_set[f]
1744 hasset = {}
1744 hasset = {}
1745 # If a 'missing' filenode thinks it belongs to a changenode we
1745 # If a 'missing' filenode thinks it belongs to a changenode we
1746 # assume the recipient must have, then the recipient must have
1746 # assume the recipient must have, then the recipient must have
1747 # that filenode.
1747 # that filenode.
1748 for n in msngset:
1748 for n in msngset:
1749 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1749 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1750 if clnode in has_cl_set:
1750 if clnode in has_cl_set:
1751 hasset[n] = 1
1751 hasset[n] = 1
1752 prune_parents(filerevlog, hasset, msngset)
1752 prune_parents(filerevlog, hasset, msngset)
1753
1753
1754 # A function generating function that sets up a context for the
1754 # A function generating function that sets up a context for the
1755 # inner function.
1755 # inner function.
1756 def lookup_filenode_link_func(fname):
1756 def lookup_filenode_link_func(fname):
1757 msngset = msng_filenode_set[fname]
1757 msngset = msng_filenode_set[fname]
1758 # Lookup the changenode the filenode belongs to.
1758 # Lookup the changenode the filenode belongs to.
1759 def lookup_filenode_link(fnode):
1759 def lookup_filenode_link(fnode):
1760 return msngset[fnode]
1760 return msngset[fnode]
1761 return lookup_filenode_link
1761 return lookup_filenode_link
1762
1762
1763 # Add the nodes that were explicitly requested.
1763 # Add the nodes that were explicitly requested.
1764 def add_extra_nodes(name, nodes):
1764 def add_extra_nodes(name, nodes):
1765 if not extranodes or name not in extranodes:
1765 if not extranodes or name not in extranodes:
1766 return
1766 return
1767
1767
1768 for node, linknode in extranodes[name]:
1768 for node, linknode in extranodes[name]:
1769 if node not in nodes:
1769 if node not in nodes:
1770 nodes[node] = linknode
1770 nodes[node] = linknode
1771
1771
1772 # Now that we have all these utility functions to help out and
1772 # Now that we have all these utility functions to help out and
1773 # logically divide up the task, generate the group.
1773 # logically divide up the task, generate the group.
1774 def gengroup():
1774 def gengroup():
1775 # The set of changed files starts empty.
1775 # The set of changed files starts empty.
1776 changedfiles = {}
1776 changedfiles = {}
1777 # Create a changenode group generator that will call our functions
1777 # Create a changenode group generator that will call our functions
1778 # back to lookup the owning changenode and collect information.
1778 # back to lookup the owning changenode and collect information.
1779 group = cl.group(msng_cl_lst, identity,
1779 group = cl.group(msng_cl_lst, identity,
1780 manifest_and_file_collector(changedfiles))
1780 manifest_and_file_collector(changedfiles))
1781 for chnk in group:
1781 for chnk in group:
1782 yield chnk
1782 yield chnk
1783
1783
1784 # The list of manifests has been collected by the generator
1784 # The list of manifests has been collected by the generator
1785 # calling our functions back.
1785 # calling our functions back.
1786 prune_manifests()
1786 prune_manifests()
1787 add_extra_nodes(1, msng_mnfst_set)
1787 add_extra_nodes(1, msng_mnfst_set)
1788 msng_mnfst_lst = msng_mnfst_set.keys()
1788 msng_mnfst_lst = msng_mnfst_set.keys()
1789 # Sort the manifestnodes by revision number.
1789 # Sort the manifestnodes by revision number.
1790 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1790 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1791 # Create a generator for the manifestnodes that calls our lookup
1791 # Create a generator for the manifestnodes that calls our lookup
1792 # and data collection functions back.
1792 # and data collection functions back.
1793 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1793 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1794 filenode_collector(changedfiles))
1794 filenode_collector(changedfiles))
1795 for chnk in group:
1795 for chnk in group:
1796 yield chnk
1796 yield chnk
1797
1797
1798 # These are no longer needed, dereference and toss the memory for
1798 # These are no longer needed, dereference and toss the memory for
1799 # them.
1799 # them.
1800 msng_mnfst_lst = None
1800 msng_mnfst_lst = None
1801 msng_mnfst_set.clear()
1801 msng_mnfst_set.clear()
1802
1802
            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

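        # Yield, in revlog order, every node whose linkrev points at one of
        # the changesets being sent.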
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

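        # Callback handed to cl.group(): record the files touched by each
        # outgoing changeset so their revlogs can be bundled afterwards.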
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

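        # Build a lookup function that maps a manifest or file node back to
        # the changelog node that introduced it, via its linkrev.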
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
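        # csmap assigns each incoming changeset a linkrev equal to its new
        # changelog revision; revmap maps changelog nodes back to revisions
        # for the manifest and file groups that follow.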
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

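        # Everything below is appended inside one transaction so that a
        # failure, or a rejecting pretxnchangegroup hook, rolls the whole
        # group back.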
        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
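            # Each file in the stream is a chunk carrying its name followed
            # by a delta group; an empty chunk terminates the list of files.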
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
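                # Write the delayed changelog data to a pending file so the
                # pretxnchangegroup hook can see the incoming changesets
                # before the transaction is committed.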
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
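        # The first line of the reply is a numeric status code; anything
        # other than 0 means the server refused or failed the stream request.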
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
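        # Every entry on the wire is a header line of the form "<name>\0<size>"
        # followed by exactly <size> bytes of raw store data for that file.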
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True